Compare commits

..

1 Commits

449 changed files with 12799 additions and 23463 deletions

13
.github/ISSUE_TEMPLATE/01-feature.md vendored Normal file
View File

@ -0,0 +1,13 @@
---
name: Feature request
about: If you're just not sure how to do something, see "ask a question".
labels: enhancement, needs-triage
---
### Include required information
Please be sure to include at least:
- what problem your new feature would solve
- how or why you think it is generally useful (i.e., not just for you)
- what alternatives or workarounds you considered

View File

@ -1,28 +0,0 @@
name: Feature request
description: File a new feature request
labels: ["enhancement", "needs-triage"]
body:
- type: textarea
id: feature
attributes:
label: Feature description
description: Please describe the behavior you'd like to see.
validations:
required: true
- type: textarea
id: problem-usecase
attributes:
label: Problem or use case
description: Please explain which problem this would solve, or what the use case is for the feature. Keep in mind that it's more likely to be implemented if it's generally useful for a larger number of users.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Alternatives or workarounds
description: Please describe any alternatives or workarounds you have considered and, possibly, rejected.
validations:
required: true

23
.github/ISSUE_TEMPLATE/02-bug.md vendored Normal file
View File

@ -0,0 +1,23 @@
---
name: Bug report
about: If you're actually looking for support, see "ask a question".
labels: bug, needs-triage
---
### Does your log mention database corruption?
If your Syncthing log reports panics because of database corruption it is
most likely a fault with your system's storage or memory. Affected log
entries will contain lines starting with `panic: leveldb`. You will need to
delete the index database to clear this, by running `syncthing
-reset-database`.
### Include required information
Please be sure to include at least:
- which version of Syncthing and what operating system you are using
- browser and version, if applicable
- what happened,
- what you expected to happen instead, and
- any steps to reproduce the problem.

View File

@ -1,51 +0,0 @@
name: Bug report
description: If you're actually looking for support instead, see "I need help / I have a question".
labels: ["bug", "needs-triage"]
body:
- type: markdown
attributes:
value: |
:no_entry_sign: If you want to report a security issue, please see [our Security Policy](https://syncthing.net/security/) and do not report the issue here.
:interrobang: If you are not sure if there is a bug, but something isn't working right and you need help, please [use the forum](https://forum.syncthing.net/).
- type: textarea
id: what-happened
attributes:
label: What happened?
description: Also tell us, what did you expect to happen, and any steps we might use to reproduce the problem.
placeholder: Tell us what you see!
validations:
required: true
- type: input
id: version
attributes:
label: Syncthing version
description: What version of Syncthing are you running?
placeholder: v1.27.4
validations:
required: true
- type: input
id: platform
attributes:
label: Platform & operating system
description: On what platform(s) are you seeing the problem?
placeholder: Linux arm64
validations:
required: true
- type: input
id: browser
attributes:
label: Browser version
description: If the problem is related to the GUI, describe your browser and version.
placeholder: Safari 17.3.1
- type: textarea
id: logs
attributes:
label: Relevant log output
description: Please copy and paste any relevant log output or crash backtrace. This will be automatically formatted into code, so no need for backticks.
render: shell

View File

@ -1,13 +1,13 @@
version: 2 version: 2
updates: updates:
- package-ecosystem: "github-actions" - package-ecosystem: "github-actions"
directory: "/" directory: "/"
schedule: schedule:
interval: weekly interval: weekly
open-pull-requests-limit: 10 open-pull-requests-limit: 10
- package-ecosystem: "gomod" - package-ecosystem: "gomod"
directory: "/" directory: "/"
schedule: schedule:
interval: weekly interval: weekly
open-pull-requests-limit: 10 open-pull-requests-limit: 10

View File

@ -1,74 +0,0 @@
name: Build Infrastructure Images
on:
push:
branches:
- infrastructure
- infra-*
env:
GO_VERSION: "~1.22.3"
CGO_ENABLED: "0"
BUILD_USER: docker
BUILD_HOST: github.syncthing.net
jobs:
docker-syncthing:
name: Build and push Docker images
if: github.repository == 'syncthing/syncthing'
runs-on: ubuntu-latest
environment: docker
strategy:
matrix:
pkg:
- stcrashreceiver
- strelaypoolsrv
- stupgrades
- ursrv
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build binaries
run: |
for arch in arm64 amd64; do
go run build.go -goos linux -goarch "$arch" build ${{ matrix.pkg }}
mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-"$arch"
done
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set Docker tags (all branches)
run: |
tags=syncthing/${{ matrix.pkg }}:${{ github.sha }}
echo "TAGS=$tags" >> $GITHUB_ENV
- name: Set Docker tags (latest)
if: github.ref == 'refs/heads/infrastructure'
run: |
tags=syncthing/${{ matrix.pkg }}:latest,${{ env.TAGS }}
echo "TAGS=$tags" >> $GITHUB_ENV
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile.${{ matrix.pkg }}
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ env.TAGS }}
labels: |
org.opencontainers.image.revision=${{ github.sha }}

View File

@ -3,16 +3,10 @@ name: Build Syncthing
on: on:
pull_request: pull_request:
push: push:
schedule:
# Run nightly build at 05:00 UTC
- cron: '00 05 * * *'
workflow_dispatch:
env: env:
# The go version to use for builds. We set check-latest to true when # The go version to use for builds.
# installing, so we get the latest patch version that matches the GO_VERSION: "1.19.5"
# expression.
GO_VERSION: "~1.22.3"
# Optimize compatibility on the slow archictures. # Optimize compatibility on the slow archictures.
GO386: softfloat GO386: softfloat
@ -36,109 +30,11 @@ env:
jobs: jobs:
# #
# Tests for all platforms. Runs a matrix build on Windows, Linux and Mac, # Windows, quick build and test, runs always
# with the list of expected supported Go versions (current, previous).
# #
build-test: build-windows:
name: Build and test name: Build and test on Windows
strategy:
fail-fast: false
matrix:
runner: ["windows-latest", "ubuntu-latest", "macos-latest"]
# The oldest version in this list should match what we have in our go.mod.
# Variables don't seem to be supported here, or we could have done something nice.
go: ["~1.21.7", "~1.22.3"]
runs-on: ${{ matrix.runner }}
steps:
- name: Set git to use LF
if: matrix.runner == 'windows-latest'
# Without this, the Windows checkout will happen with CRLF line
# endings, which is fine for the source code but messes up tests
# that depend on data on disk being as expected. Ideally, those
# tests should be fixed, but not today.
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
cache: true
check-latest: true
- name: Build
run: |
go run build.go
- name: Install go-test-json-to-loki
run: |
go install calmh.dev/go-test-json-to-loki@latest
- name: Test
run: |
go version
go run build.go test | go-test-json-to-loki
env:
GOFLAGS: "-json"
LOKI_URL: ${{ vars.LOKI_URL }}
LOKI_USER: ${{ vars.LOKI_USER }}
LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }}
LOKI_LABELS: "go=${{ matrix.go }},runner=${{ matrix.runner }},repo=${{ github.repository }},ref=${{ github.ref }}"
#
# Meta checks for formatting, copyright, etc
#
correctness:
name: Check correctness
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Check correctness
run: |
go test -v ./meta
#
# The basic checks job is a virtual one that depends on the matrix tests,
# the correctness checks, and various builds that we always do. This makes
# it easy to have the PR process have a single test as a gatekeeper for
# merging, instead of having to add all the matrix tests and update them
# each time the version changes. (The top level test is not available for
# choosing there, only the matrix "children".)
#
basics:
name: Basic checks passed
runs-on: ubuntu-latest
needs:
- build-test
- correctness
- package-linux
- package-cross
- package-source
- package-debian
- govulncheck
steps:
- uses: actions/checkout@v4
#
# Windows
#
package-windows:
name: Package for Windows
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- name: Set git to use LF - name: Set git to use LF
@ -150,22 +46,52 @@ jobs:
git config --global core.autocrlf false git config --global core.autocrlf false
git config --global core.eol lf git config --global core.eol lf
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- uses: actions/setup-go@v3
# `cache: true` gives us automatic caching of modules and build
# cache, speeding up builds. The cache key is dependent on the Go
# version and our go.sum contents.
with:
go-version: ${{ env.GO_VERSION }}
cache: true
- name: Build and test
run: |
go run build.go
go run build.go test
#
# Windows, build signed packages
#
package-windows:
name: Create packages for Windows
runs-on: windows-latest
# We only run this job for release pushes.
if: github.event_name == 'push' && startsWith(github.ref, 'refs/heads/release')
# This is also enforced by the environment which contains the secrets.
environment: signing
needs:
- build-windows
steps:
- name: Set git to use LF
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@v3
# `fetch-depth: 0` because we want to check out the entire repo
# including tags and branches, not just the latest commit which
# lacks version info.
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-go@v5 - uses: actions/setup-go@v3
with: with:
go-version: ${{ env.GO_VERSION }} go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Get actual Go version - uses: actions/cache@v3
run: |
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: actions/cache@v4
with: with:
path: | path: |
~\AppData\Local\go-build ~\AppData\Local\go-build
@ -183,666 +109,13 @@ jobs:
go run build.go -goarch arm64 zip go run build.go -goarch arm64 zip
go run build.go -goarch 386 zip go run build.go -goarch 386 zip
env: env:
CGO_ENABLED: "0"
CODESIGN_SIGNTOOL: ${{ secrets.CODESIGN_SIGNTOOL }} CODESIGN_SIGNTOOL: ${{ secrets.CODESIGN_SIGNTOOL }}
CODESIGN_CERTIFICATE_BASE64: ${{ secrets.CODESIGN_CERTIFICATE_BASE64 }} CODESIGN_CERTIFICATE_BASE64: ${{ secrets.CODESIGN_CERTIFICATE_BASE64 }}
CODESIGN_CERTIFICATE_PASSWORD: ${{ secrets.CODESIGN_CERTIFICATE_PASSWORD }} CODESIGN_CERTIFICATE_PASSWORD: ${{ secrets.CODESIGN_CERTIFICATE_PASSWORD }}
CODESIGN_TIMESTAMP_SERVER: ${{ secrets.CODESIGN_TIMESTAMP_SERVER }} CODESIGN_TIMESTAMP_SERVER: ${{ secrets.CODESIGN_TIMESTAMP_SERVER }}
- name: Archive artifacts - name: Archive artifacts
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: packages-windows name: packages
path: syncthing-windows-*.zip path: syncthing-windows-*.zip
#
# Linux
#
package-linux:
name: Package for Linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Get actual Go version
run: |
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}
- name: Create packages
run: |
archs=$(go tool dist list | grep linux | sed 's#linux/##')
for goarch in $archs ; do
go run build.go -goarch "$goarch" tar
done
env:
CGO_ENABLED: "0"
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-linux
path: syncthing-linux-*.tar.gz
#
# macOS
#
package-macos:
name: Package for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Get actual Go version
run: |
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}
- name: Import signing certificate
run: |
# Set up a run-specific keychain, making it available for the
# `codesign` tool.
umask 066
KEYCHAIN_PATH=$RUNNER_TEMP/codesign.keychain
KEYCHAIN_PASSWORD=$(uuidgen)
security create-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH"
security default-keychain -s "$KEYCHAIN_PATH"
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH"
security set-keychain-settings -lut 21600 "$KEYCHAIN_PATH"
# Import the certificate
CERTIFICATE_PATH=$RUNNER_TEMP/codesign.p12
echo "$DEVELOPER_ID_CERTIFICATE_BASE64" | base64 -d -o "$CERTIFICATE_PATH"
security import "$CERTIFICATE_PATH" -k "$KEYCHAIN_PATH" -P "$DEVELOPER_ID_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/productsign
security set-key-partition-list -S apple-tool:,apple: -s -k actions "$KEYCHAIN_PATH"
# Set the codesign identity for following steps
echo "CODESIGN_IDENTITY=$CODESIGN_IDENTITY" >> $GITHUB_ENV
env:
DEVELOPER_ID_CERTIFICATE_BASE64: ${{ secrets.DEVELOPER_ID_CERTIFICATE_BASE64 }}
DEVELOPER_ID_CERTIFICATE_PASSWORD: ${{ secrets.DEVELOPER_ID_CERTIFICATE_PASSWORD }}
CODESIGN_IDENTITY: ${{ secrets.CODESIGN_IDENTITY }}
- name: Create package (amd64)
run: |
go run build.go -goarch amd64 zip
env:
CGO_ENABLED: "1"
- name: Create package (arm64 cross)
run: |
cat <<EOT > xgo.sh
#!/bin/bash
CGO_ENABLED=1 \
CGO_CFLAGS="-target arm64-apple-macos10.15" \
CGO_LDFLAGS="-target arm64-apple-macos10.15" \
go "\$@"
EOT
chmod 755 xgo.sh
go run build.go -gocmd ./xgo.sh -goarch arm64 zip
env:
CGO_ENABLED: "1"
- name: Create package (universal)
run: |
rm -rf _tmp
mkdir _tmp
pushd _tmp
unzip ../syncthing-macos-amd64-*.zip
unzip ../syncthing-macos-arm64-*.zip
lipo -create syncthing-macos-amd64-*/syncthing syncthing-macos-arm64-*/syncthing -o syncthing
amd64=(syncthing-macos-amd64-*)
universal="${amd64/amd64/universal}"
mv "$amd64" "$universal"
mv syncthing "$universal"
zip -r "../$universal.zip" "$universal"
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-macos
path: syncthing-*.zip
notarize-macos:
name: Notarize for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
needs:
- package-macos
- basics
runs-on: macos-latest
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: packages-macos
- name: Notarize binaries
run: |
APPSTORECONNECT_API_KEY_PATH="$RUNNER_TEMP/apikey.p8"
echo "$APPSTORECONNECT_API_KEY" | base64 -d -o "$APPSTORECONNECT_API_KEY_PATH"
for file in syncthing-macos-*.zip ; do
xcrun notarytool submit \
-k "$APPSTORECONNECT_API_KEY_PATH" \
-d "$APPSTORECONNECT_API_KEY_ID" \
-i "$APPSTORECONNECT_API_KEY_ISSUER" \
$file
done
env:
APPSTORECONNECT_API_KEY: ${{ secrets.APPSTORECONNECT_API_KEY }}
APPSTORECONNECT_API_KEY_ID: ${{ secrets.APPSTORECONNECT_API_KEY_ID }}
APPSTORECONNECT_API_KEY_ISSUER: ${{ secrets.APPSTORECONNECT_API_KEY_ISSUER }}
#
# Cross compile other unixes
#
package-cross:
name: Package cross compiled
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Get actual Go version
run: |
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-cross-${{ hashFiles('**/go.sum') }}
- name: Create packages
run: |
platforms=$(go tool dist list \
| grep -v aix/ppc64 \
| grep -v android/ \
| grep -v darwin/ \
| grep -v ios/ \
| grep -v js/ \
| grep -v linux/ \
| grep -v nacl/ \
| grep -v plan9/ \
| grep -v windows/ \
| grep -v /wasm \
)
# Build for each platform with errors silenced, because we expect
# some oddball platforms to fail. This avoids a bunch of errors in
# the GitHub Actions output, instead summarizing each build
# failure as a warning.
for plat in $platforms; do
goos="${plat%/*}"
goarch="${plat#*/}"
echo "::group ::$plat"
if ! go run build.go -goos "$goos" -goarch "$goarch" tar 2>/dev/null; then
echo "::warning ::Failed to build for $plat"
fi
echo "::endgroup::"
done
env:
CGO_ENABLED: "0"
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-other
path: syncthing-*.tar.gz
#
# Source
#
package-source:
name: Package source code
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Package source
run: |
version=$(go run build.go version)
echo "$version" > RELEASE
go mod vendor
go run build.go assets
cd ..
tar c -z -f "syncthing-source-$version.tar.gz" \
--exclude .git \
syncthing
mv "syncthing-source-$version.tar.gz" syncthing
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-source
path: syncthing-source-*.tar.gz
#
# Sign binaries for auto upgrade, generate ASC signature files
#
sign-for-upgrade:
name: Sign for upgrade
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
needs:
- basics
- package-windows
- package-linux
- package-macos
- package-cross
- package-source
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/checkout@v4
with:
repository: syncthing/release-tools
path: tools
fetch-depth: 0
- name: Download artifacts
uses: actions/download-artifact@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Install signing tool
run: |
go install ./cmd/stsigtool
- name: Sign archives
run: |
export PRIVATE_KEY="$RUNNER_TEMP/privkey.pem"
export PATH="$PATH:$(go env GOPATH)/bin"
echo "$STSIGTOOL_PRIVATE_KEY" | base64 -d > "$PRIVATE_KEY"
mkdir packages
mv packages-*/* packages
pushd packages
"$GITHUB_WORKSPACE/tools/sign-only"
rm -f "$PRIVATE_KEY"
env:
STSIGTOOL_PRIVATE_KEY: ${{ secrets.STSIGTOOL_PRIVATE_KEY }}
- name: Create and sign .asc files
run: |
sudo apt update
sudo apt -y install gnupg
export SIGNING_KEY="$RUNNER_TEMP/gpg-secret.asc"
echo "$GNUPG_SIGNING_KEY_BASE64" | base64 -d > "$SIGNING_KEY"
gpg --import < "$SIGNING_KEY"
pushd packages
files=(*.tar.gz *.zip)
sha1sum "${files[@]}" | gpg --clearsign > sha1sum.txt.asc
sha256sum "${files[@]}" | gpg --clearsign > sha256sum.txt.asc
gpg --sign --armour --detach syncthing-source-*.tar.gz
popd
rm -f "$SIGNING_KEY" .gnupg
env:
GNUPG_SIGNING_KEY_BASE64: ${{ secrets.GNUPG_SIGNING_KEY_BASE64 }}
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-signed
path: packages/*
#
# Debian
#
package-debian:
name: Package for Debian
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Get actual Go version
run: |
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: ruby/setup-ruby@v1
with:
ruby-version: '3.0'
- name: Install fpm
run: |
gem install fpm
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-debian-${{ hashFiles('**/go.sum') }}
- name: Package for Debian
run: |
for arch in amd64 i386 armhf armel arm64 ; do
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb
done
env:
BUILD_USER: debian
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: debian-packages
path: "*.deb"
#
# Nightlies
#
publish-nightly:
name: Publish nightly build
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && startsWith(github.ref, 'refs/heads/release-nightly')
environment: signing
needs:
- sign-for-upgrade
- notarize-macos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
repository: syncthing/release-tools
path: tools
fetch-depth: 0
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: packages-signed
path: packages
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Create release json
run: |
cd packages
"$GITHUB_WORKSPACE/tools/generate-release-json" "$BASE_URL" > nightly.json
env:
BASE_URL: https://syncthing.ams3.digitaloceanspaces.com/nightly/
- name: Push artifacts
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_SPACES_TYPE: s3
RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
RCLONE_CONFIG_SPACES_ACL: public-read
with:
args: sync packages spaces:syncthing/nightly
#
# Push release artifacts to Spaces
#
publish-release-files:
name: Publish release files
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/release'
environment: signing
needs:
- sign-for-upgrade
- package-debian
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Download signed packages
uses: actions/download-artifact@v4
with:
name: packages-signed
path: packages
- name: Download debian packages
uses: actions/download-artifact@v4
with:
name: debian-packages
path: packages
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Set version
run: |
version=$(go run build.go version)
echo "VERSION=$version" >> $GITHUB_ENV
- name: Push to Spaces (${{ env.VERSION }})
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_SPACES_TYPE: s3
RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
RCLONE_CONFIG_SPACES_ACL: public-read
with:
args: sync packages spaces:syncthing/release/${{ env.VERSION }}
- name: Push to Spaces (latest)
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_SPACES_TYPE: s3
RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
RCLONE_CONFIG_SPACES_ACL: public-read
with:
args: sync spaces:syncthing/release/${{ env.VERSION }} spaces:syncthing/release/latest
#
# Build and push to Docker Hub
#
docker-syncthing:
name: Build and push Docker images
runs-on: ubuntu-latest
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || github.ref == 'refs/heads/main' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/heads/release-'))
environment: docker
strategy:
matrix:
pkg:
- syncthing
- strelaysrv
- stdiscosrv
include:
- pkg: syncthing
dockerfile: Dockerfile
image: syncthing/syncthing
- pkg: strelaysrv
dockerfile: Dockerfile.strelaysrv
image: syncthing/relaysrv
- pkg: stdiscosrv
dockerfile: Dockerfile.stdiscosrv
image: syncthing/discosrv
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Get actual Go version
run: |
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-docker-${{ matrix.pkg }}-${{ hashFiles('**/go.sum') }}
- name: Build binaries
run: |
for arch in amd64 arm64 arm; do
go run build.go -goos linux -goarch "$arch" -no-upgrade build ${{ matrix.pkg }}
mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-"$arch"
done
env:
CGO_ENABLED: "0"
BUILD_USER: docker
- name: Check if we will be able to push images
run: |
if [[ "${{ secrets.DOCKERHUB_TOKEN }}" != "" ]]; then
echo "DOCKER_PUSH=true" >> $GITHUB_ENV;
fi
- name: Login to Docker Hub
uses: docker/login-action@v3
if: env.DOCKER_PUSH == 'true'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set version tags
run: |
version=$(go run build.go version)
version=${version#v}
if [[ $version == @([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]) ]] ; then
echo Release version, pushing to :latest and version tags
major=${version%.*.*}
minor=${version%.*}
tags=${{ matrix.image }}:$version,${{ matrix.image }}:$major,${{ matrix.image }}:$minor,${{ matrix.image }}:latest
elif [[ $version == *-rc.@([0-9]|[0-9][0-9]) ]] ; then
echo Release candidate, pushing to :rc
tags=${{ matrix.image }}:rc
else
echo Development version, pushing to :edge
tags=${{ matrix.image }}:edge
fi
echo "DOCKER_TAGS=$tags" >> $GITHUB_ENV
echo "VERSION=$version" >> $GITHUB_ENV
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
file: ${{ matrix.dockerfile }}
platforms: linux/amd64,linux/arm64,linux/arm/7
push: ${{ env.DOCKER_PUSH == 'true' }}
tags: ${{ env.DOCKER_TAGS }}
labels: |
org.opencontainers.image.version=${{ env.VERSION }}
org.opencontainers.image.revision=${{ github.sha }}
#
# Check for known vulnerabilities in Go dependencies
#
govulncheck:
runs-on: ubuntu-latest
name: Run govulncheck
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: run govulncheck
run: |
go run build.go assets
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...

View File

@ -1,21 +0,0 @@
name: Trigger nightly build & release
on:
workflow_dispatch:
schedule:
# Run nightly build at 01:00 UTC
- cron: '00 01 * * *'
jobs:
trigger-nightly:
runs-on: ubuntu-latest
name: Push to release-nightly to trigger build
steps:
- uses: actions/checkout@v4
with:
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
fetch-depth: 0
- run: |
git push origin main:release-nightly

View File

@ -2,7 +2,7 @@ name: Update translations and documentation
on: on:
workflow_dispatch: workflow_dispatch:
schedule: schedule:
- cron: '42 3 * * 1' - cron: '42 3 * * 1'
jobs: jobs:
@ -10,13 +10,13 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Update translations and documentation name: Update translations and documentation
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
with: with:
fetch-depth: 0 fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }} token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- uses: actions/setup-go@v5 - uses: actions/setup-go@268d8c0ca0432bb2cf416faae41297df9d262d7f # v2
with: with:
go-version: stable go-version: ^1.18.4
- run: | - run: |
set -euo pipefail set -euo pipefail
git config --global user.name 'Syncthing Release Automation' git config --global user.name 'Syncthing Release Automation'
@ -25,4 +25,5 @@ jobs:
bash build.sh prerelease bash build.sh prerelease
git push git push
env: env:
WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }} TRANSIFEX_USER: ${{ secrets.TRANSIFEX_USER }}
TRANSIFEX_PASS: ${{ secrets.TRANSIFEX_PASS }}

4
.gitignore vendored
View File

@ -1,10 +1,11 @@
/syncthing /syncthing
/stdiscosrv /stdiscosrv
syncthing.exe
stdiscosrv.exe
*.tar.gz *.tar.gz
*.zip *.zip
*.asc *.asc
*.deb *.deb
*.exe
.jshintrc .jshintrc
coverage.out coverage.out
files/pidx files/pidx
@ -18,3 +19,4 @@ deb
/repos /repos
/proto/scripts/protoc-gen-gosyncthing /proto/scripts/protoc-gen-gosyncthing
/gui/next-gen-gui /gui/next-gen-gui
.idea

View File

@ -1,4 +0,0 @@
line_ending: lf
formatter:
type: basic
retain_line_breaks: true

41
AUTHORS
View File

@ -23,13 +23,10 @@ Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alex Lindeman <139387+aelindeman@users.noreply.github.com> Alex Lindeman <139387+aelindeman@users.noreply.github.com>
Alex Xu <alex.hello71@gmail.com> Alex Xu <alex.hello71@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de> Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexander Seiler <seileralex@gmail.com>
Alexandre Alves <alexandrealvesdb.contact@gmail.com> Alexandre Alves <alexandrealvesdb.contact@gmail.com>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org> Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Aman Gupta <aman@tmm1.net> Aman Gupta <aman@tmm1.net>
Anatoli Babenia <anatoli@rainforce.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com> Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
Andreas Sommer <andreas.sommer87@googlemail.com>
andresvia <andres.via@gmail.com> andresvia <andres.via@gmail.com>
Andrew Dunham (andrew-d) <andrew@du.nham.ca> Andrew Dunham (andrew-d) <andrew@du.nham.ca>
Andrew Meyer <andrewm.bpi@gmail.com> Andrew Meyer <andrewm.bpi@gmail.com>
@ -38,7 +35,6 @@ Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de> André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
andyleap <andyleap@gmail.com> andyleap <andyleap@gmail.com>
Anjan Momi <anjan@momi.ca> Anjan Momi <anjan@momi.ca>
Anthony Goeckner <agoeckner@users.noreply.github.com>
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr> Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
Antony Male (canton7) <antony.male@gmail.com> Antony Male (canton7) <antony.male@gmail.com>
Anur <anurnomeru@163.com> Anur <anurnomeru@163.com>
@ -51,7 +47,6 @@ Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github
Aurélien Rainone <476650+arl@users.noreply.github.com> Aurélien Rainone <476650+arl@users.noreply.github.com>
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com> BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com> Bart De Vries (mogwa1) <devriesb@gmail.com>
Beat Reichenbach <44111292+beatreichenbach@users.noreply.github.com>
Ben Curthoys (bencurthoys) <ben@bencurthoys.com> Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com> Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Shepherd (benshep) <bjashepherd@gmail.com> Ben Shepherd (benshep) <bjashepherd@gmail.com>
@ -70,53 +65,42 @@ Brian R. Becker (brbecker) <brbecker@gmail.com>
bt90 <btom1990@googlemail.com> bt90 <btom1990@googlemail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com> Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Carsten Hagemann (carstenhag) <moter8@gmail.com> <carsten@chagemann.de> Carsten Hagemann (carstenhag) <moter8@gmail.com> <carsten@chagemann.de>
Catfriend1 <16361913+Catfriend1@users.noreply.github.com>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local> Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
Cedric Staniewski (xduugu) <cedric@gmx.ca> Cedric Staniewski (xduugu) <cedric@gmx.ca>
chenrui <rui@meetup.com> chenrui <rui@meetup.com>
Chih-Hsuan Yen <yan12125@gmail.com> <1937689+yan12125@users.noreply.github.com> Chih-Hsuan Yen <yan12125@gmail.com>
Choongkyu <choongkyu.kim+gh@gmail.com> <vapidlyrapid+gh@gmail.com> Choongkyu <choongkyu.kim+gh@gmail.com> <vapidlyrapid+gh@gmail.com>
Chris Howie (cdhowie) <me@chrishowie.com> Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy> Chris Joel (cdata) <chris@scriptolo.gy>
Chris Tonkinson <chris@masterbran.ch> Chris Tonkinson <chris@masterbran.ch>
Christian Kujau <ckujau@users.noreply.github.com>
Christian Prescott <me@christianprescott.com> Christian Prescott <me@christianprescott.com>
chucic <chucic@seznam.cz> chucic <chucic@seznam.cz>
cjc7373 <niuchangcun@gmail.com>
Colin Kennedy (moshen) <moshen.colin@gmail.com> Colin Kennedy (moshen) <moshen.colin@gmail.com>
Cromefire_ <tim.l@nghorst.net> <26320625+cromefire@users.noreply.github.com> Cromefire_ <tim.l@nghorst.net> <26320625+cromefire@users.noreply.github.com>
cui fliter <imcusg@gmail.com> cui fliter <imcusg@gmail.com>
Cyprien Devillez <cypx@users.noreply.github.com> Cyprien Devillez <cypx@users.noreply.github.com>
d-volution <49024624+d-volution@users.noreply.github.com>
Dale Visser <dale.visser@live.com> Dale Visser <dale.visser@live.com>
Dan <benda.daniel@gmail.com> Dan <benda.daniel@gmail.com>
Daniel Barczyk <46358936+DanielBarczyk@users.noreply.github.com> Daniel Barczyk <46358936+DanielBarczyk@users.noreply.github.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com> Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com> Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc> Daniel Martí (mvdan) <mvdan@mvdan.cc>
Daniel Padrta <64928366+danpadcz@users.noreply.github.com>
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com> Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk> David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
DeflateAwning <11021263+DeflateAwning@users.noreply.github.com>
Denis A. (dva) <denisva@gmail.com> Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io> Dennis Wilson (snnd) <dw@risu.io>
dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com> <27856297+dependabot-preview[bot]@users.noreply.github.com> dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com> <27856297+dependabot-preview[bot]@users.noreply.github.com>
dependabot[bot] <dependabot[bot]@users.noreply.github.com> <49699333+dependabot[bot]@users.noreply.github.com> dependabot[bot] <dependabot[bot]@users.noreply.github.com> <49699333+dependabot[bot]@users.noreply.github.com>
derekriemer <derek.riemer@colorado.edu> derekriemer <derek.riemer@colorado.edu>
DerRockWolf <50499906+DerRockWolf@users.noreply.github.com>
desbma <desbma@users.noreply.github.com> desbma <desbma@users.noreply.github.com>
Devon G. Redekopp <devon@redekopp.com> Devon G. Redekopp <devon@redekopp.com>
diemade <spamkill@posteo.ch>
digital <didev@dinid.net>
Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com> Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
Domenic Horner <domenic@tgxn.net> Domenic Horner <domenic@tgxn.net>
Dominik Heidler (asdil12) <dominik@heidler.eu> Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com> Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Elliot Huffman <thelich2@gmail.com> Elliot Huffman <thelich2@gmail.com>
Emil Hessman (ceh) <emil@hessman.se> Emil Hessman (ceh) <emil@hessman.se>
Emil Lundberg <emil@emlun.se>
Eng Zer Jun <engzerjun@gmail.com> Eng Zer Jun <engzerjun@gmail.com>
entity0xfe <109791748+entity0xfe@users.noreply.github.com> <entity0xfe@my.domain> entity0xfe <109791748+entity0xfe@users.noreply.github.com> <entity0xfe@my.domain>
Eric Lesiuta <elesiuta@gmail.com> Eric Lesiuta <elesiuta@gmail.com>
@ -125,7 +109,6 @@ Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Evan Spensley <94762716+0evan@users.noreply.github.com> Evan Spensley <94762716+0evan@users.noreply.github.com>
Evgeny Kuznetsov <evgeny@kuznetsov.md> Evgeny Kuznetsov <evgeny@kuznetsov.md>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com> Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix <53702818+f-eliks@users.noreply.github.com>
Felix Ableitner (Nutomic) <me@nutomic.com> Felix Ableitner (Nutomic) <me@nutomic.com>
Felix Lampe <mail@flampe.de> Felix Lampe <mail@flampe.de>
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com> Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
@ -139,8 +122,6 @@ Gleb Sinyavskiy <zhulik.gleb@gmail.com>
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu> Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
greatroar <61184462+greatroar@users.noreply.github.com> greatroar <61184462+greatroar@users.noreply.github.com>
Greg <gco@jazzhaiku.com> Greg <gco@jazzhaiku.com>
guangwu <guoguangwu@magic-shield.com>
gudvinr <gudvinr@gmail.com>
Han Boetes <han@boetes.org> Han Boetes <han@boetes.org>
HansK-p <42314815+HansK-p@users.noreply.github.com> HansK-p <42314815+HansK-p@users.noreply.github.com>
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com> Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
@ -157,14 +138,13 @@ Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jack Croft <jccroft1@users.noreply.github.com> Jack Croft <jccroft1@users.noreply.github.com>
Jacob <jyundt@gmail.com> Jacob <jyundt@gmail.com>
Jake Peterson (acogdev) <jake@acogdev.com> Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net> <jborg@coreweave.com> Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
James O'Beirne <wild-github@au92.org> James O'Beirne <wild-github@au92.org>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com> James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
janost <janost@tuta.io> janost <janost@tuta.io>
Jaroslav Lichtblau <svetlemodry@users.noreply.github.com> Jaroslav Lichtblau <svetlemodry@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com> Jaroslav Malec (dzarda) <dzardacz@gmail.com>
jaseg <githubaccount@jaseg.net> jaseg <githubaccount@jaseg.net>
Jaspitta <ste.scarpitta@gmail.com>
Jauder Ho <jauderho@users.noreply.github.com> Jauder Ho <jauderho@users.noreply.github.com>
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com> Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
Jaya Kumar <jaya.kumar@ict.nl> Jaya Kumar <jaya.kumar@ict.nl>
@ -183,14 +163,11 @@ Jonathan Cross <jcross@gmail.com>
Jonta <359397+Jonta@users.noreply.github.com> Jonta <359397+Jonta@users.noreply.github.com>
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com> Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
jtagcat <git-514635f7@jtag.cat> <git-12dbd862@jtag.cat> jtagcat <git-514635f7@jtag.cat> <git-12dbd862@jtag.cat>
Julian Lehrhuber <jul13579@users.noreply.github.com>
Jörg Thalheim <Mic92@users.noreply.github.com> Jörg Thalheim <Mic92@users.noreply.github.com>
Jędrzej Kula <kula.jedrek@gmail.com> Jędrzej Kula <kula.jedrek@gmail.com>
K.B.Dharun Krishna <kbdharunkrishna@gmail.com>
Kalle Laine <pahakalle@protonmail.com> Kalle Laine <pahakalle@protonmail.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com> Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kebin Liu <lkebin@gmail.com> Kebin Liu <lkebin@gmail.com>
Keith Harrison <keithh@protonmail.com>
Keith Turner <kturner@apache.org> Keith Turner <kturner@apache.org>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com> Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org> Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
@ -199,7 +176,6 @@ Kevin Bushiri (keevBush) <keevbush@gmail.com> <36192217+keevBush@users.noreply.g
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com> Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
klemens <ka7@github.com> klemens <ka7@github.com>
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com> Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
kylosus <33132401+kylosus@users.noreply.github.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de> Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Lars Lehtonen <lars.lehtonen@gmail.com> Lars Lehtonen <lars.lehtonen@gmail.com>
Laurent Arnoud <laurent@spkdev.net> Laurent Arnoud <laurent@spkdev.net>
@ -210,7 +186,6 @@ Lode Hoste (Zillode) <zillode@zillode.be>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com> Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
LSmithx2 <42276854+lsmithx2@users.noreply.github.com> LSmithx2 <42276854+lsmithx2@users.noreply.github.com>
Lukas Lihotzki <lukas@lihotzki.de> Lukas Lihotzki <lukas@lihotzki.de>
Luke Hamburg <1992842+luckman212@users.noreply.github.com>
luzpaz <luzpaz@users.noreply.github.com> luzpaz <luzpaz@users.noreply.github.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com> Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name> Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
@ -221,7 +196,6 @@ Marcus Legendre <marcus.legendre@gmail.com>
Mario Majila <mariustshipichik@gmail.com> Mario Majila <mariustshipichik@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au> Mark Pulford (mpx) <mark@kyne.com.au>
Martchus <martchus@gmx.net> Martchus <martchus@gmx.net>
Martin Polehla <p0l0us@users.noreply.github.com>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl> Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Mateusz Ż <thedead4fun@live.com> Mateusz Ż <thedead4fun@live.com>
Matic Potočnik <hairyfotr@gmail.com> Matic Potočnik <hairyfotr@gmail.com>
@ -233,14 +207,12 @@ Max <github@germancoding.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com> Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
MaximAL <almaximal@ya.ru> MaximAL <almaximal@ya.ru>
Maxime Thirouin <m@moox.io> Maxime Thirouin <m@moox.io>
Maximilian <maxi.rostock@outlook.de> <public@complexvector.space>
mclang <1721600+mclang@users.noreply.github.com> mclang <1721600+mclang@users.noreply.github.com>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com> Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Ploujnikov (plouj) <ploujj@gmail.com> Michael Ploujnikov (plouj) <ploujj@gmail.com>
Michael Rienstra <mrienstra@gmail.com> Michael Rienstra <mrienstra@gmail.com>
Michael Tilli (pyfisch) <pyfisch@gmail.com> Michael Tilli (pyfisch) <pyfisch@gmail.com>
MichaIng <micha@dietpi.com> MichaIng <micha@dietpi.com>
Migelo <miha@filetki.si>
Mike Boone <mike@boonedocks.net> Mike Boone <mike@boonedocks.net>
MikeLund <MikeLund@users.noreply.github.com> MikeLund <MikeLund@users.noreply.github.com>
MikolajTwarog <43782609+MikolajTwarog@users.noreply.github.com> MikolajTwarog <43782609+MikolajTwarog@users.noreply.github.com>
@ -248,7 +220,6 @@ Mingxuan Lin <gdlmx@users.noreply.github.com>
mv1005 <49659413+mv1005@users.noreply.github.com> mv1005 <49659413+mv1005@users.noreply.github.com>
Nate Morrison (nrm21) <natemorrison@gmail.com> Nate Morrison (nrm21) <natemorrison@gmail.com>
Naveen <172697+naveensrinivasan@users.noreply.github.com> Naveen <172697+naveensrinivasan@users.noreply.github.com>
nf <nf@wh3rd.net>
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com> Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
Nick Busey <NickBusey@users.noreply.github.com> Nick Busey <NickBusey@users.noreply.github.com>
Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com> Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com>
@ -260,7 +231,6 @@ NinoM4ster <ninom4ster@gmail.com>
Nitroretro <43112364+Nitroretro@users.noreply.github.com> Nitroretro <43112364+Nitroretro@users.noreply.github.com>
NoLooseEnds <jon.koslung@gmail.com> NoLooseEnds <jon.koslung@gmail.com>
Oliver Freyermuth <o.freyermuth@googlemail.com> Oliver Freyermuth <o.freyermuth@googlemail.com>
orangekame3 <miya.org.0309@gmail.com>
otbutz <tbutz@optitool.de> otbutz <tbutz@optitool.de>
Otiel <Otiel@users.noreply.github.com> Otiel <Otiel@users.noreply.github.com>
overkill <22098433+0verk1ll@users.noreply.github.com> overkill <22098433+0verk1ll@users.noreply.github.com>
@ -299,7 +269,6 @@ Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
Scott Klupfel (kluppy) <kluppy@going2blue.com> Scott Klupfel (kluppy) <kluppy@going2blue.com>
sec65 <106604020+sec65@users.noreply.github.com> sec65 <106604020+sec65@users.noreply.github.com>
Sergey Mishin (ralder) <ralder@yandex.ru> Sergey Mishin (ralder) <ralder@yandex.ru>
Sertonix <83883937+Sertonix@users.noreply.github.com>
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com> Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
Simon Frei (imsodin) <freisim93@gmail.com> Simon Frei (imsodin) <freisim93@gmail.com>
Simon Mwepu <simonmwepu@gmail.com> Simon Mwepu <simonmwepu@gmail.com>
@ -308,15 +277,12 @@ Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org> Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Steven Eckhoff <steven.eckhoff.opensource@gmail.com> Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com> Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Sven Bachmann <dev@mcbachmann.de>
Syncthing Automation <automation@syncthing.net> Syncthing Automation <automation@syncthing.net>
Syncthing Release Automation <release@syncthing.net> Syncthing Release Automation <release@syncthing.net>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com> Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Thomas <9749173+uhthomas@users.noreply.github.com>
Thomas Hipp <thomashipp@gmail.com> Thomas Hipp <thomashipp@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk> Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu> Tim Howes (timhowes) <timhowes@berkeley.edu>
Tim Nordenfur <tim@gurka.se>
Tobias Klauser <tobias.klauser@gmail.com> Tobias Klauser <tobias.klauser@gmail.com>
Tobias Nygren (tnn2) <tnn@nygren.pp.se> Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de> Tobias Tom (tobiastom) <t.tom@succont.de>
@ -327,15 +293,12 @@ Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com> Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Tyler Kropp <kropptyler@gmail.com> Tyler Kropp <kropptyler@gmail.com>
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com> Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
vapatel2 <149737089+vapatel2@users.noreply.github.com>
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi> Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
Victor Buinsky (buinsky) <vix_booja@tut.by> Victor Buinsky (buinsky) <vix_booja@tut.by>
Vik <63919734+ViktorOn@users.noreply.github.com>
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com> Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
villekalliomaki <53118179+villekalliomaki@users.noreply.github.com> villekalliomaki <53118179+villekalliomaki@users.noreply.github.com>
Vladimir Rusinov <vrusinov@google.com> <vladimir.rusinov@gmail.com> Vladimir Rusinov <vrusinov@google.com> <vladimir.rusinov@gmail.com>
wangguoliang <liangcszzu@163.com> wangguoliang <liangcszzu@163.com>
Will Rouesnel <wrouesnel@wrouesnel.com>
William A. Kennington III (wkennington) <william@wkennington.com> William A. Kennington III (wkennington) <william@wkennington.com>
wouter bolsterlee <wouter@bolsterl.ee> wouter bolsterlee <wouter@bolsterl.ee>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de> Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>

View File

@ -24,15 +24,10 @@ too much information will never get you yelled at. :)
## Contributing Translations ## Contributing Translations
All translations are done via All translations are done via
[Weblate](https://hosted.weblate.org/projects/syncthing/). If you wish [Transifex](https://www.transifex.com/projects/p/syncthing/). If you
to contribute to a translation, just head over there and sign up. wish to contribute to a translation, just head over there and sign up.
Before every release, the language resources are updated from the Before every release, the language resources are updated from the
latest info on Weblate. latest info on Transifex.
Note that the previously used service at
[Transifex](https://www.transifex.com/projects/p/syncthing/) is being
retired and we kindly ask you to sign up on Weblate for continued
involvement.
## Contributing Code ## Contributing Code

View File

@ -1,41 +1,15 @@
ARG GOVERSION=latest ARG GOVERSION=latest
#
# Maybe build Syncthing. This is a bit ugly as we can't make an entire
# section of the Dockerfile conditional, so we end up always pulling the
# golang image as builder. Then we check if the executable we need already
# exists (pre-built) otherwise we build it.
#
FROM golang:$GOVERSION AS builder FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH
WORKDIR /src WORKDIR /src
COPY . . COPY . .
ENV CGO_ENABLED=0 ENV CGO_ENABLED=0
RUN if [ ! -f syncthing-linux-$TARGETARCH ] ; then \ ENV BUILD_HOST=syncthing.net
go run build.go -no-upgrade build syncthing ; \ ENV BUILD_USER=docker
mv syncthing syncthing-linux-$TARGETARCH ; \ RUN rm -f syncthing && go run build.go -no-upgrade build syncthing
fi
#
# The rest of the Dockerfile uses the binary from the builder, prebuilt or
# not.
#
FROM alpine FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing"
EXPOSE 8384 22000/tcp 22000/udp 21027/udp EXPOSE 8384 22000/tcp 22000/udp 21027/udp
@ -43,7 +17,7 @@ VOLUME ["/var/syncthing"]
RUN apk add --no-cache ca-certificates curl libcap su-exec tzdata RUN apk add --no-cache ca-certificates curl libcap su-exec tzdata
COPY --from=builder /src/syncthing-linux-$TARGETARCH /bin/syncthing COPY --from=builder /src/syncthing /bin/syncthing
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/syncthing ENV PUID=1000 PGID=1000 HOME=/var/syncthing
@ -52,6 +26,5 @@ HEALTHCHECK --interval=1m --timeout=10s \
CMD curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1 CMD curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
ENV STGUIADDRESS=0.0.0.0:8384 ENV STGUIADDRESS=0.0.0.0:8384
ENV STHOMEDIR=/var/syncthing/config
RUN chmod 755 /bin/entrypoint.sh RUN chmod 755 /bin/entrypoint.sh
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing"] ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]

View File

@ -1,14 +1,6 @@
ARG GOVERSION=latest ARG GOVERSION=latest
FROM golang:$GOVERSION FROM golang:$GOVERSION
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Builder"
# FPM to build Debian packages # FPM to build Debian packages
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
locales rubygems ruby-dev build-essential git \ locales rubygems ruby-dev build-essential git \

19
Dockerfile.buildx Normal file
View File

@ -0,0 +1,19 @@
FROM alpine
ARG TARGETARCH
EXPOSE 8384 22000/tcp 22000/udp 21027/udp
VOLUME ["/var/syncthing"]
RUN apk add --no-cache ca-certificates curl libcap su-exec tzdata
COPY ./syncthing-linux-$TARGETARCH /bin/syncthing
COPY ./script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/syncthing
HEALTHCHECK --interval=1m --timeout=10s \
CMD curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
ENV STGUIADDRESS=0.0.0.0:8384
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]

View File

@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Crash Receiver"
EXPOSE 8080
COPY stcrashreceiver-linux-${TARGETARCH} /bin/stcrashreceiver
ENTRYPOINT [ "/bin/stcrashreceiver" ]

View File

@ -1,28 +1,15 @@
ARG GOVERSION=latest ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH
WORKDIR /src WORKDIR /src
COPY . . COPY . .
ENV CGO_ENABLED=0 ENV CGO_ENABLED=0
RUN if [ ! -f stdiscosrv-linux-$TARGETARCH ] ; then \ ENV BUILD_HOST=syncthing.net
go run build.go -no-upgrade build stdiscosrv ; \ ENV BUILD_USER=docker
mv stdiscosrv stdiscosrv-linux-$TARGETARCH ; \ RUN rm -f stdiscosrv && go run build.go -no-upgrade build stdiscosrv
fi
FROM alpine FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Discovery Server"
EXPOSE 19200 8443 EXPOSE 19200 8443
@ -30,7 +17,7 @@ VOLUME ["/var/stdiscosrv"]
RUN apk add --no-cache ca-certificates su-exec RUN apk add --no-cache ca-certificates su-exec
COPY --from=builder /src/stdiscosrv-linux-$TARGETARCH /bin/stdiscosrv COPY --from=builder /src/stdiscosrv /bin/stdiscosrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/stdiscosrv ENV PUID=1000 PGID=1000 HOME=/var/stdiscosrv

View File

@ -1,24 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Relay Pool Server"
EXPOSE 8080
RUN apk add --no-cache ca-certificates su-exec curl
ENV PUID=1000 PGID=1000 MAXMIND_KEY=
RUN mkdir /var/strelaypoolsrv && chown 1000 /var/strelaypoolsrv
USER 1000
COPY strelaypoolsrv-linux-${TARGETARCH} /bin/strelaypoolsrv
COPY script/strelaypoolsrv-entrypoint.sh /bin/entrypoint.sh
WORKDIR /var/strelaypoolsrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/strelaypoolsrv", "-listen", ":8080"]

View File

@ -1,28 +1,15 @@
ARG GOVERSION=latest ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH
WORKDIR /src WORKDIR /src
COPY . . COPY . .
ENV CGO_ENABLED=0 ENV CGO_ENABLED=0
RUN if [ ! -f strelaysrv-linux-$TARGETARCH ] ; then \ ENV BUILD_HOST=syncthing.net
go run build.go -no-upgrade build strelaysrv ; \ ENV BUILD_USER=docker
mv strelaysrv strelaysrv-linux-$TARGETARCH ; \ RUN rm -f strelaysrv && go run build.go -no-upgrade build strelaysrv
fi
FROM alpine FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Relay Server"
EXPOSE 22067 22070 EXPOSE 22067 22070
@ -30,7 +17,7 @@ VOLUME ["/var/strelaysrv"]
RUN apk add --no-cache ca-certificates su-exec RUN apk add --no-cache ca-certificates su-exec
COPY --from=builder /src/strelaysrv-linux-$TARGETARCH /bin/strelaysrv COPY --from=builder /src/strelaysrv /bin/strelaysrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/strelaysrv ENV PUID=1000 PGID=1000 HOME=/var/strelaysrv

View File

@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Upgrades"
EXPOSE 8080
COPY stupgrades-linux-${TARGETARCH} /bin/stupgrades
ENTRYPOINT [ "/bin/stupgrades" ]

View File

@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Usage Reporting Server"
EXPOSE 8080
COPY ursrv-linux-${TARGETARCH} /bin/ursrv
ENTRYPOINT [ "/bin/ursrv" ]

View File

@ -24,17 +24,17 @@ to avoid corrupting the user's files.
### 2. Secure Against Attackers ### 2. Secure Against Attackers
Again, protecting the user's data is paramount. Regardless of our other Again, protecting the user's data is paramount. Regardless of our other
goals, we must never allow the user's data to be susceptible to eavesdropping goals we must never allow the user's data to be susceptible to eavesdropping
or modification by unauthorized parties. or modification by unauthorized parties.
> This should be understood in context. It is not necessarily reasonable to > This should be understood in context. It is not necessarily reasonable to
> expect Syncthing to be resistant against well equipped state level > expect Syncthing to be resistant against well equipped state level
> attackers. We will, however, do our best. Note also that this is different > attackers. We will however do our best. Note also that this is different
> from anonymity which is not, currently, a goal. > from anonymity which is not, currently, a goal.
### 3. Easy to Use ### 3. Easy to Use
Syncthing should be approachable, understandable, and inclusive. Syncthing should be approachable, understandable and inclusive.
> Complex concepts and maths form the base of Syncthing's functionality. > Complex concepts and maths form the base of Syncthing's functionality.
> This should nonetheless be abstracted or hidden to a degree where > This should nonetheless be abstracted or hidden to a degree where
@ -52,18 +52,18 @@ User interaction should be required only when absolutely necessary.
### 5. Universally Available ### 5. Universally Available
Syncthing should run on every common computer. We are mindful that the Syncthing should run on every common computer. We are mindful that the
latest technology is not always available to every individual. latest technology is not always available to any given individual.
> Computers include desktops, laptops, servers, virtual machines, small > Computers include desktops, laptops, servers, virtual machines, small
> general purpose computers such as Raspberry Pis and, *where possible*, > general purpose computers such as Raspberry Pis and, *where possible*,
> tablets and phones. NAS appliances, toasters, cars, firearms, thermostats, > tablets and phones. NAS appliances, toasters, cars, firearms, thermostats
> and so on may include computing capabilities but it is not our goal for > and so on may include computing capabilities but it is not our goal for
> Syncthing to run smoothly on these devices. > Syncthing to run smoothly on these devices.
### 6. For Individuals ### 6. For Individuals
Syncthing is primarily about empowering the individual user with safe, Syncthing is primarily about empowering the individual user with safe,
secure, and easy to use file synchronization. secure and easy to use file synchronization.
> We acknowledge that it's also useful in an enterprise setting and include > We acknowledge that it's also useful in an enterprise setting and include
> functionality to support that. If this is in conflict with the > functionality to support that. If this is in conflict with the

View File

@ -15,9 +15,6 @@ To grant Syncthing additional capabilities without running as root, use the
`PCAP` environment variable with the same syntax as that for `setcap(8)`. `PCAP` environment variable with the same syntax as that for `setcap(8)`.
For example, `PCAP=cap_chown,cap_fowner+ep`. For example, `PCAP=cap_chown,cap_fowner+ep`.
To set a different umask value, use the `UMASK` environment variable. For
example `UMASK=002`.
## Example Usage ## Example Usage
**Docker cli** **Docker cli**

View File

@ -2,6 +2,9 @@
--- ---
[![Latest Linux & Cross Build](https://img.shields.io/teamcity/https/build.syncthing.net/s/Syncthing_BuildLinuxCross.svg?style=flat-square&label=linux+%26+cross+build)](https://build.syncthing.net/viewType.html?buildTypeId=Syncthing_BuildLinuxCross&guest=1)
[![Latest Windows Build](https://img.shields.io/teamcity/https/build.syncthing.net/s/Syncthing_BuildWindows.svg?style=flat-square&label=windows+build)](https://build.syncthing.net/viewType.html?buildTypeId=Syncthing_BuildWindows&guest=1)
[![Latest Mac Build](https://img.shields.io/teamcity/https/build.syncthing.net/s/Syncthing_BuildMac.svg?style=flat-square&label=mac+build)](https://build.syncthing.net/viewType.html?buildTypeId=Syncthing_BuildMac&guest=1)
[![MPLv2 License](https://img.shields.io/badge/license-MPLv2-blue.svg?style=flat-square)](https://www.mozilla.org/MPL/2.0/) [![MPLv2 License](https://img.shields.io/badge/license-MPLv2-blue.svg?style=flat-square)](https://www.mozilla.org/MPL/2.0/)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/88/badge)](https://bestpractices.coreinfrastructure.org/projects/88) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/88/badge)](https://bestpractices.coreinfrastructure.org/projects/88)
[![Go Report Card](https://goreportcard.com/badge/github.com/syncthing/syncthing)](https://goreportcard.com/report/github.com/syncthing/syncthing) [![Go Report Card](https://goreportcard.com/badge/github.com/syncthing/syncthing)](https://goreportcard.com/report/github.com/syncthing/syncthing)
@ -10,8 +13,8 @@
Syncthing is a **continuous file synchronization program**. It synchronizes Syncthing is a **continuous file synchronization program**. It synchronizes
files between two or more computers. We strive to fulfill the goals below. files between two or more computers. We strive to fulfill the goals below.
The goals are listed in order of importance, the most important ones first. The goals are listed in order of importance, the most important one being
This is the summary version of the goal list - for more the first. This is the summary version of the goal list - for more
commentary, see the full [Goals document][13]. commentary, see the full [Goals document][13].
Syncthing should be: Syncthing should be:
@ -24,12 +27,12 @@ Syncthing should be:
2. **Secure Against Attackers** 2. **Secure Against Attackers**
Again, protecting the user's data is paramount. Regardless of our other Again, protecting the user's data is paramount. Regardless of our other
goals, we must never allow the user's data to be susceptible to goals we must never allow the user's data to be susceptible to
eavesdropping or modification by unauthorized parties. eavesdropping or modification by unauthorized parties.
3. **Easy to Use** 3. **Easy to Use**
Syncthing should be approachable, understandable, and inclusive. Syncthing should be approachable, understandable and inclusive.
4. **Automatic** 4. **Automatic**
@ -38,12 +41,12 @@ Syncthing should be:
5. **Universally Available** 5. **Universally Available**
Syncthing should run on every common computer. We are mindful that the Syncthing should run on every common computer. We are mindful that the
latest technology is not always available to every individual. latest technology is not always available to any given individual.
6. **For Individuals** 6. **For Individuals**
Syncthing is primarily about empowering the individual user with safe, Syncthing is primarily about empowering the individual user with safe,
secure, and easy to use file synchronization. secure and easy to use file synchronization.
7. **Everything Else** 7. **Everything Else**
@ -57,22 +60,23 @@ Take a look at the [getting started guide][2].
There are a few examples for keeping Syncthing running in the background There are a few examples for keeping Syncthing running in the background
on your system in [the etc directory][3]. There are also several [GUI on your system in [the etc directory][3]. There are also several [GUI
implementations][11] for Windows, Mac, and Linux. implementations][11] for Windows, Mac and Linux.
## Docker ## Docker
To run Syncthing in Docker, see [the Docker README][16]. To run Syncthing in Docker, see [the Docker README][16].
## Vote on features/bugs
We'd like to encourage you to [vote][12] on issues that matter to you.
This helps the team understand what are the biggest pain points for our users, and could potentially influence what is being worked on next.
## Getting in Touch ## Getting in Touch
The first and best point of contact is the [Forum][8]. The first and best point of contact is the [Forum][8].
If you've found something that is clearly a If you've found something that is clearly a
bug, feel free to report it in the [GitHub issue tracker][10]. bug, feel free to report it in the [GitHub issue tracker][10].
If you believe that youve found a Syncthing-related security vulnerability,
please report it by emailing security@syncthing.net. Do not report it in the
Forum or issue tracker.
## Building ## Building
Building Syncthing from source is easy. After extracting the source bundle from Building Syncthing from source is easy. After extracting the source bundle from
@ -82,11 +86,11 @@ build process.
## Signed Releases ## Signed Releases
As of v0.10.15 and onwards, release binaries are GPG signed with the key As of v0.10.15 and onwards release binaries are GPG signed with the key
D26E6ED000654A3E, available from https://syncthing.net/security/ and D26E6ED000654A3E, available from https://syncthing.net/security.html and
most key servers. most key servers.
There is also a built-in automatic upgrade mechanism (disabled in some There is also a built in automatic upgrade mechanism (disabled in some
distribution channels) which uses a compiled in ECDSA signature. macOS distribution channels) which uses a compiled in ECDSA signature. macOS
binaries are also properly code signed. binaries are also properly code signed.
@ -105,6 +109,7 @@ All code is licensed under the [MPLv2 License][7].
[8]: https://forum.syncthing.net/ [8]: https://forum.syncthing.net/
[10]: https://github.com/syncthing/syncthing/issues [10]: https://github.com/syncthing/syncthing/issues
[11]: https://docs.syncthing.net/users/contrib.html#gui-wrappers [11]: https://docs.syncthing.net/users/contrib.html#gui-wrappers
[12]: https://www.bountysource.com/teams/syncthing/issues
[13]: https://github.com/syncthing/syncthing/blob/main/GOALS.md [13]: https://github.com/syncthing/syncthing/blob/main/GOALS.md
[14]: assets/logo-text-128.png [14]: assets/logo-text-128.png
[15]: https://syncthing.net/ [15]: https://syncthing.net/

View File

@ -33,7 +33,6 @@ import (
"text/template" "text/template"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
buildpkg "github.com/syncthing/syncthing/lib/build" buildpkg "github.com/syncthing/syncthing/lib/build"
) )
@ -208,24 +207,6 @@ var targets = map[string]target{
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/AUTHORS.txt", perm: 0644}, {src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/AUTHORS.txt", perm: 0644},
}, },
}, },
"stupgrades": {
name: "stupgrades",
description: "Syncthing Upgrade Check Server",
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stupgrades"},
binaryName: "stupgrades",
},
"stcrashreceiver": {
name: "stcrashreceiver",
description: "Syncthing Crash Server",
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stcrashreceiver"},
binaryName: "stcrashreceiver",
},
"ursrv": {
name: "ursrv",
description: "Syncthing Usage Reporting Server",
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/ursrv"},
binaryName: "ursrv",
},
} }
func initTargets() { func initTargets() {
@ -1115,14 +1096,10 @@ func getBranchSuffix() string {
branch = parts[len(parts)-1] branch = parts[len(parts)-1]
switch branch { switch branch {
case "release", "main": case "master", "release", "main":
// these are not special // these are not special
return "" return ""
} }
if strings.HasPrefix(branch, "release-") {
// release branches are not special
return ""
}
validBranchRe := regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`) validBranchRe := regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
if !validBranchRe.MatchString(branch) { if !validBranchRe.MatchString(branch) {

View File

@ -23,7 +23,7 @@ case "${1:-default}" in
prerelease) prerelease)
script authors script authors
build weblate build transifex
pushd man ; ./refresh.sh ; popd pushd man ; ./refresh.sh ; popd
git add -A gui man AUTHORS git add -A gui man AUTHORS
git commit -m 'gui, man, authors: Update docs, translations, and contributors' git commit -m 'gui, man, authors: Update docs, translations, and contributors'

View File

@ -11,13 +11,13 @@ import (
"github.com/syncthing/syncthing/lib/build" "github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/logger" "github.com/syncthing/syncthing/lib/logger"
"github.com/syncthing/syncthing/lib/locations" "github.com/syncthing/syncthing/lib/locations"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/svcutil" "github.com/syncthing/syncthing/lib/svcutil"
"github.com/syncthing/syncthing/lib/syncthing" "github.com/syncthing/syncthing/lib/syncthing"
"github.com/syncthing/syncthing/cmd/syncthing/cli" "github.com/syncthing/syncthing/cmd/syncthing/cli"
"github.com/syncthing/syncthing/cmd/syncthing"
"github.com/thejerf/suture/v4" "github.com/thejerf/suture/v4"
) )
@ -52,12 +52,8 @@ func libst_init_logging() {
} }
//export libst_clear_cli_args //export libst_clear_cli_args
func libst_clear_cli_args(command string) { func libst_clear_cli_args() {
if command == "cli" { cliArgs = []string{"syncthing", "cli"}
cliArgs = []string{}
} else {
cliArgs = []string{command}
}
} }
//export libst_append_cli_arg //export libst_append_cli_arg
@ -74,13 +70,28 @@ func libst_run_cli() int {
return 0 return 0
} }
//export libst_run_main // C&P from main.go; used to ensure that the config directory exists
func libst_run_main() int { func ensureDir(dir string, mode fs.FileMode) error {
if err := syncthing_main.RunWithArgs(cliArgs); err != nil { fs := fs.NewFilesystem(fs.FilesystemTypeBasic, dir)
fmt.Println(err) err := fs.MkdirAll(".", mode)
return 1 if err != nil {
return err
} }
return 0
if fi, err := fs.Stat("."); err == nil {
		// Apparently the stat may fail even though the mkdirall passed. If it
// does, we'll just assume things are in order and let other things
// fail (like loading or creating the config...).
currentMode := fi.Mode() & 0777
if currentMode != mode {
err := fs.Chmod(".", mode)
// This can fail on crappy filesystems, nothing we can do about it.
if err != nil {
l.Warnln(err)
}
}
}
return nil
} }
//export libst_run_syncthing //export libst_run_syncthing
@ -132,7 +143,7 @@ func libst_run_syncthing(configDir string, dataDir string, guiAddress string, gu
// ensure that the config directory exists // ensure that the config directory exists
if ensureConfigDirExists { if ensureConfigDirExists {
if err := syncthing.EnsureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0700); err != nil { if err := ensureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0700); err != nil {
l.Warnln("Failed to create config directory:", err) l.Warnln("Failed to create config directory:", err)
return 4 return 4
} }
@ -140,7 +151,7 @@ func libst_run_syncthing(configDir string, dataDir string, guiAddress string, gu
// ensure that the database directory exists // ensure that the database directory exists
if dataDir != "" && ensureDataDirExists { if dataDir != "" && ensureDataDirExists {
if err := syncthing.EnsureDir(locations.GetBaseDir(locations.DataBaseDir), 0700); err != nil { if err := ensureDir(locations.GetBaseDir(locations.DataBaseDir), 0700); err != nil {
l.Warnln("Failed to create database directory:", err) l.Warnln("Failed to create database directory:", err)
return 4 return 4
} }

View File

@ -1,15 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"github.com/syncthing/syncthing/lib/logger"
)
// Package-level logging facility for this package's messages.
var (
	// l is the logger used throughout the package.
	l = logger.DefaultLogger.NewFacility("main", "Main package")
)

1
c-bindings/debug.go Symbolic link
View File

@ -0,0 +1 @@
../cmd/syncthing/debug.go

View File

@ -15,7 +15,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256" "github.com/syncthing/syncthing/lib/sha256"
) )

View File

@ -1,198 +0,0 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"compress/gzip"
"context"
"io"
"log"
"math"
"os"
"path/filepath"
"sort"
"time"
)
// diskStore persists gzip-compressed crash reports under dir, enforcing
// caps on the number of stored files (maxFiles) and their total size in
// bytes (maxBytes). Writes are funneled through inbox and performed by
// Serve; the currentFiles/currentSize bookkeeping is owned by Serve's
// goroutine (maintained by inventory and clean), oldest file first.
type diskStore struct {
	dir      string         // root directory for stored reports
	inbox    chan diskEntry // queue of reports waiting to be written
	maxBytes int64          // total-size cap enforced by clean
	maxFiles int            // file-count cap enforced by clean

	currentFiles []currentFile // known on-disk reports, oldest first
	currentSize  int64         // sum of sizes accounted in currentFiles
}

// diskEntry is a report queued for storage: path is the store-relative
// report ID, data is the uncompressed report body.
type diskEntry struct {
	path string
	data []byte
}

// currentFile records one stored report for quota accounting in clean.
type currentFile struct {
	path  string
	size  int64
	mtime int64 // Unix seconds; zero for entries appended by Serve (NOTE(review): see Serve)
}
// Serve runs the disk store: it creates the store directory, inventories
// any reports already on disk, then loops handling incoming reports from
// d.inbox, periodic quota cleaning, and periodic re-inventory until ctx
// is cancelled. It owns all access to d.currentFiles/d.currentSize.
func (d *diskStore) Serve(ctx context.Context) {
	if err := os.MkdirAll(d.dir, 0o700); err != nil {
		log.Println("Creating directory:", err)
		return
	}

	if err := d.inventory(); err != nil {
		log.Println("Failed to inventory disk store:", err)
	}
	d.clean()

	cleanTimer := time.NewTicker(time.Minute)
	inventoryTimer := time.NewTicker(24 * time.Hour)

	// Compression buffer and writer are reused across reports.
	buf := new(bytes.Buffer)
	gw := gzip.NewWriter(buf)

	for {
		select {
		case entry := <-d.inbox:
			path := d.fullPath(entry.path)

			if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
				log.Println("Creating directory:", err)
				continue
			}

			buf.Reset()
			gw.Reset(buf)
			if _, err := gw.Write(entry.data); err != nil {
				log.Println("Failed to compress crash report:", err)
				continue
			}
			if err := gw.Close(); err != nil {
				log.Println("Failed to compress crash report:", err)
				continue
			}

			if err := os.WriteFile(path, buf.Bytes(), 0o600); err != nil {
				log.Printf("Failed to write %s: %v", entry.path, err)
				_ = os.Remove(path)
				continue
			}

			// Account for the file using its compressed on-disk size —
			// the same measure inventory() records (info.Size() of the
			// .gz file) and the same measure clean() subtracts from
			// currentSize. Previously the uncompressed size was stored
			// in the currentFile entry while currentSize grew by the
			// compressed size, so clean()'s bookkeeping drifted. Also
			// record mtime so the oldest-age metric is meaningful
			// before the next inventory pass.
			d.currentSize += int64(buf.Len())
			d.currentFiles = append(d.currentFiles, currentFile{
				size:  int64(buf.Len()),
				path:  path,
				mtime: time.Now().Unix(),
			})

		case <-cleanTimer.C:
			d.clean()

		case <-inventoryTimer.C:
			if err := d.inventory(); err != nil {
				log.Println("Failed to inventory disk store:", err)
			}

		case <-ctx.Done():
			return
		}
	}
}
// Put queues a report for asynchronous storage by Serve. It never blocks:
// if the write queue is full the report is dropped and false is returned.
func (d *diskStore) Put(path string, data []byte) bool {
	entry := diskEntry{path: path, data: data}
	select {
	case d.inbox <- entry:
		return true
	default:
		// Queue full; the caller decides how to report the loss.
		return false
	}
}
// Get loads the report stored under the given ID and returns its
// uncompressed contents.
func (d *diskStore) Get(path string) ([]byte, error) {
	compressed, err := os.ReadFile(d.fullPath(path))
	if err != nil {
		return nil, err
	}
	gr, err := gzip.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return nil, err
	}
	defer gr.Close()
	return io.ReadAll(gr)
}
// Exists reports whether a report with the given ID is present on disk.
func (d *diskStore) Exists(path string) bool {
	if _, err := os.Lstat(d.fullPath(path)); err != nil {
		return false
	}
	return true
}
// clean removes the oldest reports until both quotas (maxFiles, maxBytes)
// are satisfied, then refreshes the store metrics. Must only be called from
// Serve's goroutine, which owns currentFiles/currentSize.
func (d *diskStore) clean() {
	// currentFiles is kept oldest-first (see inventory), so evict from
	// the front until both limits hold.
	for len(d.currentFiles) > 0 && (len(d.currentFiles) > d.maxFiles || d.currentSize > d.maxBytes) {
		f := d.currentFiles[0]
		log.Println("Removing", f.path)
		if err := os.Remove(f.path); err != nil {
			log.Println("Failed to remove file:", err)
		}
		// Drop the entry even if the remove failed; the next inventory
		// pass re-discovers anything still on disk.
		d.currentFiles = d.currentFiles[1:]
		d.currentSize -= f.size
	}

	// Age of the oldest remaining report, truncated for stable output.
	var oldest time.Duration
	if len(d.currentFiles) > 0 {
		oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
	}

	metricDiskstoreFilesTotal.Set(float64(len(d.currentFiles)))
	metricDiskstoreBytesTotal.Set(float64(d.currentSize))
	metricDiskstoreOldestAgeSeconds.Set(math.Round(oldest.Seconds()))

	log.Printf("Clean complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
}
// inventory rebuilds currentFiles/currentSize by walking the store
// directory for ".gz" files, sorts the result oldest-first (the order
// clean relies on), and refreshes the store metrics. It returns the walk
// error, if any, after still publishing metrics for whatever was found.
// Must only be called from Serve's goroutine.
func (d *diskStore) inventory() error {
	d.currentFiles = nil
	d.currentSize = 0
	err := filepath.Walk(d.dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		// Only compressed report files count toward the quotas.
		if filepath.Ext(path) != ".gz" {
			return nil
		}
		d.currentSize += info.Size()
		d.currentFiles = append(d.currentFiles, currentFile{
			path:  path,
			size:  info.Size(),
			mtime: info.ModTime().Unix(),
		})
		return nil
	})

	// Oldest first, so clean can evict from the front of the slice.
	sort.Slice(d.currentFiles, func(i, j int) bool {
		return d.currentFiles[i].mtime < d.currentFiles[j].mtime
	})

	var oldest time.Duration
	if len(d.currentFiles) > 0 {
		oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
	}

	metricDiskstoreFilesTotal.Set(float64(len(d.currentFiles)))
	metricDiskstoreBytesTotal.Set(float64(d.currentSize))
	metricDiskstoreOldestAgeSeconds.Set(math.Round(oldest.Seconds()))

	log.Printf("Inventory complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
	return err
}
// fullPath maps a report ID to its on-disk location: the first two
// characters become a sharding subdirectory and ".gz" is appended,
// e.g. "01234567..." -> dir/01/234567....gz.
func (d *diskStore) fullPath(path string) string {
	shard, rest := path[:2], path[2:]
	return filepath.Join(d.dir, shard, rest) + ".gz"
}

View File

@ -13,8 +13,8 @@
package main package main
import ( import (
"context"
"encoding/json" "encoding/json"
"flag"
"fmt" "fmt"
"io" "io"
"log" "log"
@ -22,74 +22,40 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/alecthomas/kong"
raven "github.com/getsentry/raven-go"
"github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256" "github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/ur" "github.com/syncthing/syncthing/lib/ur"
raven "github.com/getsentry/raven-go"
) )
const maxRequestSize = 1 << 20 // 1 MiB const maxRequestSize = 1 << 20 // 1 MiB
type cli struct {
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
}
func main() { func main() {
var params cli dir := flag.String("dir", ".", "Parent directory to store crash and failure reports in")
kong.Parse(&params) dsn := flag.String("dsn", "", "Sentry DSN")
listen := flag.String("listen", ":22039", "HTTP listen address")
flag.Parse()
mux := http.NewServeMux() mux := http.NewServeMux()
ds := &diskStore{
dir: filepath.Join(params.Dir, "crash_reports"),
inbox: make(chan diskEntry, params.DiskQueue),
maxFiles: params.MaxDiskFiles,
maxBytes: params.MaxDiskSizeMB << 20,
}
go ds.Serve(context.Background())
ss := &sentryService{
dsn: params.DSN,
inbox: make(chan sentryRequest, params.SentryQueue),
}
go ss.Serve(context.Background())
cr := &crashReceiver{ cr := &crashReceiver{
store: ds, dir: filepath.Join(*dir, "crash_reports"),
sentry: ss, dsn: *dsn,
} }
mux.Handle("/", cr) mux.Handle("/", cr)
mux.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte("OK"))
})
mux.Handle("/metrics", promhttp.Handler())
if params.DSN != "" { if *dsn != "" {
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports"))) mux.HandleFunc("/newcrash/failure", handleFailureFn(*dsn, filepath.Join(*dir, "failure_reports")))
} }
log.SetOutput(os.Stdout) log.SetOutput(os.Stdout)
if err := http.ListenAndServe(params.Listen, mux); err != nil { if err := http.ListenAndServe(*listen, mux); err != nil {
log.Fatalln("HTTP serve:", err) log.Fatalln("HTTP serve:", err)
} }
} }
func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) { func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) {
result := "failure"
defer func() {
metricFailureReportsTotal.WithLabelValues(result).Inc()
}()
lr := io.LimitReader(req.Body, maxRequestSize) lr := io.LimitReader(req.Body, maxRequestSize)
bs, err := io.ReadAll(lr) bs, err := io.ReadAll(lr)
req.Body.Close() req.Body.Close()
@ -140,7 +106,6 @@ func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *ht
log.Println("Failed to send failure report:", err) log.Println("Failed to send failure report:", err)
} else { } else {
log.Println("Sent failure report:", r.Description) log.Println("Sent failure report:", r.Description)
result = "success"
} }
} }
} }

View File

@ -1,40 +0,0 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// Prometheus instrumentation for the crash receiver. The counters are
// labeled by "result" so successes and the various failure modes can be
// distinguished; the gauges track the state of the on-disk report store.
var (
	// Crash reports received, by processing result.
	metricCrashReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "crashreceiver",
		Name:      "crash_reports_total",
	}, []string{"result"})
	// Failure reports received, by processing result.
	metricFailureReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "crashreceiver",
		Name:      "failure_reports_total",
	}, []string{"result"})
	// Number of report files currently tracked by the disk store.
	metricDiskstoreFilesTotal = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "crashreceiver",
		Name:      "diskstore_files_total",
	})
	// Total accounted size of stored reports, in bytes.
	metricDiskstoreBytesTotal = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "crashreceiver",
		Name:      "diskstore_bytes_total",
	})
	// Age in seconds of the oldest stored report.
	metricDiskstoreOldestAgeSeconds = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "crashreceiver",
		Name:      "diskstore_oldest_age_seconds",
	})
)

View File

@ -8,10 +8,8 @@ package main
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"io" "io"
"log"
"regexp" "regexp"
"strings" "strings"
"sync" "sync"
@ -33,45 +31,6 @@ var (
clientsMut sync.Mutex clientsMut sync.Mutex
) )
type sentryService struct {
dsn string
inbox chan sentryRequest
}
type sentryRequest struct {
reportID string
userID string
data []byte
}
func (s *sentryService) Serve(ctx context.Context) {
for {
select {
case req := <-s.inbox:
pkt, err := parseCrashReport(req.reportID, req.data)
if err != nil {
log.Println("Failed to parse crash report:", err)
continue
}
if err := sendReport(s.dsn, pkt, req.userID); err != nil {
log.Println("Failed to send crash report:", err)
}
case <-ctx.Done():
return
}
}
}
func (s *sentryService) Send(reportID, userID string, data []byte) bool {
select {
case s.inbox <- sentryRequest{reportID, userID, data}:
return true
default:
return false
}
}
func sendReport(dsn string, pkt *raven.Packet, userID string) error { func sendReport(dsn string, pkt *raven.Packet, userID string) error {
pkt.Interfaces = append(pkt.Interfaces, &raven.User{ID: userID}) pkt.Interfaces = append(pkt.Interfaces, &raven.User{ID: userID})
@ -216,13 +175,7 @@ func crashReportFingerprint(message string) []string {
} }
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar] // syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
// or, somewhere along the way the "+" in the version tag disappeared: var longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
// syncthing v1.23.7-dev.26.gdf7b56ae.dirty-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]
var (
longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
gitExtraRE = regexp.MustCompile(`\.\d+\.g[0-9a-f]+`) // ".1.g6aaae618"
gitExtraSepRE = regexp.MustCompile(`[.-]`) // dot or dash
)
type version struct { type version struct {
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep" version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
@ -264,21 +217,10 @@ func parseVersion(line string) (version, error) {
builder: m[6], builder: m[6],
} }
// Split the version tag into tag and commit. This is old style parts := strings.Split(v.version, "+")
// v1.2.3-something.4+11-g12345678 or newer with just dots
// v1.2.3-something.4.11.g12345678 or v1.2.3-dev.11.g12345678.
parts := []string{v.version}
if strings.Contains(v.version, "+") {
parts = strings.Split(v.version, "+")
} else {
idxs := gitExtraRE.FindStringIndex(v.version)
if len(idxs) > 0 {
parts = []string{v.version[:idxs[0]], v.version[idxs[0]+1:]}
}
}
v.tag = parts[0] v.tag = parts[0]
if len(parts) > 1 { if len(parts) > 1 {
fields := gitExtraSepRE.Split(parts[1], -1) fields := strings.Split(parts[1], "-")
if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") { if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
v.commit = fields[1][1:] v.commit = fields[1][1:]
} }

View File

@ -44,20 +44,6 @@ func TestParseVersion(t *testing.T) {
extra: []string{"foo", "bar"}, extra: []string{"foo", "bar"},
}, },
}, },
{
longVersion: `syncthing v1.23.7-dev.26.gdf7b56ae-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]`,
parsed: version{
version: "v1.23.7-dev.26.gdf7b56ae-stversionextra",
tag: "v1.23.7-dev",
commit: "df7b56ae",
codename: "Fermium Flea",
runtime: "go1.20.5",
goos: "darwin",
goarch: "arm64",
builder: "jb@ok.kastelo.net",
extra: []string{"Some Wrapper", "purego", "stnoupgrade"},
},
},
} }
for _, tc := range cases { for _, tc := range cases {

View File

@ -7,16 +7,20 @@
package main package main
import ( import (
"bytes"
"compress/gzip"
"io" "io"
"log" "log"
"net/http" "net/http"
"os"
"path" "path"
"path/filepath"
"strings" "strings"
) )
type crashReceiver struct { type crashReceiver struct {
store *diskStore dir string
sentry *sentryService dsn string
} }
func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) { func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@ -39,42 +43,54 @@ func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
return return
} }
// The location of the report on disk, compressed
fullPath := filepath.Join(r.dir, r.dirFor(reportID), reportID) + ".gz"
switch req.Method { switch req.Method {
case http.MethodGet: case http.MethodGet:
r.serveGet(reportID, w, req) r.serveGet(fullPath, w, req)
case http.MethodHead: case http.MethodHead:
r.serveHead(reportID, w, req) r.serveHead(fullPath, w, req)
case http.MethodPut: case http.MethodPut:
r.servePut(reportID, w, req) r.servePut(reportID, fullPath, w, req)
default: default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
} }
} }
// serveGet responds to GET requests by serving the uncompressed report. // serveGet responds to GET requests by serving the uncompressed report.
func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http.Request) { func (*crashReceiver) serveGet(fullPath string, w http.ResponseWriter, _ *http.Request) {
bs, err := r.store.Get(reportID) fd, err := os.Open(fullPath)
if err != nil { if err != nil {
http.Error(w, "Not found", http.StatusNotFound) http.Error(w, "Not found", http.StatusNotFound)
return return
} }
w.Write(bs)
defer fd.Close()
gr, err := gzip.NewReader(fd)
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
return
}
_, _ = io.Copy(w, gr) // best effort
} }
// serveHead responds to HEAD requests by checking if the named report // serveHead responds to HEAD requests by checking if the named report
// already exists in the system. // already exists in the system.
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) { func (*crashReceiver) serveHead(fullPath string, w http.ResponseWriter, _ *http.Request) {
if !r.store.Exists(reportID) { if _, err := os.Lstat(fullPath); err != nil {
http.Error(w, "Not found", http.StatusNotFound) http.Error(w, "Not found", http.StatusNotFound)
} }
} }
// servePut accepts and stores the given report. // servePut accepts and stores the given report.
func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *http.Request) { func (r *crashReceiver) servePut(reportID, fullPath string, w http.ResponseWriter, req *http.Request) {
result := "receive_failure" // Ensure the destination directory exists
defer func() { if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
metricCrashReportsTotal.WithLabelValues(result).Inc() log.Println("Creating directory:", err)
}() http.Error(w, "Internal server error", http.StatusInternalServerError)
return
}
// Read at most maxRequestSize of report data. // Read at most maxRequestSize of report data.
log.Println("Receiving report", reportID) log.Println("Receiving report", reportID)
@ -86,17 +102,40 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
return return
} }
result = "success" // Compress the report for storage
buf := new(bytes.Buffer)
gw := gzip.NewWriter(buf)
_, _ = gw.Write(bs) // can't fail
gw.Close()
// Store the report // Create an output file with the compressed report
if !r.store.Put(reportID, bs) { err = os.WriteFile(fullPath, buf.Bytes(), 0644)
log.Println("Failed to store report (queue full):", reportID) if err != nil {
result = "queue_failure" log.Println("Saving report:", err)
http.Error(w, "Internal server error", http.StatusInternalServerError)
return
} }
// Send the report to Sentry // Send the report to Sentry
if !r.sentry.Send(reportID, userIDFor(req), bs) { if r.dsn != "" {
log.Println("Failed to send report to sentry (queue full):", reportID) // Remote ID
result = "sentry_failure" user := userIDFor(req)
go func() {
// There's no need for the client to have to wait for this part.
pkt, err := parseCrashReport(reportID, bs)
if err != nil {
log.Println("Failed to parse crash report:", err)
return
}
if err := sendReport(r.dsn, pkt, user); err != nil {
log.Println("Failed to send crash report:", err)
}
}()
} }
} }
// 01234567890abcdef... => 01/23
func (*crashReceiver) dirFor(base string) string {
return filepath.Join(base[0:2], base[2:4])
}

View File

@ -15,7 +15,6 @@ import (
"strings" "strings"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/beacon" "github.com/syncthing/syncthing/lib/beacon"
"github.com/syncthing/syncthing/lib/discover" "github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"

View File

@ -8,7 +8,6 @@ package main
import ( import (
"bytes" "bytes"
"compress/gzip"
"context" "context"
"crypto/tls" "crypto/tls"
"encoding/base64" "encoding/base64"
@ -16,7 +15,6 @@ import (
"encoding/pem" "encoding/pem"
"errors" "errors"
"fmt" "fmt"
io "io"
"log" "log"
"math/rand" "math/rand"
"net" "net"
@ -29,7 +27,6 @@ import (
"time" "time"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/stringutil"
) )
// announcement is the format received from and sent to clients // announcement is the format received from and sent to clients
@ -81,10 +78,18 @@ func (s *apiSrv) Serve(_ context.Context) error {
s.listener = listener s.listener = listener
} else { } else {
tlsCfg := &tls.Config{ tlsCfg := &tls.Config{
Certificates: []tls.Certificate{s.cert}, Certificates: []tls.Certificate{s.cert},
ClientAuth: tls.RequestClientCert, ClientAuth: tls.RequestClientCert,
MinVersion: tls.VersionTLS12, SessionTicketsDisabled: true,
NextProtos: []string{"h2", "http/1.1"}, MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
},
} }
tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg) tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
@ -102,7 +107,6 @@ func (s *apiSrv) Serve(_ context.Context) error {
ReadTimeout: httpReadTimeout, ReadTimeout: httpReadTimeout,
WriteTimeout: httpWriteTimeout, WriteTimeout: httpWriteTimeout,
MaxHeaderBytes: httpMaxHeaderBytes, MaxHeaderBytes: httpMaxHeaderBytes,
ErrorLog: log.New(io.Discard, "", 0),
} }
err := srv.Serve(s.listener) err := srv.Serve(s.listener)
@ -112,6 +116,8 @@ func (s *apiSrv) Serve(_ context.Context) error {
return err return err
} }
var topCtx = context.Background()
func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) { func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
t0 := time.Now() t0 := time.Now()
@ -124,10 +130,10 @@ func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
}() }()
reqID := requestID(rand.Int63()) reqID := requestID(rand.Int63())
req = req.WithContext(context.WithValue(req.Context(), idKey, reqID)) ctx := context.WithValue(topCtx, idKey, reqID)
if debug { if debug {
log.Println(reqID, req.Method, req.URL, req.Proto) log.Println(reqID, req.Method, req.URL)
} }
remoteAddr := &net.TCPAddr{ remoteAddr := &net.TCPAddr{
@ -136,12 +142,7 @@ func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
} }
if s.useHTTP { if s.useHTTP {
// X-Forwarded-For can have multiple client IPs; split using the comma separator remoteAddr.IP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
forwardIP, _, _ := strings.Cut(req.Header.Get("X-Forwarded-For"), ",")
// net.ParseIP will return nil if leading/trailing whitespace exists; use strings.TrimSpace()
remoteAddr.IP = net.ParseIP(strings.TrimSpace(forwardIP))
if parsedPort, err := strconv.ParseInt(req.Header.Get("X-Client-Port"), 10, 0); err == nil { if parsedPort, err := strconv.ParseInt(req.Header.Get("X-Client-Port"), 10, 0); err == nil {
remoteAddr.Port = int(parsedPort) remoteAddr.Port = int(parsedPort)
} }
@ -158,17 +159,17 @@ func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
} }
switch req.Method { switch req.Method {
case http.MethodGet: case "GET":
s.handleGET(lw, req) s.handleGET(ctx, lw, req)
case http.MethodPost: case "POST":
s.handlePOST(remoteAddr, lw, req) s.handlePOST(ctx, remoteAddr, lw, req)
default: default:
http.Error(lw, "Method Not Allowed", http.StatusMethodNotAllowed) http.Error(lw, "Method Not Allowed", http.StatusMethodNotAllowed)
} }
} }
func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) { func (s *apiSrv) handleGET(ctx context.Context, w http.ResponseWriter, req *http.Request) {
reqID := req.Context().Value(idKey).(requestID) reqID := ctx.Value(idKey).(requestID)
deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device")) deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
if err != nil { if err != nil {
@ -212,34 +213,23 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
s.db.put(key, rec) s.db.put(key, rec)
} }
afterS := notFoundRetryAfterSeconds(int(misses)) w.Header().Set("Retry-After", notFoundRetryAfterString(int(misses)))
retryAfterHistogram.Observe(float64(afterS))
w.Header().Set("Retry-After", strconv.Itoa(afterS))
http.Error(w, "Not Found", http.StatusNotFound) http.Error(w, "Not Found", http.StatusNotFound)
return return
} }
lookupRequestsTotal.WithLabelValues("success").Inc() lookupRequestsTotal.WithLabelValues("success").Inc()
w.Header().Set("Content-Type", "application/json") bs, _ := json.Marshal(announcement{
var bw io.Writer = w Seen: time.Unix(0, rec.Seen),
// Use compression if the client asks for it
if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
gw := gzip.NewWriter(bw)
defer gw.Close()
bw = gw
}
json.NewEncoder(bw).Encode(announcement{
Seen: time.Unix(0, rec.Seen).Truncate(time.Second),
Addresses: addressStrs(rec.Addresses), Addresses: addressStrs(rec.Addresses),
}) })
w.Header().Set("Content-Type", "application/json")
w.Write(bs)
} }
func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req *http.Request) { func (s *apiSrv) handlePOST(ctx context.Context, remoteAddr *net.TCPAddr, w http.ResponseWriter, req *http.Request) {
reqID := req.Context().Value(idKey).(requestID) reqID := ctx.Value(idKey).(requestID)
rawCert, err := certificateBytes(req) rawCert, err := certificateBytes(req)
if err != nil { if err != nil {
@ -361,16 +351,13 @@ func certificateBytes(req *http.Request) ([]byte, error) {
bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr}) bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr})
} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" { } else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
// Traefik 2 passtlsclientcert // Traefik 2 passtlsclientcert
// // The certificate is in PEM format with url encoding but without newlines
// The certificate is in PEM format, maybe with URL encoding // and start/end statements. We need to decode, reinstate the newlines every 64
// (depends on Traefik version) but without newlines and start/end
// statements. We need to decode, reinstate the newlines every 64
// character and add statements for the PEM decoder // character and add statements for the PEM decoder
hdr, err := url.QueryUnescape(hdr)
if strings.Contains(hdr, "%") { if err != nil {
if unesc, err := url.QueryUnescape(hdr); err == nil { // Decoding failed
hdr = unesc return nil, err
}
} }
for i := 64; i < len(hdr); i += 65 { for i := 64; i < len(hdr); i += 65 {
@ -378,7 +365,7 @@ func certificateBytes(req *http.Request) ([]byte, error) {
} }
hdr = "-----BEGIN CERTIFICATE-----\n" + hdr hdr = "-----BEGIN CERTIFICATE-----\n" + hdr
hdr += "\n-----END CERTIFICATE-----\n" hdr = hdr + "\n-----END CERTIFICATE-----\n"
bs = []byte(hdr) bs = []byte(hdr)
} }
@ -417,13 +404,13 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
continue continue
} }
if host == "" || ip.IsUnspecified() { if remote != nil {
if remote != nil { if host == "" || ip.IsUnspecified() {
// Replace the unspecified IP with the request source. // Replace the unspecified IP with the request source.
// ... unless the request source is the loopback address or // ... unless the request source is the loopback address or
// multicast/unspecified (can't happen, really). // multicast/unspecified (can't happen, really).
if remote.IP == nil || remote.IP.IsLoopback() || remote.IP.IsMulticast() || remote.IP.IsUnspecified() { if remote.IP.IsLoopback() || remote.IP.IsMulticast() || remote.IP.IsUnspecified() {
continue continue
} }
@ -439,22 +426,11 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
} }
host = remote.IP.String() host = remote.IP.String()
} else {
// remote is nil, unable to determine host IP
continue
} }
} // If zero port was specified, use remote port.
if port == "0" && remote.Port > 0 {
// If zero port was specified, use remote port.
if port == "0" {
if remote != nil && remote.Port > 0 {
// use remote port
port = strconv.Itoa(remote.Port) port = strconv.Itoa(remote.Port)
} else {
// unable to determine remote port
continue
} }
} }
@ -462,9 +438,6 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
fixed = append(fixed, uri.String()) fixed = append(fixed, uri.String())
} }
// Remove duplicate addresses
fixed = stringutil.UniqueTrimmedStrings(fixed)
return fixed return fixed
} }
@ -494,13 +467,13 @@ func errorRetryAfterString() string {
return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds)) return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds))
} }
func notFoundRetryAfterSeconds(misses int) int { func notFoundRetryAfterString(misses int) string {
retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses
if retryAfterS > notFoundRetryMaxSeconds { if retryAfterS > notFoundRetryMaxSeconds {
retryAfterS = notFoundRetryMaxSeconds retryAfterS = notFoundRetryMaxSeconds
} }
retryAfterS += rand.Intn(notFoundRetryFuzzSeconds) retryAfterS += rand.Intn(notFoundRetryFuzzSeconds)
return retryAfterS return strconv.Itoa(retryAfterS)
} }
func reannounceAfterString() string { func reannounceAfterString() string {

View File

@ -69,14 +69,6 @@ func TestFixupAddresses(t *testing.T) {
remote: addr("123.123.123.123", 9000), remote: addr("123.123.123.123", 9000),
in: []string{"tcp://44.44.44.44:0"}, in: []string{"tcp://44.44.44.44:0"},
out: []string{"tcp://44.44.44.44:9000"}, out: []string{"tcp://44.44.44.44:9000"},
}, { // remote ip nil
remote: addr("", 9000),
in: []string{"tcp://:22000", "tcp://44.44.44.44:9000"},
out: []string{"tcp://44.44.44.44:9000"},
}, { // remote port 0
remote: addr("123.123.123.123", 0),
in: []string{"tcp://:22000", "tcp://44.44.44.44"},
out: []string{"tcp://123.123.123.123:22000"},
}, },
} }

View File

@ -12,14 +12,10 @@ package main
import ( import (
"context" "context"
"log" "log"
"net"
"net/url"
"sort" "sort"
"time" "time"
"github.com/syncthing/syncthing/lib/sliceutil"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util" "github.com/syndtr/goleveldb/leveldb/util"
) )
@ -58,18 +54,6 @@ func newLevelDBStore(dir string) (*levelDBStore, error) {
}, nil }, nil
} }
func newMemoryLevelDBStore() (*levelDBStore, error) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
return nil, err
}
return &levelDBStore{
db: db,
inbox: make(chan func(), 16),
clock: defaultClock{},
}, nil
}
func (s *levelDBStore) put(key string, rec DatabaseRecord) error { func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
t0 := time.Now() t0 := time.Now()
defer func() { defer func() {
@ -220,7 +204,7 @@ func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- stru
cutoff24h := t0.Add(-24 * time.Hour).UnixNano() cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano() cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano() cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
current, currentIPv4, currentIPv6, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0, 0, 0 current, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0
iter := s.db.NewIterator(&util.Range{}, nil) iter := s.db.NewIterator(&util.Range{}, nil)
for iter.Next() { for iter.Next() {
@ -235,35 +219,9 @@ func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- stru
// If there are addresses that have not expired it's a current // If there are addresses that have not expired it's a current
// record, otherwise account it based on when it was last seen // record, otherwise account it based on when it was last seen
// (last 24 hours or last week) or finally as inactice. // (last 24 hours or last week) or finally as inactice.
addrs := expire(rec.Addresses, nowNanos)
switch { switch {
case len(addrs) > 0: case len(expire(rec.Addresses, nowNanos)) > 0:
current++ current++
seenIPv4, seenIPv6 := false, false
for _, addr := range addrs {
uri, err := url.Parse(addr.Address)
if err != nil {
continue
}
host, _, err := net.SplitHostPort(uri.Host)
if err != nil {
continue
}
if ip := net.ParseIP(host); ip != nil && ip.To4() != nil {
seenIPv4 = true
} else if ip != nil {
seenIPv6 = true
}
if seenIPv4 && seenIPv6 {
break
}
}
if seenIPv4 {
currentIPv4++
}
if seenIPv6 {
currentIPv6++
}
case rec.Seen > cutoff24h: case rec.Seen > cutoff24h:
last24h++ last24h++
case rec.Seen > cutoff1w: case rec.Seen > cutoff1w:
@ -287,8 +245,6 @@ func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- stru
iter.Release() iter.Release()
databaseKeys.WithLabelValues("current").Set(float64(current)) databaseKeys.WithLabelValues("current").Set(float64(current))
databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
databaseKeys.WithLabelValues("last24h").Set(float64(last24h)) databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
databaseKeys.WithLabelValues("last1w").Set(float64(last1w)) databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
databaseKeys.WithLabelValues("inactive").Set(float64(inactive)) databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
@ -383,7 +339,14 @@ func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
i := 0 i := 0
for i < len(addrs) { for i < len(addrs) {
if addrs[i].Expires < now { if addrs[i].Expires < now {
addrs = sliceutil.RemoveAndZero(addrs, i) // This item is expired. Replace it with the last in the list
// (noop if we are at the last item).
addrs[i] = addrs[len(addrs)-1]
// Wipe the last item of the list to release references to
// strings and stuff.
addrs[len(addrs)-1] = DatabaseAddress{}
// Shorten the slice.
addrs = addrs[:len(addrs)-1]
continue continue
} }
i++ i++

View File

@ -9,12 +9,15 @@ package main
import ( import (
"context" "context"
"fmt" "fmt"
"os"
"testing" "testing"
"time" "time"
) )
func TestDatabaseGetSet(t *testing.T) { func TestDatabaseGetSet(t *testing.T) {
db, err := newMemoryLevelDBStore() os.RemoveAll("_database")
defer os.RemoveAll("_database")
db, err := newLevelDBStore("_database")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -116,7 +119,7 @@ func TestDatabaseGetSet(t *testing.T) {
// Put a record with misses // Put a record with misses
rec = DatabaseRecord{Misses: 42, Missed: tc.Now().UnixNano()} rec = DatabaseRecord{Misses: 42}
if err := db.put("efgh", rec); err != nil { if err := db.put("efgh", rec); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -185,7 +188,7 @@ func TestFilter(t *testing.T) {
}, },
{ {
a: []DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}}, a: []DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
b: []DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}}, b: []DatabaseAddress{{Address: "d", Expires: 15}, {Address: "b", Expires: 15}}, // gets reordered
}, },
} }
@ -206,6 +209,5 @@ func (t *testClock) wind(d time.Duration) {
} }
func (t *testClock) Now() time.Time { func (t *testClock) Now() time.Time {
t.now = t.now.Add(time.Nanosecond)
return t.now return t.now
} }

View File

@ -14,12 +14,10 @@ import (
"net" "net"
"net/http" "net/http"
"os" "os"
"runtime"
"strings" "strings"
"time" "time"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/build" "github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/tlsutil" "github.com/syncthing/syncthing/lib/tlsutil"
@ -66,7 +64,9 @@ var levelDBOptions = &opt.Options{
WriteBuffer: 32 << 20, // default 4<<20 WriteBuffer: 32 << 20, // default 4<<20
} }
var debug = false var (
debug = false
)
func main() { func main() {
var listen string var listen string
@ -76,26 +76,20 @@ func main() {
var replicationPeers string var replicationPeers string
var certFile string var certFile string
var keyFile string var keyFile string
var replCertFile string
var replKeyFile string
var useHTTP bool var useHTTP bool
var largeDB bool
log.SetOutput(os.Stdout) log.SetOutput(os.Stdout)
log.SetFlags(0) log.SetFlags(0)
flag.StringVar(&certFile, "cert", "./cert.pem", "Certificate file") flag.StringVar(&certFile, "cert", "./cert.pem", "Certificate file")
flag.StringVar(&keyFile, "key", "./key.pem", "Key file")
flag.StringVar(&dir, "db-dir", "./discovery.db", "Database directory") flag.StringVar(&dir, "db-dir", "./discovery.db", "Database directory")
flag.BoolVar(&debug, "debug", false, "Print debug output") flag.BoolVar(&debug, "debug", false, "Print debug output")
flag.BoolVar(&useHTTP, "http", false, "Listen on HTTP (behind an HTTPS proxy)") flag.BoolVar(&useHTTP, "http", false, "Listen on HTTP (behind an HTTPS proxy)")
flag.StringVar(&listen, "listen", ":8443", "Listen address") flag.StringVar(&listen, "listen", ":8443", "Listen address")
flag.StringVar(&keyFile, "key", "./key.pem", "Key file")
flag.StringVar(&metricsListen, "metrics-listen", "", "Metrics listen address") flag.StringVar(&metricsListen, "metrics-listen", "", "Metrics listen address")
flag.StringVar(&replicationPeers, "replicate", "", "Replication peers, id@address, comma separated") flag.StringVar(&replicationPeers, "replicate", "", "Replication peers, id@address, comma separated")
flag.StringVar(&replicationListen, "replication-listen", ":19200", "Replication listen address") flag.StringVar(&replicationListen, "replication-listen", ":19200", "Replication listen address")
flag.StringVar(&replCertFile, "replication-cert", "", "Certificate file for replication")
flag.StringVar(&replKeyFile, "replication-key", "", "Key file for replication")
flag.BoolVar(&largeDB, "large-db", false, "Use larger database settings")
showVersion := flag.Bool("version", false, "Show version") showVersion := flag.Bool("version", false, "Show version")
flag.Parse() flag.Parse()
@ -104,17 +98,6 @@ func main() {
return return
} }
buildInfo.WithLabelValues(build.Version, runtime.Version(), build.User, build.Date.UTC().Format("2006-01-02T15:04:05Z")).Set(1)
if largeDB {
levelDBOptions.BlockCacheCapacity = 64 << 20
levelDBOptions.BlockSize = 64 << 10
levelDBOptions.CompactionTableSize = 16 << 20
levelDBOptions.CompactionTableSizeMultiplier = 2.0
levelDBOptions.WriteBuffer = 64 << 20
levelDBOptions.CompactionL0Trigger = 8
}
cert, err := tls.LoadX509KeyPair(certFile, keyFile) cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if os.IsNotExist(err) { if os.IsNotExist(err) {
log.Println("Failed to load keypair. Generating one, this might take a while...") log.Println("Failed to load keypair. Generating one, this might take a while...")
@ -128,16 +111,6 @@ func main() {
devID := protocol.NewDeviceID(cert.Certificate[0]) devID := protocol.NewDeviceID(cert.Certificate[0])
log.Println("Server device ID is", devID) log.Println("Server device ID is", devID)
replCert := cert
if replCertFile != "" && replKeyFile != "" {
replCert, err = tls.LoadX509KeyPair(replCertFile, replKeyFile)
if err != nil {
log.Fatalln("Failed to load replication keypair:", err)
}
}
replDevID := protocol.NewDeviceID(replCert.Certificate[0])
log.Println("Replication device ID is", replDevID)
// Parse the replication specs, if any. // Parse the replication specs, if any.
var allowedReplicationPeers []protocol.DeviceID var allowedReplicationPeers []protocol.DeviceID
var replicationDestinations []string var replicationDestinations []string
@ -192,14 +165,14 @@ func main() {
// Start any replication senders. // Start any replication senders.
var repl replicationMultiplexer var repl replicationMultiplexer
for _, dst := range replicationDestinations { for _, dst := range replicationDestinations {
rs := newReplicationSender(dst, replCert, allowedReplicationPeers) rs := newReplicationSender(dst, cert, allowedReplicationPeers)
main.Add(rs) main.Add(rs)
repl = append(repl, rs) repl = append(repl, rs)
} }
// If we have replication configured, start the replication listener. // If we have replication configured, start the replication listener.
if len(allowedReplicationPeers) > 0 { if len(allowedReplicationPeers) > 0 {
rl := newReplicationListener(replicationListen, replCert, allowedReplicationPeers, db) rl := newReplicationListener(replicationListen, cert, allowedReplicationPeers, db)
main.Add(rl) main.Add(rl)
} }

View File

@ -19,11 +19,8 @@ import (
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
) )
const ( const replicationReadTimeout = time.Minute
replicationReadTimeout = time.Minute const replicationHeartbeatInterval = time.Second * 30
replicationWriteTimeout = 30 * time.Second
replicationHeartbeatInterval = time.Second * 30
)
type replicator interface { type replicator interface {
send(key string, addrs []DatabaseAddress, seen int64) send(key string, addrs []DatabaseAddress, seen int64)
@ -71,12 +68,6 @@ func (s *replicationSender) Serve(ctx context.Context) error {
conn.Close() conn.Close()
}() }()
// The replication stream is not especially latency sensitive, but it is
// quite a lot of data in small writes. Make it more efficient.
if tcpc, ok := conn.NetConn().(*net.TCPConn); ok {
_ = tcpc.SetNoDelay(false)
}
// Get the other side device ID. // Get the other side device ID.
remoteID, err := deviceID(conn) remoteID, err := deviceID(conn)
if err != nil { if err != nil {
@ -125,7 +116,7 @@ func (s *replicationSender) Serve(ctx context.Context) error {
binary.BigEndian.PutUint32(buf, uint32(n)) binary.BigEndian.PutUint32(buf, uint32(n))
// Send // Send
conn.SetWriteDeadline(time.Now().Add(replicationWriteTimeout)) conn.SetWriteDeadline(time.Now().Add(5 * time.Second))
if _, err := conn.Write(buf[:4+n]); err != nil { if _, err := conn.Write(buf[:4+n]); err != nil {
replicationSendsTotal.WithLabelValues("error").Inc() replicationSendsTotal.WithLabelValues("error").Inc()
log.Println("Replication write:", err) log.Println("Replication write:", err)

View File

@ -14,14 +14,6 @@ import (
) )
var ( var (
buildInfo = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "syncthing",
Subsystem: "discovery",
Name: "build_info",
Help: "A metric with a constant '1' value labeled by version, goversion, builduser and builddate from which stdiscosrv was built.",
}, []string{"version", "goversion", "builduser", "builddate"})
apiRequestsTotal = prometheus.NewCounterVec( apiRequestsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
Namespace: "syncthing", Namespace: "syncthing",
@ -98,14 +90,6 @@ var (
Help: "Latency of database operations.", Help: "Latency of database operations.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, []string{"operation"}) }, []string{"operation"})
retryAfterHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: "syncthing",
Subsystem: "discovery",
Name: "retry_after_seconds",
Help: "Retry-After header value in seconds.",
Buckets: prometheus.ExponentialBuckets(60, 2, 7), // 60, 120, 240, 480, 960, 1920, 3840
})
) )
const ( const (
@ -120,13 +104,11 @@ const (
) )
func init() { func init() {
prometheus.MustRegister(buildInfo, prometheus.MustRegister(apiRequestsTotal, apiRequestsSeconds,
apiRequestsTotal, apiRequestsSeconds,
lookupRequestsTotal, announceRequestsTotal, lookupRequestsTotal, announceRequestsTotal,
replicationSendsTotal, replicationRecvsTotal, replicationSendsTotal, replicationRecvsTotal,
databaseKeys, databaseStatisticsSeconds, databaseKeys, databaseStatisticsSeconds,
databaseOperations, databaseOperationSeconds, databaseOperations, databaseOperationSeconds)
retryAfterHistogram)
processCollectorOpts := collectors.ProcessCollectorOpts{ processCollectorOpts := collectors.ProcessCollectorOpts{
Namespace: "syncthing_discovery", Namespace: "syncthing_discovery",
@ -138,4 +120,5 @@ func init() {
prometheus.MustRegister( prometheus.MustRegister(
collectors.NewProcessCollector(processCollectorOpts), collectors.NewProcessCollector(processCollectorOpts),
) )
} }

View File

@ -14,8 +14,6 @@ import (
"net/http" "net/http"
"os" "os"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
) )
type event struct { type event struct {

View File

@ -13,7 +13,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner" "github.com/syncthing/syncthing/lib/scanner"
) )

View File

@ -16,7 +16,6 @@ import (
"os" "os"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/discover" "github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/events"

View File

@ -12,7 +12,6 @@ import (
"fmt" "fmt"
"os" "os"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore" "github.com/syncthing/syncthing/lib/ignore"
) )

View File

@ -15,8 +15,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
) )
func main() { func main() {
@ -45,7 +43,7 @@ func generateFiles(dir string, files, maxexp int, srcname string) error {
} }
p0 := filepath.Join(dir, string(n[0]), n[0:2]) p0 := filepath.Join(dir, string(n[0]), n[0:2])
err = os.MkdirAll(p0, 0o755) err = os.MkdirAll(p0, 0755)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -68,7 +66,7 @@ func generateFiles(dir string, files, maxexp int, srcname string) error {
} }
func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error { func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
src := io.LimitReader(&infiniteReader{fd}, s) src := io.LimitReader(&inifiteReader{fd}, s)
dst, err := os.Create(p1) dst, err := os.Create(p1)
if err != nil { if err != nil {
return err return err
@ -84,7 +82,7 @@ func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
return err return err
} }
os.Chmod(p1, os.FileMode(rand.Intn(0o777)|0o400)) os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second) t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
return os.Chtimes(p1, t, t) return os.Chtimes(p1, t, t)
@ -107,11 +105,11 @@ func readRand(bs []byte) (int, error) {
return len(bs), nil return len(bs), nil
} }
type infiniteReader struct { type inifiteReader struct {
rd io.ReadSeeker rd io.ReadSeeker
} }
func (i *infiniteReader) Read(bs []byte) (int, error) { func (i *inifiteReader) Read(bs []byte) (int, error) {
n, err := i.rd.Read(bs) n, err := i.rd.Read(bs)
if err == io.EOF { if err == io.EOF {
err = nil err = nil

View File

@ -237,7 +237,7 @@
uptimeSeconds: 0, uptimeSeconds: 0,
}; };
$scope.map = L.map('map').setView([40.90296, 1.90925], 2); $scope.map = L.map('map').setView([40.90296, 1.90925], 2);
L.tileLayer('https://tile.openstreetmap.org/{z}/{x}/{y}.png', L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
{ {
attribution: 'Leaflet', attribution: 'Leaflet',
maxZoom: 17 maxZoom: 17

View File

@ -3,12 +3,14 @@
package main package main
import ( import (
"compress/gzip"
"context" "context"
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"encoding/json" "encoding/json"
"flag" "flag"
"fmt" "fmt"
"io"
"log" "log"
"net" "net"
"net/http" "net/http"
@ -17,22 +19,21 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"sync/atomic"
"time" "time"
lru "github.com/hashicorp/golang-lru/v2" "github.com/syncthing/syncthing/lib/protocol"
"github.com/golang/groupcache/lru"
"github.com/oschwald/geoip2-golang" "github.com/oschwald/geoip2-golang"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto" "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
"github.com/syncthing/syncthing/lib/assets" "github.com/syncthing/syncthing/lib/assets"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/httpcache"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand" "github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/relay/client" "github.com/syncthing/syncthing/lib/relay/client"
"github.com/syncthing/syncthing/lib/sync" "github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/tlsutil" "github.com/syncthing/syncthing/lib/tlsutil"
"golang.org/x/time/rate"
) )
type location struct { type location struct {
@ -98,13 +99,27 @@ var (
dir string dir string
evictionTime = time.Hour evictionTime = time.Hour
debug bool debug bool
getLRUSize = 10 << 10
getLimitBurst = 10
getLimitAvg = 2
postLRUSize = 1 << 10
postLimitBurst = 2
postLimitAvg = 2
getLimit time.Duration
postLimit time.Duration
permRelaysFile string permRelaysFile string
ipHeader string ipHeader string
geoipPath string geoipPath string
proto string proto string
statsRefresh = time.Minute statsRefresh = time.Minute / 2
requestQueueLen = 64 requestQueueLen = 10
requestProcessors = 8 requestProcessors = 1
getMut = sync.NewMutex()
getLRUCache *lru.Cache
postMut = sync.NewMutex()
postLRUCache *lru.Cache
requests chan request requests chan request
@ -112,7 +127,6 @@ var (
knownRelays = make([]*relay, 0) knownRelays = make([]*relay, 0)
permanentRelays = make([]*relay, 0) permanentRelays = make([]*relay, 0)
evictionTimers = make(map[string]*time.Timer) evictionTimers = make(map[string]*time.Timer)
globalBlocklist = newErrorTracker(1000)
) )
const ( const (
@ -127,8 +141,13 @@ func main() {
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening") flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
flag.BoolVar(&debug, "debug", debug, "Enable debug output") flag.BoolVar(&debug, "debug", debug, "Enable debug output")
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted") flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
flag.IntVar(&getLRUSize, "get-limit-cache", getLRUSize, "Get request limiter cache size")
flag.IntVar(&getLimitAvg, "get-limit-avg", getLimitAvg, "Allowed average get request rate, per 10 s")
flag.IntVar(&getLimitBurst, "get-limit-burst", getLimitBurst, "Allowed burst get requests")
flag.IntVar(&postLRUSize, "post-limit-cache", postLRUSize, "Post request limiter cache size")
flag.IntVar(&postLimitAvg, "post-limit-avg", postLimitAvg, "Allowed average post request rate, per minute")
flag.IntVar(&postLimitBurst, "post-limit-burst", postLimitBurst, "Allowed burst post requests")
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays") flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
flag.StringVar(&knownRelaysFile, "known-relays", knownRelaysFile, "Path to list of current relays")
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.") flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database") flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6") flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
@ -140,6 +159,12 @@ func main() {
requests = make(chan request, requestQueueLen) requests = make(chan request, requestQueueLen)
getLimit = 10 * time.Second / time.Duration(getLimitAvg)
postLimit = time.Minute / time.Duration(postLimitAvg)
getLRUCache = lru.New(getLRUSize)
postLRUCache = lru.New(postLRUSize)
var listener net.Listener var listener net.Listener
var err error var err error
@ -215,7 +240,7 @@ func main() {
handler := http.NewServeMux() handler := http.NewServeMux()
handler.HandleFunc("/", handleAssets) handler.HandleFunc("/", handleAssets)
handler.Handle("/endpoint", httpcache.SinglePath(http.HandlerFunc(handleRequest), 15*time.Second)) handler.HandleFunc("/endpoint", handleRequest)
handler.HandleFunc("/metrics", handleMetrics) handler.HandleFunc("/metrics", handleMetrics)
srv := http.Server{ srv := http.Server{
@ -266,17 +291,21 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
}() }()
if ipHeader != "" { if ipHeader != "" {
hdr := r.Header.Get(ipHeader) r.RemoteAddr = r.Header.Get(ipHeader)
fields := strings.Split(hdr, ",")
if len(fields) > 0 {
r.RemoteAddr = strings.TrimSpace(fields[len(fields)-1])
}
} }
w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Origin", "*")
switch r.Method { switch r.Method {
case "GET": case "GET":
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, getLimitBurst) {
w.WriteHeader(httpStatusEnhanceYourCalm)
return
}
handleGetRequest(w, r) handleGetRequest(w, r)
case "POST": case "POST":
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, postLimitBurst) {
w.WriteHeader(httpStatusEnhanceYourCalm)
return
}
handlePostRequest(w, r) handlePostRequest(w, r)
default: default:
if debug { if debug {
@ -298,28 +327,20 @@ func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
// Shuffle // Shuffle
rand.Shuffle(relays) rand.Shuffle(relays)
_ = json.NewEncoder(rw).Encode(map[string][]*relay{ w := io.Writer(rw)
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
rw.Header().Set("Content-Encoding", "gzip")
gw := gzip.NewWriter(rw)
defer gw.Close()
w = gw
}
_ = json.NewEncoder(w).Encode(map[string][]*relay{
"relays": relays, "relays": relays,
}) })
} }
func handlePostRequest(w http.ResponseWriter, r *http.Request) { func handlePostRequest(w http.ResponseWriter, r *http.Request) {
// Get the IP address of the client
rhost := r.RemoteAddr
if host, _, err := net.SplitHostPort(rhost); err == nil {
rhost = host
}
// Check the black list. A client is blacklisted if their last 10
// attempts to join have all failed. The "Unauthorized" status return
// causes strelaysrv to cease attempting to join.
if globalBlocklist.IsBlocked(rhost) {
log.Println("Rejected blocked client", rhost)
http.Error(w, "Too many errors", http.StatusUnauthorized)
globalBlocklist.ClearErrors(rhost)
return
}
var relayCert *x509.Certificate var relayCert *x509.Certificate
if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
relayCert = r.TLS.PeerCertificates[0] relayCert = r.TLS.PeerCertificates[0]
@ -371,6 +392,12 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
return return
} }
// Get the IP address of the client
rhost := r.RemoteAddr
if host, _, err := net.SplitHostPort(rhost); err == nil {
rhost = host
}
ip := net.ParseIP(host) ip := net.ParseIP(host)
// The client did not provide an IP address, use the IP address of the client. // The client did not provide an IP address, use the IP address of the client.
if ip == nil || ip.IsUnspecified() { if ip == nil || ip.IsUnspecified() {
@ -402,14 +429,10 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
case requests <- request{&newRelay, reschan, prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("queue"))}: case requests <- request{&newRelay, reschan, prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("queue"))}:
result := <-reschan result := <-reschan
if result.err != nil { if result.err != nil {
log.Println("Join from", r.RemoteAddr, "failed:", result.err)
globalBlocklist.AddError(rhost)
relayTestsTotal.WithLabelValues("failed").Inc() relayTestsTotal.WithLabelValues("failed").Inc()
http.Error(w, result.err.Error(), http.StatusBadRequest) http.Error(w, result.err.Error(), http.StatusBadRequest)
return return
} }
log.Println("Join from", r.RemoteAddr, "succeeded")
globalBlocklist.ClearErrors(rhost)
relayTestsTotal.WithLabelValues("success").Inc() relayTestsTotal.WithLabelValues("success").Inc()
w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]time.Duration{ json.NewEncoder(w).Encode(map[string]time.Duration{
@ -523,6 +546,23 @@ func evict(relay *relay) func() {
} }
} }
func limit(addr string, cache *lru.Cache, lock sync.Mutex, intv time.Duration, burst int) bool {
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
lock.Lock()
v, _ := cache.Get(addr)
bkt, ok := v.(*rate.Limiter)
if !ok {
bkt = rate.NewLimiter(rate.Every(intv), burst)
cache.Add(addr, bkt)
}
lock.Unlock()
return !bkt.Allow()
}
func loadRelays(file string) []*relay { func loadRelays(file string) []*relay {
content, err := os.ReadFile(file) content, err := os.ReadFile(file)
if err != nil { if err != nil {
@ -562,7 +602,7 @@ func saveRelays(file string, relays []*relay) error {
for _, relay := range relays { for _, relay := range relays {
content += relay.uri.String() + "\n" content += relay.uri.String() + "\n"
} }
return os.WriteFile(file, []byte(content), 0o777) return os.WriteFile(file, []byte(content), 0777)
} }
func createTestCertificate() tls.Certificate { func createTestCertificate() tls.Certificate {
@ -621,42 +661,3 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code) lrw.ResponseWriter.WriteHeader(code)
} }
type errorTracker struct {
errors *lru.TwoQueueCache[string, *errorCounter]
}
type errorCounter struct {
count atomic.Int32
}
func newErrorTracker(size int) *errorTracker {
cache, err := lru.New2Q[string, *errorCounter](size)
if err != nil {
panic(err)
}
return &errorTracker{
errors: cache,
}
}
func (b *errorTracker) AddError(host string) {
entry, ok := b.errors.Get(host)
if !ok {
entry = &errorCounter{}
b.errors.Add(host, entry)
}
c := entry.count.Add(1)
log.Printf("Error count for %s is now %d", host, c)
}
func (b *errorTracker) ClearErrors(host string) {
b.errors.Remove(host)
}
func (b *errorTracker) IsBlocked(host string) bool {
if be, ok := b.errors.Get(host); ok {
return be.count.Load() > 10
}
return false
}

View File

@ -20,7 +20,7 @@ import (
var ( var (
outboxesMut = sync.RWMutex{} outboxesMut = sync.RWMutex{}
outboxes = make(map[syncthingprotocol.DeviceID]chan interface{}) outboxes = make(map[syncthingprotocol.DeviceID]chan interface{})
numConnections atomic.Int64 numConnections int64
) )
func listener(_, addr string, config *tls.Config, token string) { func listener(_, addr string, config *tls.Config, token string) {
@ -36,14 +36,8 @@ func listener(_, addr string, config *tls.Config, token string) {
for { for {
conn, isTLS, err := listener.AcceptNoWrapTLS() conn, isTLS, err := listener.AcceptNoWrapTLS()
if err != nil { if err != nil {
// Conn may be nil if accept failed, or non-nil if the initial
// read to figure out if it's TLS or not failed. In the latter
// case, close the connection before moving on.
if conn != nil {
conn.Close()
}
if debug { if debug {
log.Println("Listener failed to accept:", err) log.Println("Listener failed to accept connection from", conn.RemoteAddr(), ". Possibly a TCP Ping.")
} }
continue continue
} }
@ -134,7 +128,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config, token strin
continue continue
} }
if overLimit.Load() { if atomic.LoadInt32(&overLimit) > 0 {
protocol.WriteMessage(conn, protocol.RelayFull{}) protocol.WriteMessage(conn, protocol.RelayFull{})
if debug { if debug {
log.Println("Refusing join request from", id, "due to being over limits") log.Println("Refusing join request from", id, "due to being over limits")
@ -273,7 +267,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config, token strin
conn.Close() conn.Close()
} }
if overLimit.Load() && !hasSessions(id) { if atomic.LoadInt32(&overLimit) > 0 && !hasSessions(id) {
if debug { if debug {
log.Println("Dropping", id, "as it has no sessions and we are over our limits") log.Println("Dropping", id, "as it has no sessions and we are over our limits")
} }
@ -366,8 +360,8 @@ func sessionConnectionHandler(conn net.Conn) {
} }
func messageReader(conn net.Conn, messages chan<- interface{}, errors chan<- error) { func messageReader(conn net.Conn, messages chan<- interface{}, errors chan<- error) {
numConnections.Add(1) atomic.AddInt64(&numConnections, 1)
defer numConnections.Add(-1) defer atomic.AddInt64(&numConnections, -1)
for { for {
msg, err := protocol.ReadMessage(conn) msg, err := protocol.ReadMessage(conn)

View File

@ -19,18 +19,19 @@ import (
"syscall" "syscall"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/build" "github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/nat"
"github.com/syncthing/syncthing/lib/osutil" "github.com/syncthing/syncthing/lib/osutil"
_ "github.com/syncthing/syncthing/lib/pmp"
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/relay/protocol" "github.com/syncthing/syncthing/lib/relay/protocol"
"github.com/syncthing/syncthing/lib/tlsutil" "github.com/syncthing/syncthing/lib/tlsutil"
_ "github.com/syncthing/syncthing/lib/upnp"
"golang.org/x/time/rate" "golang.org/x/time/rate"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/nat"
_ "github.com/syncthing/syncthing/lib/pmp"
_ "github.com/syncthing/syncthing/lib/upnp"
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
) )
var ( var (
@ -48,7 +49,7 @@ var (
sessionLimitBps int sessionLimitBps int
globalLimitBps int globalLimitBps int
overLimit atomic.Bool overLimit int32
descriptorLimit int64 descriptorLimit int64
sessionLimiter *rate.Limiter sessionLimiter *rate.Limiter
globalLimiter *rate.Limiter globalLimiter *rate.Limiter
@ -193,15 +194,7 @@ func main() {
cfg.Options.NATTimeoutS = natTimeout cfg.Options.NATTimeoutS = natTimeout
}) })
natSvc := nat.NewService(id, wrapper) natSvc := nat.NewService(id, wrapper)
var ipVersion nat.IPVersion mapping := mapping{natSvc.NewMapping(nat.TCP, addr.IP, addr.Port)}
if strings.HasSuffix(proto, "4") {
ipVersion = nat.IPv4Only
} else if strings.HasSuffix(proto, "6") {
ipVersion = nat.IPv6Only
} else {
ipVersion = nat.IPvAny
}
mapping := mapping{natSvc.NewMapping(nat.TCP, ipVersion, addr.IP, addr.Port)}
if natEnabled { if natEnabled {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -315,10 +308,10 @@ func main() {
func monitorLimits() { func monitorLimits() {
limitCheckTimer = time.NewTimer(time.Minute) limitCheckTimer = time.NewTimer(time.Minute)
for range limitCheckTimer.C { for range limitCheckTimer.C {
if numConnections.Load()+numProxies.Load() > descriptorLimit { if atomic.LoadInt64(&numConnections)+atomic.LoadInt64(&numProxies) > descriptorLimit {
overLimit.Store(true) atomic.StoreInt32(&overLimit, 1)
log.Println("Gone past our connection limits. Starting to refuse new/drop idle connections.") log.Println("Gone past our connection limits. Starting to refuse new/drop idle connections.")
} else if overLimit.CompareAndSwap(true, false) { } else if atomic.CompareAndSwapInt32(&overLimit, 1, 0) {
log.Println("Dropped below our connection limits. Accepting new connections.") log.Println("Dropped below our connection limits. Accepting new connections.")
} }
limitCheckTimer.Reset(time.Minute) limitCheckTimer.Reset(time.Minute)

View File

@ -23,8 +23,8 @@ var (
sessionMut = sync.RWMutex{} sessionMut = sync.RWMutex{}
activeSessions = make([]*session, 0) activeSessions = make([]*session, 0)
pendingSessions = make(map[string]*session) pendingSessions = make(map[string]*session)
numProxies atomic.Int64 numProxies int64
bytesProxied atomic.Int64 bytesProxied int64
) )
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session { func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
@ -251,8 +251,8 @@ func (s *session) proxy(c1, c2 net.Conn) error {
log.Println("Proxy", c1.RemoteAddr(), "->", c2.RemoteAddr()) log.Println("Proxy", c1.RemoteAddr(), "->", c2.RemoteAddr())
} }
numProxies.Add(1) atomic.AddInt64(&numProxies, 1)
defer numProxies.Add(-1) defer atomic.AddInt64(&numProxies, -1)
buf := make([]byte, networkBufferSize) buf := make([]byte, networkBufferSize)
for { for {
@ -262,7 +262,7 @@ func (s *session) proxy(c1, c2 net.Conn) error {
return err return err
} }
bytesProxied.Add(int64(n)) atomic.AddInt64(&bytesProxied, int64(n))
if debug { if debug {
log.Printf("%d bytes from %s to %s", n, c1.RemoteAddr(), c2.RemoteAddr()) log.Printf("%d bytes from %s to %s", n, c1.RemoteAddr(), c2.RemoteAddr())

View File

@ -51,9 +51,9 @@ func getStatus(w http.ResponseWriter, _ *http.Request) {
status["numPendingSessionKeys"] = len(pendingSessions) status["numPendingSessionKeys"] = len(pendingSessions)
status["numActiveSessions"] = len(activeSessions) status["numActiveSessions"] = len(activeSessions)
sessionMut.Unlock() sessionMut.Unlock()
status["numConnections"] = numConnections.Load() status["numConnections"] = atomic.LoadInt64(&numConnections)
status["numProxies"] = numProxies.Load() status["numProxies"] = atomic.LoadInt64(&numProxies)
status["bytesProxied"] = bytesProxied.Load() status["bytesProxied"] = atomic.LoadInt64(&bytesProxied)
status["goVersion"] = runtime.Version() status["goVersion"] = runtime.Version()
status["goOS"] = runtime.GOOS status["goOS"] = runtime.GOOS
status["goArch"] = runtime.GOARCH status["goArch"] = runtime.GOARCH
@ -88,13 +88,13 @@ func getStatus(w http.ResponseWriter, _ *http.Request) {
} }
type rateCalculator struct { type rateCalculator struct {
counter *atomic.Int64 counter *int64 // atomic, must remain 64-bit aligned
rates []int64 rates []int64
prev int64 prev int64
startTime time.Time startTime time.Time
} }
func newRateCalculator(keepIntervals int, interval time.Duration, counter *atomic.Int64) *rateCalculator { func newRateCalculator(keepIntervals int, interval time.Duration, counter *int64) *rateCalculator {
r := &rateCalculator{ r := &rateCalculator{
rates: make([]int64, keepIntervals), rates: make([]int64, keepIntervals),
counter: counter, counter: counter,
@ -112,7 +112,7 @@ func (r *rateCalculator) updateRates(interval time.Duration) {
next := now.Truncate(interval).Add(interval) next := now.Truncate(interval).Add(interval)
time.Sleep(next.Sub(now)) time.Sleep(next.Sub(now))
cur := r.counter.Load() cur := atomic.LoadInt64(r.counter)
rate := int64(float64(cur-r.prev) / interval.Seconds()) rate := int64(float64(cur-r.prev) / interval.Seconds())
copy(r.rates[1:], r.rates) copy(r.rates[1:], r.rates)
r.rates[0] = rate r.rates[0] = rate

View File

@ -14,7 +14,6 @@ import (
"path/filepath" "path/filepath"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol" syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/relay/client" "github.com/syncthing/syncthing/lib/relay/client"
"github.com/syncthing/syncthing/lib/relay/protocol" "github.com/syncthing/syncthing/lib/relay/protocol"

View File

@ -12,7 +12,6 @@ import (
"log" "log"
"os" "os"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/signature" "github.com/syncthing/syncthing/lib/signature"
"github.com/syncthing/syncthing/lib/upgrade" "github.com/syncthing/syncthing/lib/upgrade"
) )

View File

@ -7,123 +7,31 @@
package main package main
import ( import (
"bytes"
"encoding/json" "encoding/json"
"fmt" "flag"
"io"
"log"
"net/http"
"os" "os"
"sort" "sort"
"strings"
"time"
"github.com/alecthomas/kong"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/httpcache"
"github.com/syncthing/syncthing/lib/upgrade" "github.com/syncthing/syncthing/lib/upgrade"
) )
type cli struct { const defaultURL = "https://api.github.com/repos/syncthing/syncthing/releases?per_page=25"
Listen string `default:":8080" help:"Listen address"`
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
CacheTime time.Duration `default:"15m" help:"Cache time"`
}
func main() { func main() {
var params cli url := flag.String("u", defaultURL, "GitHub releases url")
kong.Parse(&params) flag.Parse()
if err := server(&params); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
}
func server(params *cli) error { rels := upgrade.FetchLatestReleases(*url, "")
http.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime))
for _, fwd := range params.Forward {
path, url, ok := strings.Cut(fwd, "->")
if !ok {
return fmt.Errorf("invalid forward: %q", fwd)
}
http.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
}
return http.ListenAndServe(params.Listen, nil)
}
type githubReleases struct {
url string
}
func (p *githubReleases) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
log.Println("Fetching", p.url)
rels := upgrade.FetchLatestReleases(p.url, "")
if rels == nil { if rels == nil {
http.Error(w, "no releases", http.StatusInternalServerError) // An error was already logged
return os.Exit(1)
} }
sort.Sort(upgrade.SortByRelease(rels)) sort.Sort(upgrade.SortByRelease(rels))
rels = filterForLatest(rels) rels = filterForLatest(rels)
// Move the URL used for browser downloads to the URL field, and remove if err := json.NewEncoder(os.Stdout).Encode(rels); err != nil {
// the browser URL field. This avoids going via the GitHub API for os.Exit(1)
// downloads, since Syncthing uses the URL field.
for _, rel := range rels {
for j, asset := range rel.Assets {
rel.Assets[j].URL = asset.BrowserURL
rel.Assets[j].BrowserURL = ""
}
}
buf := new(bytes.Buffer)
_ = json.NewEncoder(buf).Encode(rels)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET")
w.Write(buf.Bytes())
}
type proxy struct {
url string
}
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
log.Println("Fetching", p.url)
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
w.Header().Set("Content-Type", ct)
if resp.StatusCode == http.StatusOK {
w.Header().Set("Cache-Control", "public, max-age=900")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET")
}
w.WriteHeader(resp.StatusCode)
if strings.HasPrefix(ct, "application/json") {
// Special JSON handling; clean it up a bit.
var v interface{}
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
_ = json.NewEncoder(w).Encode(v)
} else {
_, _ = io.Copy(w, resp.Body)
} }
} }

View File

@ -26,7 +26,6 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
) )
@ -45,7 +44,7 @@ func main() {
found := make(chan result) found := make(chan result)
stop := make(chan struct{}) stop := make(chan struct{})
var count atomic.Int64 var count int64
// Print periodic progress reports. // Print periodic progress reports.
go printProgress(prefix, &count) go printProgress(prefix, &count)
@ -73,7 +72,7 @@ func main() {
// Try certificates until one is found that has the prefix at the start of // Try certificates until one is found that has the prefix at the start of
// the resulting device ID. Increments count atomically, sends the result to // the resulting device ID. Increments count atomically, sends the result to
// found, returns when stop is closed. // found, returns when stop is closed.
func generatePrefixed(prefix string, count *atomic.Int64, found chan<- result, stop <-chan struct{}) { func generatePrefixed(prefix string, count *int64, found chan<- result, stop <-chan struct{}) {
notBefore := time.Now() notBefore := time.Now()
notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
@ -110,7 +109,7 @@ func generatePrefixed(prefix string, count *atomic.Int64, found chan<- result, s
} }
id := protocol.NewDeviceID(derBytes) id := protocol.NewDeviceID(derBytes)
count.Add(1) atomic.AddInt64(count, 1)
if strings.HasPrefix(id.String(), prefix) { if strings.HasPrefix(id.String(), prefix) {
select { select {
@ -122,7 +121,7 @@ func generatePrefixed(prefix string, count *atomic.Int64, found chan<- result, s
} }
} }
func printProgress(prefix string, count *atomic.Int64) { func printProgress(prefix string, count *int64) {
started := time.Now() started := time.Now()
wantBits := 5 * len(prefix) wantBits := 5 * len(prefix)
if wantBits > 63 { if wantBits > 63 {
@ -133,7 +132,7 @@ func printProgress(prefix string, count *atomic.Int64) {
fmt.Printf("Want %d bits for prefix %q, about %.2g certs to test (statistically speaking)\n", wantBits, prefix, expectedIterations) fmt.Printf("Want %d bits for prefix %q, about %.2g certs to test (statistically speaking)\n", wantBits, prefix, expectedIterations)
for range time.NewTicker(15 * time.Second).C { for range time.NewTicker(15 * time.Second).C {
tried := count.Load() tried := atomic.LoadInt64(count)
elapsed := time.Since(started) elapsed := time.Since(started)
rate := float64(tried) / elapsed.Seconds() rate := float64(tried) / elapsed.Seconds()
expected := timeStr(expectedIterations / rate) expected := timeStr(expectedIterations / rate)
@ -158,7 +157,7 @@ func saveCert(priv interface{}, derBytes []byte) {
os.Exit(1) os.Exit(1)
} }
keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
os.Exit(1) os.Exit(1)

View File

@ -13,7 +13,6 @@ import (
"os" "os"
"time" "time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256" "github.com/syncthing/syncthing/lib/sha256"
) )

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"fmt" "fmt"

View File

@ -13,7 +13,6 @@ import (
"reflect" "reflect"
"github.com/AudriusButkevicius/recli" "github.com/AudriusButkevicius/recli"
"github.com/alecthomas/kong"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
@ -24,20 +23,9 @@ type configHandler struct {
err error err error
} }
type configCommand struct { func getConfigCommand(f *apiClientFactory) (cli.Command, error) {
Args []string `arg:"" default:"-h"`
}
func (c *configCommand) Run(ctx Context, _ *kong.Context) error {
app := cli.NewApp()
app.Name = "syncthing"
app.Author = "The Syncthing Authors"
app.Metadata = map[string]interface{}{
"clientFactory": ctx.clientFactory,
}
h := new(configHandler) h := new(configHandler)
h.client, h.err = ctx.clientFactory.getClient() h.client, h.err = f.getClient()
if h.err == nil { if h.err == nil {
h.cfg, h.err = getConfig(h.client) h.cfg, h.err = getConfig(h.client)
} }
@ -50,15 +38,17 @@ func (c *configCommand) Run(ctx Context, _ *kong.Context) error {
commands, err := recli.New(recliCfg).Construct(&h.cfg) commands, err := recli.New(recliCfg).Construct(&h.cfg)
if err != nil { if err != nil {
return fmt.Errorf("config reflect: %w", err) return cli.Command{}, fmt.Errorf("config reflect: %w", err)
} }
app.Commands = commands return cli.Command{
app.HideHelp = true Name: "config",
app.Before = h.configBefore HideHelp: true,
app.After = h.configAfter Usage: "Configuration modification command group",
Subcommands: commands,
return app.Run(append([]string{app.Name}, c.Args...)) Before: h.configBefore,
After: h.configAfter,
}, nil
} }
func (h *configHandler) configBefore(c *cli.Context) error { func (h *configHandler) configBefore(c *cli.Context) error {

View File

@ -9,37 +9,47 @@ package cli
import ( import (
"fmt" "fmt"
"net/url" "net/url"
"github.com/urfave/cli"
) )
type fileCommand struct { var debugCommand = cli.Command{
FolderID string `arg:""` Name: "debug",
Path string `arg:""` HideHelp: true,
Usage: "Debug command group",
Subcommands: []cli.Command{
{
Name: "file",
Usage: "Show information about a file (or directory/symlink)",
ArgsUsage: "FOLDER-ID PATH",
Action: expects(2, debugFile()),
},
indexCommand,
{
Name: "profile",
Usage: "Save a profile to help figuring out what Syncthing does.",
ArgsUsage: "cpu | heap",
Action: expects(1, profile()),
},
},
} }
func (f *fileCommand) Run(ctx Context) error { func debugFile() cli.ActionFunc {
indexDumpOutput := indexDumpOutputWrapper(ctx.clientFactory) return func(c *cli.Context) error {
query := make(url.Values)
query := make(url.Values) query.Set("folder", c.Args()[0])
query.Set("folder", f.FolderID) query.Set("file", normalizePath(c.Args()[1]))
query.Set("file", normalizePath(f.Path)) return indexDumpOutput("debug/file?" + query.Encode())(c)
return indexDumpOutput("debug/file?" + query.Encode())
}
type profileCommand struct {
Type string `arg:"" help:"cpu | heap"`
}
func (p *profileCommand) Run(ctx Context) error {
switch t := p.Type; t {
case "cpu", "heap":
return saveToFile(fmt.Sprintf("debug/%vprof", p.Type), ctx.clientFactory)
default:
return fmt.Errorf("expected cpu or heap as argument, got %v", t)
} }
} }
type debugCommand struct { func profile() cli.ActionFunc {
File fileCommand `cmd:"" help:"Show information about a file (or directory/symlink)"` return func(c *cli.Context) error {
Profile profileCommand `cmd:"" help:"Save a profile to help figuring out what Syncthing does"` switch t := c.Args()[0]; t {
Index indexCommand `cmd:"" help:"Show information about the index (database)"` case "cpu", "heap":
return saveToFile(fmt.Sprintf("debug/%vprof", c.Args()[0]))(c)
default:
return fmt.Errorf("expected cpu or heap as argument, got %v", t)
}
}
} }

View File

@ -11,25 +11,36 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/alecthomas/kong" "github.com/urfave/cli"
) )
type errorsCommand struct { var errorsCommand = cli.Command{
Show struct{} `cmd:"" help:"Show pending errors"` Name: "errors",
Push errorsPushCommand `cmd:"" help:"Push an error to active clients"` HideHelp: true,
Clear struct{} `cmd:"" help:"Clear pending errors"` Usage: "Error command group",
Subcommands: []cli.Command{
{
Name: "show",
Usage: "Show pending errors",
Action: expects(0, indexDumpOutput("system/error")),
},
{
Name: "push",
Usage: "Push an error to active clients",
ArgsUsage: "ERROR-MESSAGE",
Action: expects(1, errorsPush),
},
{
Name: "clear",
Usage: "Clear pending errors",
Action: expects(0, emptyPost("system/error/clear")),
},
},
} }
type errorsPushCommand struct { func errorsPush(c *cli.Context) error {
ErrorMessage string `arg:""` client := c.App.Metadata["client"].(APIClient)
} errStr := strings.Join(c.Args(), " ")
func (e *errorsPushCommand) Run(ctx Context) error {
client, err := ctx.clientFactory.getClient()
if err != nil {
return err
}
errStr := e.ErrorMessage
response, err := client.Post("system/error", strings.TrimSpace(errStr)) response, err := client.Post("system/error", strings.TrimSpace(errStr))
if err != nil { if err != nil {
return err return err
@ -48,13 +59,3 @@ func (e *errorsPushCommand) Run(ctx Context) error {
} }
return nil return nil
} }
func (*errorsCommand) Run(ctx Context, kongCtx *kong.Context) error {
switch kongCtx.Selected().Name {
case "show":
return indexDumpOutput("system/error", ctx.clientFactory)
case "clear":
return emptyPost("system/error/clear", ctx.clientFactory)
}
return nil
}

View File

@ -7,26 +7,32 @@
package cli package cli
import ( import (
"github.com/alecthomas/kong" "github.com/urfave/cli"
) )
type indexCommand struct { var indexCommand = cli.Command{
Dump struct{} `cmd:"" help:"Print the entire db"` Name: "index",
DumpSize struct{} `cmd:"" help:"Print the db size of different categories of information"` Usage: "Show information about the index (database)",
Check struct{} `cmd:"" help:"Check the database for inconsistencies"` Subcommands: []cli.Command{
Account struct{} `cmd:"" help:"Print key and value size statistics per key type"` {
} Name: "dump",
Usage: "Print the entire db",
func (*indexCommand) Run(kongCtx *kong.Context) error { Action: expects(0, indexDump),
switch kongCtx.Selected().Name { },
case "dump": {
return indexDump() Name: "dump-size",
case "dump-size": Usage: "Print the db size of different categories of information",
return indexDumpSize() Action: expects(0, indexDumpSize),
case "check": },
return indexCheck() {
case "account": Name: "check",
return indexAccount() Usage: "Check the database for inconsistencies",
} Action: expects(0, indexCheck),
return nil },
{
Name: "account",
Usage: "Print key and value size statistics per key type",
Action: expects(0, indexAccount),
},
},
} }

View File

@ -10,10 +10,12 @@ import (
"fmt" "fmt"
"os" "os"
"text/tabwriter" "text/tabwriter"
"github.com/urfave/cli"
) )
// indexAccount prints key and data size statistics per class // indexAccount prints key and data size statistics per class
func indexAccount() error { func indexAccount(*cli.Context) error {
ldb, err := getDB() ldb, err := getDB()
if err != nil { if err != nil {
return err return err

View File

@ -11,11 +11,13 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/urfave/cli"
"github.com/syncthing/syncthing/lib/db" "github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
) )
func indexDump() error { func indexDump(*cli.Context) error {
ldb, err := getDB() ldb, err := getDB()
if err != nil { if err != nil {
return err return err

View File

@ -11,10 +11,12 @@ import (
"fmt" "fmt"
"sort" "sort"
"github.com/urfave/cli"
"github.com/syncthing/syncthing/lib/db" "github.com/syncthing/syncthing/lib/db"
) )
func indexDumpSize() error { func indexDumpSize(*cli.Context) error {
type sizedElement struct { type sizedElement struct {
key string key string
size int size int

View File

@ -13,6 +13,8 @@ import (
"fmt" "fmt"
"sort" "sort"
"github.com/urfave/cli"
"github.com/syncthing/syncthing/lib/db" "github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
) )
@ -33,7 +35,7 @@ type sequenceKey struct {
sequence uint64 sequence uint64
} }
func indexCheck() (err error) { func indexCheck(*cli.Context) (err error) {
ldb, err := getDB() ldb, err := getDB()
if err != nil { if err != nil {
return err return err

View File

@ -7,72 +7,166 @@
package cli package cli
import ( import (
"bufio"
"errors"
"fmt" "fmt"
"io"
"os"
"strings"
"github.com/alecthomas/kong" "github.com/alecthomas/kong"
"github.com/willabides/kongplete" "github.com/flynn-archive/go-shlex"
"github.com/urfave/cli"
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil" "github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
) )
type CLI struct { type preCli struct {
cmdutil.CommonOptions
DataDir string `name:"data" placeholder:"PATH" env:"STDATADIR" help:"Set data directory (database and logs)"`
GUIAddress string `name:"gui-address"` GUIAddress string `name:"gui-address"`
GUIAPIKey string `name:"gui-apikey"` GUIAPIKey string `name:"gui-apikey"`
HomeDir string `name:"home"`
Show showCommand `cmd:"" help:"Show command group"` ConfDir string `name:"config"`
Debug debugCommand `cmd:"" help:"Debug command group"` DataDir string `name:"data"`
Operations operationCommand `cmd:"" help:"Operation command group"`
Errors errorsCommand `cmd:"" help:"Error command group"`
Config configCommand `cmd:"" help:"Configuration modification command group" passthrough:""`
Stdin stdinCommand `cmd:"" name:"-" help:"Read commands from stdin"`
} }
type Context struct { func Run() error {
clientFactory *apiClientFactory // This is somewhat a hack around a chicken and egg problem. We need to set
// the home directory and potentially other flags to know where the
// syncthing instance is running in order to get it's config ... which we
// then use to construct the actual CLI ... at which point it's too late to
// add flags there...
c := preCli{}
parseFlags(&c)
return runInternal(c, os.Args)
} }
func (cli CLI) AfterApply(kongCtx *kong.Context) error { func RunWithArgs(cliArgs []string) error {
err := cmdutil.SetConfigDataLocationsFromFlags(cli.HomeDir, cli.ConfDir, cli.DataDir) c := preCli{}
parseFlagsWithArgs(cliArgs, &c)
return runInternal(c, cliArgs)
}
func runInternal(c preCli, cliArgs []string) error {
// Not set as default above because the strings can be really long.
err := cmdutil.SetConfigDataLocationsFromFlags(c.HomeDir, c.ConfDir, c.DataDir)
if err != nil { if err != nil {
return fmt.Errorf("command line options: %w", err) return fmt.Errorf("Command line options: %w", err)
} }
clientFactory := &apiClientFactory{ clientFactory := &apiClientFactory{
cfg: config.GUIConfiguration{ cfg: config.GUIConfiguration{
RawAddress: cli.GUIAddress, RawAddress: c.GUIAddress,
APIKey: cli.GUIAPIKey, APIKey: c.GUIAPIKey,
}, },
} }
context := Context{ configCommand, err := getConfigCommand(clientFactory)
clientFactory: clientFactory,
}
kongCtx.Bind(context)
return nil
}
type stdinCommand struct{}
func RunWithArgs(args []string) error {
var cli CLI
p, err := kong.New(&cli)
if err != nil { if err != nil {
// can't happen, really
return fmt.Errorf("creating parser: %w", err)
}
kongplete.Complete(p)
ctx, err := p.Parse(args)
if err != nil {
fmt.Println("Error:", err)
return err return err
} }
if err := ctx.Run(); err != nil {
fmt.Println("Error:", err) // Implement the same flags at the upper CLI, but do nothing with them.
// This is so that the usage text is the same
fakeFlags := []cli.Flag{
cli.StringFlag{
Name: "gui-address",
Usage: "Override GUI address to `URL` (e.g. \"192.0.2.42:8443\")",
},
cli.StringFlag{
Name: "gui-apikey",
Usage: "Override GUI API key to `API-KEY`",
},
cli.StringFlag{
Name: "home",
Usage: "Set configuration and data directory to `PATH`",
},
cli.StringFlag{
Name: "config",
Usage: "Set configuration directory (config and keys) to `PATH`",
},
cli.StringFlag{
Name: "data",
Usage: "Set data directory (database and logs) to `PATH`",
},
}
// Construct the actual CLI
app := cli.NewApp()
app.Author = "The Syncthing Authors"
app.Metadata = map[string]interface{}{
"clientFactory": clientFactory,
}
app.Commands = []cli.Command{{
Name: "cli",
Usage: "Syncthing command line interface",
Flags: fakeFlags,
Subcommands: []cli.Command{
configCommand,
showCommand,
operationCommand,
errorsCommand,
debugCommand,
{
Name: "-",
HideHelp: true,
Usage: "Read commands from stdin",
Action: func(ctx *cli.Context) error {
if ctx.NArg() > 0 {
return errors.New("command does not expect any arguments")
}
// Drop the `-` not to recurse into self.
args := make([]string, len(cliArgs)-1)
copy(args, cliArgs)
fmt.Println("Reading commands from stdin...", args)
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
input, err := shlex.Split(scanner.Text())
if err != nil {
return fmt.Errorf("parsing input: %w", err)
}
if len(input) == 0 {
continue
}
err = app.Run(append(args, input...))
if err != nil {
return err
}
}
return scanner.Err()
},
},
},
}}
return app.Run(cliArgs)
}
func parseFlags(c *preCli) error {
// kong only needs to parse the global arguments after "cli" and before the
// subcommand (if any).
if len(os.Args) <= 2 {
return nil
}
return parseFlagsWithArgs(os.Args[2:], c)
}
func parseFlagsWithArgs(args []string, c *preCli) error {
for i := 0; i < len(args); i++ {
if !strings.HasPrefix(args[i], "--") {
args = args[:i]
break
}
if !strings.Contains(args[i], "=") {
i++
}
}
// We don't want kong to print anything nor os.Exit (e.g. on -h)
parser, err := kong.New(c, kong.Writers(io.Discard, io.Discard), kong.Exit(func(int) {}))
if err != nil {
return err return err
} }
return nil _, err = parser.Parse(args)
return err
} }

View File

@ -12,43 +12,48 @@ import (
"fmt" "fmt"
"path/filepath" "path/filepath"
"github.com/alecthomas/kong"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/fs"
"github.com/urfave/cli"
) )
type folderOverrideCommand struct { var operationCommand = cli.Command{
FolderID string `arg:""` Name: "operations",
HideHelp: true,
Usage: "Operation command group",
Subcommands: []cli.Command{
{
Name: "restart",
Usage: "Restart syncthing",
Action: expects(0, emptyPost("system/restart")),
},
{
Name: "shutdown",
Usage: "Shutdown syncthing",
Action: expects(0, emptyPost("system/shutdown")),
},
{
Name: "upgrade",
Usage: "Upgrade syncthing (if a newer version is available)",
Action: expects(0, emptyPost("system/upgrade")),
},
{
Name: "folder-override",
Usage: "Override changes on folder (remote for sendonly, local for receiveonly). WARNING: Destructive - deletes/changes your data.",
ArgsUsage: "FOLDER-ID",
Action: expects(1, foldersOverride),
},
{
Name: "default-ignores",
Usage: "Set the default ignores (config) from a file",
ArgsUsage: "PATH",
Action: expects(1, setDefaultIgnores),
},
},
} }
type defaultIgnoresCommand struct { func foldersOverride(c *cli.Context) error {
Path string `arg:""` client, err := getClientFactory(c).getClient()
}
type operationCommand struct {
Restart struct{} `cmd:"" help:"Restart syncthing"`
Shutdown struct{} `cmd:"" help:"Shutdown syncthing"`
Upgrade struct{} `cmd:"" help:"Upgrade syncthing (if a newer version is available)"`
FolderOverride folderOverrideCommand `cmd:"" help:"Override changes on folder (remote for sendonly, local for receiveonly). WARNING: Destructive - deletes/changes your data"`
DefaultIgnores defaultIgnoresCommand `cmd:"" help:"Set the default ignores (config) from a file"`
}
func (*operationCommand) Run(ctx Context, kongCtx *kong.Context) error {
f := ctx.clientFactory
switch kongCtx.Selected().Name {
case "restart":
return emptyPost("system/restart", f)
case "shutdown":
return emptyPost("system/shutdown", f)
case "upgrade":
return emptyPost("system/upgrade", f)
}
return nil
}
func (f *folderOverrideCommand) Run(ctx Context) error {
client, err := ctx.clientFactory.getClient()
if err != nil { if err != nil {
return err return err
} }
@ -56,7 +61,7 @@ func (f *folderOverrideCommand) Run(ctx Context) error {
if err != nil { if err != nil {
return err return err
} }
rid := f.FolderID rid := c.Args()[0]
for _, folder := range cfg.Folders { for _, folder := range cfg.Folders {
if folder.ID == rid { if folder.ID == rid {
response, err := client.Post("db/override", "") response, err := client.Post("db/override", "")
@ -81,12 +86,12 @@ func (f *folderOverrideCommand) Run(ctx Context) error {
return fmt.Errorf("Folder %q not found", rid) return fmt.Errorf("Folder %q not found", rid)
} }
func (d *defaultIgnoresCommand) Run(ctx Context) error { func setDefaultIgnores(c *cli.Context) error {
client, err := ctx.clientFactory.getClient() client, err := getClientFactory(c).getClient()
if err != nil { if err != nil {
return err return err
} }
dir, file := filepath.Split(d.Path) dir, file := filepath.Split(c.Args()[0])
filesystem := fs.NewFilesystem(fs.FilesystemTypeBasic, dir) filesystem := fs.NewFilesystem(fs.FilesystemTypeBasic, dir)
fd, err := filesystem.Open(file) fd, err := filesystem.Open(file)

View File

@ -9,30 +9,37 @@ package cli
import ( import (
"net/url" "net/url"
"github.com/alecthomas/kong" "github.com/urfave/cli"
) )
type pendingCommand struct { var pendingCommand = cli.Command{
Devices struct{} `cmd:"" help:"Show pending devices"` Name: "pending",
Folders struct { HideHelp: true,
Device string `help:"Show pending folders offered by given device"` Usage: "Pending subcommand group",
} `cmd:"" help:"Show pending folders"` Subcommands: []cli.Command{
{
Name: "devices",
Usage: "Show pending devices",
Action: expects(0, indexDumpOutput("cluster/pending/devices")),
},
{
Name: "folders",
Usage: "Show pending folders",
Flags: []cli.Flag{
cli.StringFlag{Name: "device", Usage: "Show pending folders offered by given device"},
},
Action: expects(0, folders()),
},
},
} }
func (p *pendingCommand) Run(ctx Context, kongCtx *kong.Context) error { func folders() cli.ActionFunc {
indexDumpOutput := indexDumpOutputWrapper(ctx.clientFactory) return func(c *cli.Context) error {
if c.String("device") != "" {
switch kongCtx.Selected().Name {
case "devices":
return indexDumpOutput("cluster/pending/devices")
case "folders":
if p.Folders.Device != "" {
query := make(url.Values) query := make(url.Values)
query.Set("device", p.Folders.Device) query.Set("device", c.String("device"))
return indexDumpOutput("cluster/pending/folders?" + query.Encode()) return indexDumpOutput("cluster/pending/folders?" + query.Encode())(c)
} }
return indexDumpOutput("cluster/pending/folders") return indexDumpOutput("cluster/pending/folders")(c)
} }
return nil
} }

View File

@ -7,36 +7,44 @@
package cli package cli
import ( import (
"github.com/alecthomas/kong" "github.com/urfave/cli"
) )
type showCommand struct { var showCommand = cli.Command{
Version struct{} `cmd:"" help:"Show syncthing client version"` Name: "show",
ConfigStatus struct{} `cmd:"" help:"Show configuration status, whether or not a restart is required for changes to take effect"` HideHelp: true,
System struct{} `cmd:"" help:"Show system status"` Usage: "Show command group",
Connections struct{} `cmd:"" help:"Report about connections to other devices"` Subcommands: []cli.Command{
Discovery struct{} `cmd:"" help:"Show the discovered addresses of remote devices (from cache of the running syncthing instance)"` {
Usage struct{} `cmd:"" help:"Show usage report"` Name: "version",
Pending pendingCommand `cmd:"" help:"Pending subcommand group"` Usage: "Show syncthing client version",
} Action: expects(0, indexDumpOutput("system/version")),
},
func (*showCommand) Run(ctx Context, kongCtx *kong.Context) error { {
indexDumpOutput := indexDumpOutputWrapper(ctx.clientFactory) Name: "config-status",
Usage: "Show configuration status, whether or not a restart is required for changes to take effect",
switch kongCtx.Selected().Name { Action: expects(0, indexDumpOutput("config/restart-required")),
case "version": },
return indexDumpOutput("system/version") {
case "config-status": Name: "system",
return indexDumpOutput("config/restart-required") Usage: "Show system status",
case "system": Action: expects(0, indexDumpOutput("system/status")),
return indexDumpOutput("system/status") },
case "connections": {
return indexDumpOutput("system/connections") Name: "connections",
case "discovery": Usage: "Report about connections to other devices",
return indexDumpOutput("system/discovery") Action: expects(0, indexDumpOutput("system/connections")),
case "usage": },
return indexDumpOutput("svc/report") {
} Name: "discovery",
Usage: "Show the discovered addresses of remote devices (from cache of the running syncthing instance)",
return nil Action: expects(0, indexDumpOutput("system/discovery")),
},
pendingCommand,
{
Name: "usage",
Usage: "Show usage report",
Action: expects(0, indexDumpOutput("svc/report")),
},
},
} }

View File

@ -19,6 +19,7 @@ import (
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/locations" "github.com/syncthing/syncthing/lib/locations"
"github.com/urfave/cli"
) )
func responseToBArray(response *http.Response) ([]byte, error) { func responseToBArray(response *http.Response) ([]byte, error) {
@ -29,72 +30,68 @@ func responseToBArray(response *http.Response) ([]byte, error) {
return bytes, response.Body.Close() return bytes, response.Body.Close()
} }
func emptyPost(url string, apiClientFactory *apiClientFactory) error { func emptyPost(url string) cli.ActionFunc {
client, err := apiClientFactory.getClient() return func(c *cli.Context) error {
if err != nil { client, err := getClientFactory(c).getClient()
if err != nil {
return err
}
_, err = client.Post(url, "")
return err return err
} }
_, err = client.Post(url, "")
return err
}
func indexDumpOutputWrapper(apiClientFactory *apiClientFactory) func(url string) error {
return func(url string) error {
return indexDumpOutput(url, apiClientFactory)
}
} }
func indexDumpOutput(url string, apiClientFactory *apiClientFactory) error { func indexDumpOutput(url string) cli.ActionFunc {
client, err := apiClientFactory.getClient() return func(c *cli.Context) error {
if err != nil { client, err := getClientFactory(c).getClient()
return err if err != nil {
return err
}
response, err := client.Get(url)
if errors.Is(err, errNotFound) {
return errors.New("not found (folder/file not in database)")
}
if err != nil {
return err
}
return prettyPrintResponse(response)
} }
response, err := client.Get(url)
if errors.Is(err, errNotFound) {
return errors.New("not found (folder/file not in database)")
}
if err != nil {
return err
}
return prettyPrintResponse(response)
} }
func saveToFile(url string, apiClientFactory *apiClientFactory) error { func saveToFile(url string) cli.ActionFunc {
client, err := apiClientFactory.getClient() return func(c *cli.Context) error {
if err != nil { client, err := getClientFactory(c).getClient()
if err != nil {
return err
}
response, err := client.Get(url)
if err != nil {
return err
}
_, params, err := mime.ParseMediaType(response.Header.Get("Content-Disposition"))
if err != nil {
return err
}
filename := params["filename"]
if filename == "" {
return errors.New("Missing filename in response")
}
bs, err := responseToBArray(response)
if err != nil {
return err
}
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
_, err = f.Write(bs)
if err != nil {
return err
}
fmt.Println("Wrote results to", filename)
return err return err
} }
response, err := client.Get(url)
if err != nil {
return err
}
_, params, err := mime.ParseMediaType(response.Header.Get("Content-Disposition"))
if err != nil {
return err
}
filename := params["filename"]
if filename == "" {
return errors.New("Missing filename in response")
}
bs, err := responseToBArray(response)
if err != nil {
return err
}
f, err := os.Create(filename)
if err != nil {
return err
}
_, err = f.Write(bs)
if err != nil {
_ = f.Close()
return err
}
err = f.Close()
if err != nil {
return err
}
fmt.Println("Wrote results to", filename)
return nil
} }
func getConfig(c APIClient) (config.Configuration, error) { func getConfig(c APIClient) (config.Configuration, error) {
@ -114,6 +111,19 @@ func getConfig(c APIClient) (config.Configuration, error) {
return cfg, nil return cfg, nil
} }
func expects(n int, actionFunc cli.ActionFunc) cli.ActionFunc {
return func(ctx *cli.Context) error {
if ctx.NArg() != n {
plural := ""
if n != 1 {
plural = "s"
}
return fmt.Errorf("expected %d argument%s, got %d", n, plural, ctx.NArg())
}
return actionFunc(ctx)
}
}
func prettyPrintJSON(data interface{}) error { func prettyPrintJSON(data interface{}) error {
enc := json.NewEncoder(os.Stdout) enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ") enc.SetIndent("", " ")
@ -149,3 +159,7 @@ func nulString(bs []byte) string {
func normalizePath(path string) string { func normalizePath(path string) string {
return filepath.ToSlash(filepath.Clean(path)) return filepath.ToSlash(filepath.Clean(path))
} }
func getClientFactory(c *cli.Context) *apiClientFactory {
return c.App.Metadata["clientFactory"].(*apiClientFactory)
}

View File

@ -9,8 +9,8 @@ package cmdutil
// CommonOptions are reused among several subcommands // CommonOptions are reused among several subcommands
type CommonOptions struct { type CommonOptions struct {
buildCommonOptions buildCommonOptions
ConfDir string `name:"config" placeholder:"PATH" env:"STCONFDIR" help:"Set configuration directory (config and keys)"` ConfDir string `name:"config" placeholder:"PATH" help:"Set configuration directory (config and keys)"`
HomeDir string `name:"home" placeholder:"PATH" env:"STHOMEDIR" help:"Set configuration and data directory"` HomeDir string `name:"home" placeholder:"PATH" help:"Set configuration and data directory"`
NoDefaultFolder bool `env:"STNODEFAULTFOLDER" help:"Don't create the \"default\" folder on first startup"` NoDefaultFolder bool `env:"STNODEFAULTFOLDER" help:"Don't create the \"default\" folder on first startup"`
SkipPortProbing bool `help:"Don't try to find free ports for GUI and listen addresses on first startup"` SkipPortProbing bool `help:"Don't try to find free ports for GUI and listen addresses on first startup"`
} }

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"bytes" "bytes"

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"bytes" "bytes"

View File

@ -4,12 +4,12 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"github.com/syncthing/syncthing/lib/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
l = logger.DefaultLogger.NewFacility("syncthing_main", "Syncthing package") l = logger.DefaultLogger.NewFacility("main", "Main package")
) )

View File

@ -35,7 +35,6 @@ type CLI struct {
TokenPath string `placeholder:"PATH" help:"Path to the token file within the folder (used to determine folder ID)"` TokenPath string `placeholder:"PATH" help:"Path to the token file within the folder (used to determine folder ID)"`
folderKey *[32]byte folderKey *[32]byte
keyGen *protocol.KeyGenerator
} }
type storedEncryptionToken struct { type storedEncryptionToken struct {
@ -69,8 +68,7 @@ func (c *CLI) Run() error {
} }
} }
c.keyGen = protocol.NewKeyGenerator() c.folderKey = protocol.KeyFromPassword(c.FolderID, c.Password)
c.folderKey = c.keyGen.KeyFromPassword(c.FolderID, c.Password)
return c.walk() return c.walk()
} }
@ -153,7 +151,7 @@ func (c *CLI) process(srcFs fs.Filesystem, dstFs fs.Filesystem, path string) err
// in native format, while protocol expects wire format (slashes). // in native format, while protocol expects wire format (slashes).
encFi.Name = osutil.NormalizedFilename(encFi.Name) encFi.Name = osutil.NormalizedFilename(encFi.Name)
plainFi, err := protocol.DecryptFileInfo(c.keyGen, *encFi, c.folderKey) plainFi, err := protocol.DecryptFileInfo(*encFi, c.folderKey)
if err != nil { if err != nil {
return fmt.Errorf("%s: decrypting metadata: %w", path, err) return fmt.Errorf("%s: decrypting metadata: %w", path, err)
} }
@ -164,7 +162,7 @@ func (c *CLI) process(srcFs fs.Filesystem, dstFs fs.Filesystem, path string) err
var plainFd fs.File var plainFd fs.File
if dstFs != nil { if dstFs != nil {
if err := dstFs.MkdirAll(filepath.Dir(plainFi.Name), 0o700); err != nil { if err := dstFs.MkdirAll(filepath.Dir(plainFi.Name), 0700); err != nil {
return fmt.Errorf("%s: %w", plainFi.Name, err) return fmt.Errorf("%s: %w", plainFi.Name, err)
} }
@ -211,7 +209,7 @@ func (c *CLI) decryptFile(encFi *protocol.FileInfo, plainFi *protocol.FileInfo,
return fmt.Errorf("block count mismatch: encrypted %d != plaintext %d", len(encFi.Blocks), len(plainFi.Blocks)) return fmt.Errorf("block count mismatch: encrypted %d != plaintext %d", len(encFi.Blocks), len(plainFi.Blocks))
} }
fileKey := c.keyGen.FileKey(plainFi.Name, c.folderKey) fileKey := protocol.FileKey(plainFi.Name, c.folderKey)
for i, encBlock := range encFi.Blocks { for i, encBlock := range encFi.Blocks {
// Read the encrypted block // Read the encrypted block
buf := make([]byte, encBlock.Size) buf := make([]byte, encBlock.Size)

View File

@ -69,7 +69,7 @@ func Generate(l logger.Logger, confDir, guiUser, guiPassword string, noDefaultFo
return err return err
} }
if err := syncthing.EnsureDir(dir, 0o700); err != nil { if err := syncthing.EnsureDir(dir, 0700); err != nil {
return err return err
} }
locations.SetBaseDir(locations.ConfigBaseDir, dir) locations.SetBaseDir(locations.ConfigBaseDir, dir)
@ -127,7 +127,7 @@ func updateGUIAuthentication(l logger.Logger, guiCfg *config.GUIConfiguration, g
} }
if guiPassword != "" && guiCfg.Password != guiPassword { if guiPassword != "" && guiCfg.Password != guiPassword {
if err := guiCfg.SetPassword(guiPassword); err != nil { if err := guiCfg.HashAndSetPassword(guiPassword); err != nil {
return fmt.Errorf("failed to set GUI authentication password: %w", err) return fmt.Errorf("failed to set GUI authentication password: %w", err)
} }
l.Infoln("Updated GUI authentication password.") l.Infoln("Updated GUI authentication password.")

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"fmt" "fmt"

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"bytes" "bytes"
@ -22,6 +22,7 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime"
"runtime/pprof" "runtime/pprof"
"sort" "sort"
"strconv" "strconv"
@ -30,10 +31,9 @@ import (
"time" "time"
"github.com/alecthomas/kong" "github.com/alecthomas/kong"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/thejerf/suture/v4" "github.com/thejerf/suture/v4"
"github.com/willabides/kongplete"
"github.com/syncthing/syncthing/cmd/syncthing/cli"
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil" "github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
"github.com/syncthing/syncthing/cmd/syncthing/decrypt" "github.com/syncthing/syncthing/cmd/syncthing/decrypt"
"github.com/syncthing/syncthing/cmd/syncthing/generate" "github.com/syncthing/syncthing/cmd/syncthing/generate"
@ -88,6 +88,9 @@ above.
STTRACE A comma separated string of facilities to trace. The valid STTRACE A comma separated string of facilities to trace. The valid
facility strings are listed below. facility strings are listed below.
STDEADLOCKTIMEOUT Used for debugging internal deadlocks; sets debug
sensitivity. Use only under direction of a developer.
STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
sensitivity. Use only under direction of a developer. sensitivity. Use only under direction of a developer.
@ -96,11 +99,6 @@ above.
"minio" for the github.com/minio/sha256-simd implementation, "minio" for the github.com/minio/sha256-simd implementation,
and blank (the default) for auto detection. and blank (the default) for auto detection.
STVERSIONEXTRA Add extra information to the version string in logs and the
version line in the GUI. Can be set to the name of a wrapper
or tool controlling syncthing to communicate this to the end
user.
GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
available CPU cores. available CPU cores.
@ -133,9 +131,10 @@ var (
// commands and options here are top level commands to syncthing. // commands and options here are top level commands to syncthing.
// Cli is just a placeholder for the help text (see main). // Cli is just a placeholder for the help text (see main).
var entrypoint struct { var entrypoint struct {
Serve serveOptions `cmd:"" help:"Run Syncthing"` Serve serveOptions `cmd:"" help:"Run Syncthing"`
Generate generate.CLI `cmd:"" help:"Generate key and config, then exit"` Generate generate.CLI `cmd:"" help:"Generate key and config, then exit"`
Decrypt decrypt.CLI `cmd:"" help:"Decrypt or verify an encrypted folder"` Decrypt decrypt.CLI `cmd:"" help:"Decrypt or verify an encrypted folder"`
Cli struct{} `cmd:"" help:"Command line interface for Syncthing"`
} }
// serveOptions are the options for the `syncthing serve` command. // serveOptions are the options for the `syncthing serve` command.
@ -145,9 +144,9 @@ type serveOptions struct {
Audit bool `help:"Write events to audit file"` Audit bool `help:"Write events to audit file"`
AuditFile string `name:"auditfile" placeholder:"PATH" help:"Specify audit file (use \"-\" for stdout, \"--\" for stderr)"` AuditFile string `name:"auditfile" placeholder:"PATH" help:"Specify audit file (use \"-\" for stdout, \"--\" for stderr)"`
BrowserOnly bool `help:"Open GUI in browser"` BrowserOnly bool `help:"Open GUI in browser"`
DataDir string `name:"data" placeholder:"PATH" env:"STDATADIR" help:"Set data directory (database and logs)"` DataDir string `name:"data" placeholder:"PATH" help:"Set data directory (database and logs)"`
DeviceID bool `help:"Show the device ID"` DeviceID bool `help:"Show the device ID"`
GenerateDir string `name:"generate" placeholder:"PATH" help:"Generate key and config in specified dir, then exit"` // DEPRECATED: replaced by subcommand! GenerateDir string `name:"generate" placeholder:"PATH" help:"Generate key and config in specified dir, then exit"` //DEPRECATED: replaced by subcommand!
GUIAddress string `name:"gui-address" placeholder:"URL" help:"Override GUI address (e.g. \"http://192.0.2.42:8443\")"` GUIAddress string `name:"gui-address" placeholder:"URL" help:"Override GUI address (e.g. \"http://192.0.2.42:8443\")"`
GUIAPIKey string `name:"gui-apikey" placeholder:"API-KEY" help:"Override GUI API key"` GUIAPIKey string `name:"gui-apikey" placeholder:"API-KEY" help:"Override GUI API key"`
LogFile string `name:"logfile" default:"${logFile}" placeholder:"PATH" help:"Log file name (see below)"` LogFile string `name:"logfile" default:"${logFile}" placeholder:"PATH" help:"Log file name (see below)"`
@ -169,6 +168,7 @@ type serveOptions struct {
// Debug options below // Debug options below
DebugDBIndirectGCInterval time.Duration `env:"STGCINDIRECTEVERY" help:"Database indirection GC interval"` DebugDBIndirectGCInterval time.Duration `env:"STGCINDIRECTEVERY" help:"Database indirection GC interval"`
DebugDBRecheckInterval time.Duration `env:"STRECHECKDBEVERY" help:"Database metadata recalculation interval"` DebugDBRecheckInterval time.Duration `env:"STRECHECKDBEVERY" help:"Database metadata recalculation interval"`
DebugDeadlockTimeout int `placeholder:"SECONDS" env:"STDEADLOCKTIMEOUT" help:"Used for debugging internal deadlocks"`
DebugGUIAssetsDir string `placeholder:"PATH" help:"Directory to load GUI assets from" env:"STGUIASSETS"` DebugGUIAssetsDir string `placeholder:"PATH" help:"Directory to load GUI assets from" env:"STGUIASSETS"`
DebugPerfStats bool `env:"STPERFSTATS" help:"Write running performance statistics to perf-$pid.csv (Unix only)"` DebugPerfStats bool `env:"STPERFSTATS" help:"Write running performance statistics to perf-$pid.csv (Unix only)"`
DebugProfileBlock bool `env:"STBLOCKPROFILE" help:"Write block profiles to block-$pid-$timestamp.pprof every 20 seconds"` DebugProfileBlock bool `env:"STBLOCKPROFILE" help:"Write block profiles to block-$pid-$timestamp.pprof every 20 seconds"`
@ -207,10 +207,23 @@ func defaultVars() kong.Vars {
return vars return vars
} }
func RunWithArgs(args []string) error { func main() {
// The "cli" subcommand uses a different command line parser, and e.g. help
// gets mangled when integrating it as a subcommand -> detect it here at the
// beginning.
if len(os.Args) > 1 && os.Args[1] == "cli" {
if err := cli.Run(); err != nil {
fmt.Println(err)
os.Exit(1)
}
return
}
// First some massaging of the raw command line to fit the new model. // First some massaging of the raw command line to fit the new model.
// Basically this means adding the default command at the front, and // Basically this means adding the default command at the front, and
// converting -options to --options. // converting -options to --options.
args := os.Args[1:]
switch { switch {
case len(args) == 0: case len(args) == 0:
// Empty command line is equivalent to just calling serve // Empty command line is equivalent to just calling serve
@ -231,26 +244,16 @@ func RunWithArgs(args []string) error {
// Create a parser with an overridden help function to print our extra // Create a parser with an overridden help function to print our extra
// help info. // help info.
parser, err := kong.New( parser, err := kong.New(&entrypoint, kong.Help(helpHandler), defaultVars())
&entrypoint,
kong.ConfigureHelp(kong.HelpOptions{
NoExpandSubcommands: true,
Compact: true,
}),
kong.Help(helpHandler),
defaultVars(),
)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
kongplete.Complete(parser)
ctx, err := parser.Parse(args) ctx, err := parser.Parse(args)
parser.FatalIfErrorf(err) parser.FatalIfErrorf(err)
ctx.BindTo(l, (*logger.Logger)(nil)) // main logger available to subcommands ctx.BindTo(l, (*logger.Logger)(nil)) // main logger available to subcommands
err = ctx.Run() err = ctx.Run()
parser.FatalIfErrorf(err) parser.FatalIfErrorf(err)
return err
} }
func helpHandler(options kong.HelpOptions, ctx *kong.Context) error { func helpHandler(options kong.HelpOptions, ctx *kong.Context) error {
@ -351,7 +354,7 @@ func (options serveOptions) Run() error {
} }
// Ensure that our home directory exists. // Ensure that our home directory exists.
if err := syncthing.EnsureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0o700); err != nil { if err := syncthing.EnsureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0700); err != nil {
l.Warnln("Failure on home directory:", err) l.Warnln("Failure on home directory:", err)
os.Exit(svcutil.ExitError.AsInt()) os.Exit(svcutil.ExitError.AsInt())
} }
@ -618,6 +621,7 @@ func syncthingMain(options serveOptions) {
} }
appOpts := syncthing.Options{ appOpts := syncthing.Options{
DeadlockTimeoutS: options.DebugDeadlockTimeout,
NoUpgrade: options.NoUpgrade, NoUpgrade: options.NoUpgrade,
ProfilerAddr: options.DebugProfilerListen, ProfilerAddr: options.DebugProfilerListen,
ResetDeltaIdxs: options.DebugResetDeltaIdxs, ResetDeltaIdxs: options.DebugResetDeltaIdxs,
@ -628,6 +632,10 @@ func syncthingMain(options serveOptions) {
if options.Audit { if options.Audit {
appOpts.AuditWriter = auditWriter(options.AuditFile) appOpts.AuditWriter = auditWriter(options.AuditFile)
} }
if t := os.Getenv("STDEADLOCKTIMEOUT"); t != "" {
secs, _ := strconv.Atoi(t)
appOpts.DeadlockTimeoutS = secs
}
if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil { if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil {
appOpts.DBRecheckInterval = dur appOpts.DBRecheckInterval = dur
} }
@ -647,6 +655,10 @@ func syncthingMain(options serveOptions) {
setupSignalHandling(app) setupSignalHandling(app)
if os.Getenv("GOMAXPROCS") == "" {
runtime.GOMAXPROCS(runtime.NumCPU())
}
if options.DebugProfileCPU { if options.DebugProfileCPU {
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid())) f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
if err != nil { if err != nil {
@ -710,6 +722,7 @@ func setupSignalHandling(app *syncthing.App) {
func loadOrDefaultConfig() (config.Wrapper, error) { func loadOrDefaultConfig() (config.Wrapper, error) {
cfgFile := locations.Get(locations.ConfigFile) cfgFile := locations.Get(locations.ConfigFile)
cfg, _, err := config.Load(cfgFile, protocol.EmptyDeviceID, events.NoopLogger) cfg, _, err := config.Load(cfgFile, protocol.EmptyDeviceID, events.NoopLogger)
if err != nil { if err != nil {
newCfg := config.New(protocol.EmptyDeviceID) newCfg := config.New(protocol.EmptyDeviceID)
return config.Wrap(cfgFile, newCfg, protocol.EmptyDeviceID, events.NoopLogger), nil return config.Wrap(cfgFile, newCfg, protocol.EmptyDeviceID, events.NoopLogger), nil
@ -737,7 +750,7 @@ func auditWriter(auditFile string) io.Writer {
} else { } else {
auditFlags = os.O_WRONLY | os.O_CREATE | os.O_APPEND auditFlags = os.O_WRONLY | os.O_CREATE | os.O_APPEND
} }
fd, err = os.OpenFile(auditFile, auditFlags, 0o600) fd, err = os.OpenFile(auditFile, auditFlags, 0600)
if err != nil { if err != nil {
l.Warnln("Audit:", err) l.Warnln("Audit:", err)
os.Exit(svcutil.ExitError.AsInt()) os.Exit(svcutil.ExitError.AsInt())
@ -857,7 +870,6 @@ func cleanConfigDirectory() {
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither "backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"tmp-index-sorter.*": time.Minute, // these should never exist on startup "tmp-index-sorter.*": time.Minute, // these should never exist on startup
"support-bundle-*": 30 * 24 * time.Hour, // keep old support bundle zip or folder for a month "support-bundle-*": 30 * 24 * time.Hour, // keep old support bundle zip or folder for a month
"csrftokens.txt": 0, // deprecated, remove immediately
} }
for pat, dur := range patterns { for pat, dur := range patterns {

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"bufio" "bufio"
@ -346,7 +346,7 @@ func restartMonitor(binary string, args []string) error {
} }
func restartMonitorUnix(binary string, args []string) error { func restartMonitorUnix(binary string, args []string) error {
return syscall.Exec(binary, args, os.Environ()) return syscall.Exec(args[0], args, os.Environ())
} }
func restartMonitorWindows(binary string, args []string) error { func restartMonitorWindows(binary string, args []string) error {
@ -521,7 +521,7 @@ func (f *autoclosedFile) ensureOpenLocked() error {
// We open the file for write only, and create it if it doesn't exist. // We open the file for write only, and create it if it doesn't exist.
flags := os.O_WRONLY | os.O_CREATE | os.O_APPEND flags := os.O_WRONLY | os.O_CREATE | os.O_APPEND
fd, err := os.OpenFile(f.name, flags, 0o644) fd, err := os.OpenFile(f.name, flags, 0644)
if err != nil { if err != nil {
return err return err
} }

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing_main package main
import ( import (
"io" "io"

View File

@ -7,7 +7,7 @@
//go:build !windows //go:build !windows
// +build !windows // +build !windows
package syncthing_main package main
import ( import (
"os/exec" "os/exec"

View File

@ -7,7 +7,7 @@
//go:build windows //go:build windows
// +build windows // +build windows
package syncthing_main package main
import "os/exec" import "os/exec"

View File

@ -7,7 +7,7 @@
//go:build !solaris && !windows //go:build !solaris && !windows
// +build !solaris,!windows // +build !solaris,!windows
package syncthing_main package main
import ( import (
"fmt" "fmt"

View File

@ -7,7 +7,7 @@
//go:build solaris || windows //go:build solaris || windows
// +build solaris windows // +build solaris windows
package syncthing_main package main
func startPerfStats() { func startPerfStats() {
} }

View File

@ -7,7 +7,7 @@
//go:build go1.7 //go:build go1.7
// +build go1.7 // +build go1.7
package syncthing_main package main
import "runtime/debug" import "runtime/debug"

View File

@ -4,11 +4,10 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package aggregate package main
import ( import (
"database/sql" "database/sql"
"fmt"
"log" "log"
"os" "os"
"time" "time"
@ -16,21 +15,26 @@ import (
_ "github.com/lib/pq" _ "github.com/lib/pq"
) )
type CLI struct { var dbConn = getEnvDefault("UR_DB_URL", "postgres://user:password@localhost/ur?sslmode=disable")
DBConn string `env:"UR_DB_URL" default:"postgres://user:password@localhost/ur?sslmode=disable"`
func getEnvDefault(key, def string) string {
if val := os.Getenv(key); val != "" {
return val
}
return def
} }
func (cli *CLI) Run() error { func main() {
log.SetFlags(log.Ltime | log.Ldate) log.SetFlags(log.Ltime | log.Ldate)
log.SetOutput(os.Stdout) log.SetOutput(os.Stdout)
db, err := sql.Open("postgres", cli.DBConn) db, err := sql.Open("postgres", dbConn)
if err != nil { if err != nil {
return fmt.Errorf("database: %w", err) log.Fatalln("database:", err)
} }
err = setupDB(db) err = setupDB(db)
if err != nil { if err != nil {
return fmt.Errorf("database: %w", err) log.Fatalln("database:", err)
} }
for { for {
@ -49,6 +53,13 @@ func runAggregation(db *sql.DB) {
} }
log.Println("Inserted", rows, "rows") log.Println("Inserted", rows, "rows")
log.Println("Aggregating UserMovement data")
rows, err = aggregateUserMovement(db)
if err != nil {
log.Println("aggregate:", err)
}
log.Println("Inserted", rows, "rows")
since = maxIndexedDay(db, "Performance") since = maxIndexedDay(db, "Performance")
log.Println("Aggregating Performance data since", since) log.Println("Aggregating Performance data since", since)
rows, err = aggregatePerformance(db, since.Add(24*time.Hour)) rows, err = aggregatePerformance(db, since.Add(24*time.Hour))
@ -83,6 +94,16 @@ func setupDB(db *sql.DB) error {
return err return err
} }
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS UserMovement (
Day TIMESTAMP NOT NULL,
Added INTEGER NOT NULL,
Bounced INTEGER NOT NULL,
Removed INTEGER NOT NULL
)`)
if err != nil {
return err
}
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Performance ( _, err = db.Exec(`CREATE TABLE IF NOT EXISTS Performance (
Day TIMESTAMP NOT NULL, Day TIMESTAMP NOT NULL,
TotFiles INTEGER NOT NULL, TotFiles INTEGER NOT NULL,
@ -98,13 +119,13 @@ func setupDB(db *sql.DB) error {
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS BlockStats ( _, err = db.Exec(`CREATE TABLE IF NOT EXISTS BlockStats (
Day TIMESTAMP NOT NULL, Day TIMESTAMP NOT NULL,
Reports INTEGER NOT NULL, Reports INTEGER NOT NULL,
Total BIGINT NOT NULL, Total INTEGER NOT NULL,
Renamed BIGINT NOT NULL, Renamed INTEGER NOT NULL,
Reused BIGINT NOT NULL, Reused INTEGER NOT NULL,
Pulled BIGINT NOT NULL, Pulled INTEGER NOT NULL,
CopyOrigin BIGINT NOT NULL, CopyOrigin INTEGER NOT NULL,
CopyOriginShifted BIGINT NOT NULL, CopyOriginShifted INTEGER NOT NULL,
CopyElsewhere BIGINT NOT NULL CopyElsewhere INTEGER NOT NULL
)`) )`)
if err != nil { if err != nil {
return err return err
@ -122,6 +143,11 @@ func setupDB(db *sql.DB) error {
_, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`) _, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
} }
row = db.QueryRow(`SELECT 'MovementDayIndex'::regclass`)
if err := row.Scan(&t); err != nil {
_, _ = db.Exec(`CREATE INDEX MovementDayIndex ON UserMovement (Day)`)
}
row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`) row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`)
if err := row.Scan(&t); err != nil { if err := row.Scan(&t); err != nil {
_, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`) _, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
@ -166,6 +192,87 @@ func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
return res.RowsAffected() return res.RowsAffected()
} }
func aggregateUserMovement(db *sql.DB) (int64, error) {
rows, err := db.Query(`SELECT
DATE_TRUNC('day', Received) AS Day,
Report->>'uniqueID'
FROM ReportsJson
WHERE
Report->>'uniqueID' IS NOT NULL
AND Received < DATE_TRUNC('day', NOW())
AND Report->>'version' like 'v_.%'
ORDER BY Day
`)
if err != nil {
return 0, err
}
defer rows.Close()
firstSeen := make(map[string]time.Time)
lastSeen := make(map[string]time.Time)
var minTs time.Time
minTs = minTs.In(time.UTC)
for rows.Next() {
var ts time.Time
var id string
if err := rows.Scan(&ts, &id); err != nil {
return 0, err
}
if minTs.IsZero() {
minTs = ts
}
if _, ok := firstSeen[id]; !ok {
firstSeen[id] = ts
}
lastSeen[id] = ts
}
type sumRow struct {
day time.Time
added int
removed int
bounced int
}
var sumRows []sumRow
for t := minTs; t.Before(time.Now().Truncate(24 * time.Hour)); t = t.AddDate(0, 0, 1) {
var added, removed, bounced int
old := t.Before(time.Now().AddDate(0, 0, -30))
for id, first := range firstSeen {
last := lastSeen[id]
if first.Equal(t) && last.Equal(t) && old {
bounced++
continue
}
if first.Equal(t) {
added++
}
if last == t && old {
removed++
}
}
sumRows = append(sumRows, sumRow{t, added, removed, bounced})
}
tx, err := db.Begin()
if err != nil {
return 0, err
}
if _, err := tx.Exec("DELETE FROM UserMovement"); err != nil {
tx.Rollback()
return 0, err
}
for _, r := range sumRows {
if _, err := tx.Exec("INSERT INTO UserMovement (Day, Added, Removed, Bounced) VALUES ($1, $2, $3, $4)", r.day, r.added, r.removed, r.bounced); err != nil {
tx.Rollback()
return 0, err
}
}
return int64(len(sumRows)), tx.Commit()
}
func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) { func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
res, err := db.Exec(`INSERT INTO Performance ( res, err := db.Exec(`INSERT INTO Performance (
SELECT SELECT
@ -199,13 +306,13 @@ func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
SELECT SELECT
DATE_TRUNC('day', Received) AS Day, DATE_TRUNC('day', Received) AS Day,
COUNT(1) As Reports, COUNT(1) As Reports,
SUM((Report->'blockStats'->>'total')::numeric)::bigint AS Total, SUM((Report->'blockStats'->>'total')::numeric) AS Total,
SUM((Report->'blockStats'->>'renamed')::numeric)::bigint AS Renamed, SUM((Report->'blockStats'->>'renamed')::numeric) AS Renamed,
SUM((Report->'blockStats'->>'reused')::numeric)::bigint AS Reused, SUM((Report->'blockStats'->>'reused')::numeric) AS Reused,
SUM((Report->'blockStats'->>'pulled')::numeric)::bigint AS Pulled, SUM((Report->'blockStats'->>'pulled')::numeric) AS Pulled,
SUM((Report->'blockStats'->>'copyOrigin')::numeric)::bigint AS CopyOrigin, SUM((Report->'blockStats'->>'copyOrigin')::numeric) AS CopyOrigin,
SUM((Report->'blockStats'->>'copyOriginShifted')::numeric)::bigint AS CopyOriginShifted, SUM((Report->'blockStats'->>'copyOriginShifted')::numeric) AS CopyOriginShifted,
SUM((Report->'blockStats'->>'copyElsewhere')::numeric)::bigint AS CopyElsewhere SUM((Report->'blockStats'->>'copyElsewhere')::numeric) AS CopyElsewhere
FROM ReportsJson FROM ReportsJson
WHERE WHERE
Received > $1 Received > $1

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package serve package main
import ( import (
"regexp" "regexp"
@ -145,7 +145,7 @@ func statsForFloats(data []float64) [4]float64 {
return res return res
} }
func group(by func(string) string, as []analytic, perGroup int, otherPct float64) []analytic { func group(by func(string) string, as []analytic, perGroup int) []analytic {
var res []analytic var res []analytic
next: next:
@ -170,25 +170,6 @@ next:
} }
sort.Sort(analyticList(res)) sort.Sort(analyticList(res))
if otherPct > 0 {
// Groups with less than otherPCt go into "Other"
other := analytic{
Key: "Other",
}
for i := 0; i < len(res); i++ {
if res[i].Percentage < otherPct || res[i].Key == "Other" {
other.Count += res[i].Count
other.Percentage += res[i].Percentage
res = append(res[:i], res[i+1:]...)
i--
}
}
if other.Count > 0 {
res = append(res, other)
}
}
return res return res
} }

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package serve package main
import "testing" import "testing"

View File

@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file, // License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/. // You can obtain one at https://mozilla.org/MPL/2.0/.
package serve package main
import ( import (
"bytes" "bytes"

File diff suppressed because it is too large Load Diff

143
cmd/ursrv/migration.go Normal file
View File

@ -0,0 +1,143 @@
// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"database/sql"
"database/sql/driver"
"encoding/json"
"errors"
"log"
"strings"
"github.com/lib/pq"
"github.com/syncthing/syncthing/lib/ur/contract"
)
func migrate(db *sql.DB) error {
var count uint64
log.Println("Checking old table row count, this might take a while...")
if err := db.QueryRow(`SELECT COUNT(1) FROM Reports`).Scan(&count); err != nil || count == 0 {
// err != nil most likely means table does not exist.
return nil
}
log.Printf("Found %d records, will perform migration.", count)
tx, err := db.Begin()
if err != nil {
log.Println("sql:", err)
return err
}
defer tx.Rollback()
// These must be lower case, because we don't quote them when creating, so postgres creates them lower case.
// Yet pg.CopyIn quotes them, which makes them case sensitive.
stmt, err := tx.Prepare(pq.CopyIn("reportsjson", "received", "report"))
if err != nil {
log.Println("sql:", err)
return err
}
// Custom types used in the old struct.
var rep contract.Report
var rescanIntvs pq.Int64Array
var fsWatcherDelay pq.Int64Array
pullOrder := make(IntMap)
fileSystemType := make(IntMap)
themes := make(IntMap)
transportStats := make(IntMap)
rows, err := db.Query(`SELECT ` + strings.Join(rep.FieldNames(), ", ") + `, FolderFsWatcherDelays, RescanIntvs, FolderPullOrder, FolderFilesystemType, GUITheme, Transport FROM Reports`)
if err != nil {
log.Println("sql:", err)
return err
}
defer rows.Close()
var done uint64
pct := count / 100
for rows.Next() {
err := rows.Scan(append(rep.FieldPointers(), &fsWatcherDelay, &rescanIntvs, &pullOrder, &fileSystemType, &themes, &transportStats)...)
if err != nil {
log.Println("sql scan:", err)
return err
}
// Patch up parts that used to use custom types
rep.RescanIntvs = make([]int, len(rescanIntvs))
for i := range rescanIntvs {
rep.RescanIntvs[i] = int(rescanIntvs[i])
}
rep.FolderUsesV3.FsWatcherDelays = make([]int, len(fsWatcherDelay))
for i := range fsWatcherDelay {
rep.FolderUsesV3.FsWatcherDelays[i] = int(fsWatcherDelay[i])
}
rep.FolderUsesV3.PullOrder = pullOrder
rep.FolderUsesV3.FilesystemType = fileSystemType
rep.GUIStats.Theme = themes
rep.TransportStats = transportStats
_, err = stmt.Exec(rep.Received, rep)
if err != nil {
log.Println("sql insert:", err)
return err
}
done++
if done%pct == 0 {
log.Printf("Migration progress %d/%d (%d%%)", done, count, (100*done)/count)
}
}
// Tell the driver bulk copy is finished
_, err = stmt.Exec()
if err != nil {
log.Println("sql stmt exec:", err)
return err
}
err = stmt.Close()
if err != nil {
log.Println("sql stmt close:", err)
return err
}
_, err = tx.Exec("DROP TABLE Reports")
if err != nil {
log.Println("sql drop:", err)
return err
}
err = tx.Commit()
if err != nil {
log.Println("sql commit:", err)
return err
}
return nil
}
type IntMap map[string]int
func (p IntMap) Value() (driver.Value, error) {
return json.Marshal(p)
}
func (p *IntMap) Scan(src interface{}) error {
source, ok := src.([]byte)
if !ok {
return errors.New("Type assertion .([]byte) failed.")
}
var i map[string]int
err := json.Unmarshal(source, &i)
if err != nil {
return err
}
*p = i
return nil
}

View File

@ -1,26 +0,0 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package serve
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "syncthing",
Subsystem: "ursrv",
Name: "reports_total",
}, []string{"version"})
func init() {
metricReportsTotal.WithLabelValues("fail")
metricReportsTotal.WithLabelValues("duplicate")
metricReportsTotal.WithLabelValues("v1")
metricReportsTotal.WithLabelValues("v2")
metricReportsTotal.WithLabelValues("v3")
}

File diff suppressed because it is too large Load Diff

View File

Before

Width:  |  Height:  |  Size: 4.8 KiB

After

Width:  |  Height:  |  Size: 4.8 KiB

Some files were not shown because too many files have changed in this diff Show More