Remove KCP (fixes #4737) (#4741)

Jakob Borg 2018-02-09 11:40:57 +01:00 committed by GitHub
parent 97068c10f3
commit b97d5bcca8
54 changed files with 268 additions and 6169 deletions

View File

@ -336,7 +336,7 @@ func fixupAddresses(remote net.IP, addresses []string) []string {
ip := net.ParseIP(host)
if host == "" || ip.IsUnspecified() {
// Do not use IPv6 remote address if requested scheme is ...4
// (i.e., tcp4, kcp4, etc.)
// (i.e., tcp4, etc.)
if strings.HasSuffix(uri.Scheme, "4") && remote.To4() == nil {
continue
}

View File

@ -199,7 +199,6 @@ func reportData(cfg configIntf, m modelIntf, connectionsService connectionsIntf,
res["limitBandwidthInLan"] = opts.LimitBandwidthInLan
res["customReleaseURL"] = opts.ReleasesURL != "https://upgrades.syncthing.net/meta.json"
res["restartOnWakeup"] = opts.RestartOnWakeup
res["customStunServers"] = len(opts.StunServers) == 0 || opts.StunServers[0] != "default" || len(opts.StunServers) > 1
folderUsesV3 := map[string]int{
"scanProgressDisabled": 0,

View File

@ -39,8 +39,6 @@ const (
var (
// DefaultTCPPort defines default TCP port used if the URI does not specify one, for example tcp://0.0.0.0
DefaultTCPPort = 22000
// DefaultKCPPort defines default KCP (UDP) port used if the URI does not specify one, for example kcp://0.0.0.0
DefaultKCPPort = 22020
// DefaultListenAddresses should be substituted when the configuration
// contains <listenAddress>default</listenAddress>. This is done by the
// "consumer" of the configuration as we don't want these saved to the
@ -48,7 +46,6 @@ var (
DefaultListenAddresses = []string{
util.Address("tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(DefaultTCPPort))),
"dynamic+https://relays.syncthing.net/endpoint",
util.Address("kcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(DefaultKCPPort))),
}
// DefaultDiscoveryServersV4 should be substituted when the configuration
// contains <globalAnnounceServer>default-v4</globalAnnounceServer>.
@ -65,25 +62,6 @@ var (
// DefaultDiscoveryServers should be substituted when the configuration
// contains <globalAnnounceServer>default</globalAnnounceServer>.
DefaultDiscoveryServers = append(DefaultDiscoveryServersV4, DefaultDiscoveryServersV6...)
// DefaultStunServers should be substituted when the configuration
// contains <stunServer>default</stunServer>.
DefaultStunServers = []string{
"stun.callwithus.com:3478",
"stun.counterpath.com:3478",
"stun.counterpath.net:3478",
"stun.ekiga.net:3478",
"stun.ideasip.com:3478",
"stun.internetcalls.com:3478",
"stun.schlund.de:3478",
"stun.sipgate.net:10000",
"stun.sipgate.net:3478",
"stun.voip.aebc.com:3478",
"stun.voiparound.com:3478",
"stun.voipbuster.com:3478",
"stun.voipstunt.com:3478",
"stun.voxgratia.org:3478",
"stun.xten.com:3478",
}
// DefaultTheme is the default and fallback theme for the web UI.
DefaultTheme = "default"
)
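With the kcp entry gone, the substituted listen-address defaults reduce to the TCP listener and the relay pool. A small sketch of what <listenAddress>default</listenAddress> now expands to, assuming util.Address simply renders scheme://host (the lowercase constant below mirrors DefaultTCPPort for illustration only):

package main

import (
	"fmt"
	"net"
	"strconv"
)

const defaultTCPPort = 22000

func main() {
	// After this commit the substituted defaults contain only these two
	// entries; kcp://0.0.0.0:22020 is no longer among them.
	hostPort := net.JoinHostPort("0.0.0.0", strconv.Itoa(defaultTCPPort))
	defaults := []string{
		"tcp://" + hostPort,
		"dynamic+https://relays.syncthing.net/endpoint",
	}
	fmt.Println(defaults)
}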
@ -380,6 +358,16 @@ func (cfg *Configuration) clean() error {
}
cfg.IgnoredDevices = newIgnoredDevices
// Deprecated protocols are removed from the list of listeners and
// device addresses. So far just kcp*.
for _, prefix := range []string{"kcp"} {
cfg.Options.ListenAddresses = filterURLSchemePrefix(cfg.Options.ListenAddresses, prefix)
for i := range cfg.Devices {
dev := &cfg.Devices[i]
dev.Addresses = filterURLSchemePrefix(dev.Addresses, prefix)
}
}
return nil
}
@ -768,3 +756,21 @@ func cleanSymlinks(filesystem fs.Filesystem, dir string) {
return nil
})
}
// filterURLSchemePrefix returns the list of addresses after removing all
// entries whose URL scheme matches the given prefix.
func filterURLSchemePrefix(addrs []string, prefix string) []string {
for i := 0; i < len(addrs); i++ {
uri, err := url.Parse(addrs[i])
if err != nil {
continue
}
if strings.HasPrefix(uri.Scheme, prefix) {
// Remove this entry
copy(addrs[i:], addrs[i+1:])
addrs = addrs[:len(addrs)-1]
i--
}
}
return addrs
}
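For illustration, a self-contained sketch of the in-place removal idiom used by filterURLSchemePrefix above: matching entries are dropped by shifting the tail left with copy, truncating the slice, and re-examining the index that just received a new element. The addresses in main are examples only.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// filterURLSchemePrefix mirrors the helper added in this commit: entries
// whose URL scheme starts with the given prefix are removed in place.
func filterURLSchemePrefix(addrs []string, prefix string) []string {
	for i := 0; i < len(addrs); i++ {
		uri, err := url.Parse(addrs[i])
		if err != nil {
			continue
		}
		if strings.HasPrefix(uri.Scheme, prefix) {
			copy(addrs[i:], addrs[i+1:])
			addrs = addrs[:len(addrs)-1]
			i-- // re-examine the element that moved into position i
		}
	}
	return addrs
}

func main() {
	addrs := []string{"tcp://0.0.0.0:22000", "kcp://0.0.0.0:22020", "kcp4://0.0.0.0:22020"}
	fmt.Println(filterURLSchemePrefix(addrs, "kcp")) // [tcp://0.0.0.0:22000]
}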

View File

@ -68,13 +68,6 @@ func TestDefaultValues(t *testing.T) {
TempIndexMinBlocks: 10,
UnackedNotificationIDs: []string{},
WeakHashSelectionMethod: WeakHashAuto,
StunKeepaliveS: 24,
StunServers: []string{"default"},
KCPCongestionControl: true,
KCPReceiveWindowSize: 128,
KCPSendWindowSize: 128,
KCPUpdateIntervalMs: 25,
KCPFastResend: false,
DefaultFolderPath: "~",
SetLowPriority: true,
}
@ -217,13 +210,6 @@ func TestOverriddenValues(t *testing.T) {
"channelNotification", // added in 17->18 migration
},
WeakHashSelectionMethod: WeakHashNever,
StunKeepaliveS: 10,
StunServers: []string{"a.stun.com", "b.stun.com"},
KCPCongestionControl: false,
KCPReceiveWindowSize: 1280,
KCPSendWindowSize: 1280,
KCPUpdateIntervalMs: 1000,
KCPFastResend: true,
DefaultFolderPath: "/media/syncthing",
SetLowPriority: false,
}
@ -953,6 +939,32 @@ func TestInvalidFolderIDRejected(t *testing.T) {
}
}
func TestFilterURLSchemePrefix(t *testing.T) {
cases := []struct {
before []string
prefix string
after []string
}{
{[]string{}, "kcp", []string{}},
{[]string{"tcp://foo"}, "kcp", []string{"tcp://foo"}},
{[]string{"kcp://foo"}, "kcp", []string{}},
{[]string{"tcp6://foo", "kcp6://foo"}, "kcp", []string{"tcp6://foo"}},
{[]string{"kcp6://foo", "tcp6://foo"}, "kcp", []string{"tcp6://foo"}},
{
[]string{"tcp://foo", "tcp4://foo", "kcp://foo", "kcp4://foo", "banana://foo", "banana4://foo", "banananas!"},
"kcp",
[]string{"tcp://foo", "tcp4://foo", "banana://foo", "banana4://foo", "banananas!"},
},
}
for _, tc := range cases {
res := filterURLSchemePrefix(tc.before, tc.prefix)
if !reflect.DeepEqual(res, tc.after) {
t.Errorf("filterURLSchemePrefix => %q, expected %q", res, tc.after)
}
}
}
// defaultConfigAsMap returns a valid default config as a JSON-decoded
// map[string]interface{}. This is useful to override random elements and
// re-encode into JSON.

View File

@ -134,14 +134,6 @@ type OptionsConfiguration struct {
UnackedNotificationIDs []string `xml:"unackedNotificationID" json:"unackedNotificationIDs"`
TrafficClass int `xml:"trafficClass" json:"trafficClass"`
WeakHashSelectionMethod WeakHashSelectionMethod `xml:"weakHashSelectionMethod" json:"weakHashSelectionMethod" restart:"true"`
StunServers []string `xml:"stunServer" json:"stunServers" default:"default"`
StunKeepaliveS int `xml:"stunKeepaliveSeconds" json:"stunKeepaliveSeconds" default:"24"`
KCPNoDelay bool `xml:"kcpNoDelay" json:"kcpNoDelay" default:"false"`
KCPUpdateIntervalMs int `xml:"kcpUpdateIntervalMs" json:"kcpUpdateIntervalMs" default:"25"`
KCPFastResend bool `xml:"kcpFastResend" json:"kcpFastResend" default:"false"`
KCPCongestionControl bool `xml:"kcpCongestionControl" json:"kcpCongestionControl" default:"true"`
KCPSendWindowSize int `xml:"kcpSendWindowSize" json:"kcpSendWindowSize" default:"128"`
KCPReceiveWindowSize int `xml:"kcpReceiveWindowSize" json:"kcpReceiveWindowSize" default:"128"`
DefaultFolderPath string `xml:"defaultFolderPath" json:"defaultFolderPath" default:"~"`
SetLowPriority bool `xml:"setLowPriority" json:"setLowPriority" default:"true"`

View File

@ -35,14 +35,6 @@
<overwriteRemoteDeviceNamesOnConnect>true</overwriteRemoteDeviceNamesOnConnect>
<tempIndexMinBlocks>100</tempIndexMinBlocks>
<weakHashSelectionMethod>never</weakHashSelectionMethod>
<stunKeepaliveSeconds>10</stunKeepaliveSeconds>
<stunServer>a.stun.com</stunServer>
<stunServer>b.stun.com</stunServer>
<kcpCongestionControl>false</kcpCongestionControl>
<kcpReceiveWindowSize>1280</kcpReceiveWindowSize>
<kcpSendWindowSize>1280</kcpSendWindowSize>
<kcpUpdateIntervalMs>1000</kcpUpdateIntervalMs>
<kcpFastResend>true</kcpFastResend>
<defaultFolderPath>/media/syncthing</defaultFolderPath>
<setLowPriority>false</setLowPriority>
</options>

View File

@ -15,7 +15,6 @@ import (
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/util"
)
@ -433,29 +432,6 @@ func (w *Wrapper) setRequiresRestart() {
atomic.StoreUint32(&w.requiresRestart, 1)
}
func (w *Wrapper) StunServers() []string {
var addresses []string
for _, addr := range w.cfg.Options.StunServers {
switch addr {
case "default":
addresses = append(addresses, DefaultStunServers...)
default:
addresses = append(addresses, addr)
}
}
addresses = util.UniqueStrings(addresses)
// Shuffle
l := len(addresses)
for i := range addresses {
r := rand.Intn(l)
addresses[i], addresses[r] = addresses[r], addresses[i]
}
return addresses
}
func (w *Wrapper) MyName() string {
w.mut.Lock()
myID := w.cfg.MyID

View File

@ -6,28 +6,7 @@
package connections
import (
"time"
"github.com/xtaci/smux"
)
const (
tcpPriority = 10
kcpPriority = 50
relayPriority = 200
// KCP filter priorities
kcpNoFilterPriority = 100
kcpConversationFilterPriority = 20
kcpStunFilterPriority = 10
)
var (
smuxConfig = &smux.Config{
KeepAliveInterval: 10 * time.Second,
KeepAliveTimeout: 30 * time.Second,
MaxFrameSize: 4096,
MaxReceiveBuffer: 4 * 1024 * 1024,
}
)

View File

@ -6,8 +6,13 @@
package connections
import "testing"
import "net/url"
import (
"net/url"
"testing"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/protocol"
)
func TestFixupPort(t *testing.T) {
cases := [][2]string{
@ -105,3 +110,60 @@ func TestAllowedNetworks(t *testing.T) {
}
}
}
func TestGetDialer(t *testing.T) {
mustParseURI := func(v string) *url.URL {
uri, err := url.Parse(v)
if err != nil {
panic(err)
}
return uri
}
cases := []struct {
uri *url.URL
ok bool
disabled bool
deprecated bool
}{
{mustParseURI("tcp://1.2.3.4:5678"), true, false, false}, // ok
{mustParseURI("tcp4://1.2.3.4:5678"), true, false, false}, // ok
{mustParseURI("kcp://1.2.3.4:5678"), false, false, true}, // deprecated
{mustParseURI("relay://1.2.3.4:5678"), false, true, false}, // disabled
{mustParseURI("http://1.2.3.4:5678"), false, false, false}, // generally bad
{mustParseURI("bananas!"), false, false, false}, // wat
}
cfg := config.New(protocol.LocalDeviceID)
cfg.Options.RelaysEnabled = false
for _, tc := range cases {
df, err := getDialerFactory(cfg, tc.uri)
if tc.ok && err != nil {
t.Errorf("getDialerFactory(%q) => %v, expected nil err", tc.uri, err)
}
if tc.ok && df == nil {
t.Errorf("getDialerFactory(%q) => nil factory, expected non-nil", tc.uri)
}
if tc.deprecated && err != errDeprecated {
t.Errorf("getDialerFactory(%q) => %v, expected %v", tc.uri, err, errDeprecated)
}
if tc.disabled && err != errDisabled {
t.Errorf("getDialerFactory(%q) => %v, expected %v", tc.uri, err, errDisabled)
}
lf, err := getListenerFactory(cfg, tc.uri)
if tc.ok && err != nil {
t.Errorf("getListenerFactory(%q) => %v, expected nil err", tc.uri, err)
}
if tc.ok && lf == nil {
t.Errorf("getListenerFactory(%q) => nil factory, expected non-nil", tc.uri)
}
if tc.deprecated && err != errDeprecated {
t.Errorf("getListenerFactory(%q) => %v, expected %v", tc.uri, err, errDeprecated)
}
if tc.disabled && err != errDisabled {
t.Errorf("getListenerFactory(%q) => %v, expected %v", tc.uri, err, errDisabled)
}
}
}

View File

@ -0,0 +1,36 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package connections
import "github.com/syncthing/syncthing/lib/config"
// deprecatedListener is never valid
type deprecatedListener struct {
listenerFactory
}
func (deprecatedListener) Valid(_ config.Configuration) error {
return errDeprecated
}
// deprecatedDialer is never valid
type deprecatedDialer struct {
dialerFactory
}
func (deprecatedDialer) Valid(_ config.Configuration) error {
return errDeprecated
}
func init() {
listeners["kcp"] = deprecatedListener{}
listeners["kcp4"] = deprecatedListener{}
listeners["kcp6"] = deprecatedListener{}
dialers["kcp"] = deprecatedDialer{}
dialers["kcp4"] = deprecatedDialer{}
dialers["kcp6"] = deprecatedDialer{}
}
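The two deprecated types rely on a small Go trick: they embed the factory interface with its zero (nil) value and override only Valid, so the registry still type-checks while every lookup fails early with errDeprecated and the remaining factory methods are never reached. A minimal standalone sketch of that pattern (the factory interface below is a stand-in, not the real listenerFactory or dialerFactory):

package main

import (
	"errors"
	"fmt"
)

var errDeprecated = errors.New("deprecated protocol")

// factory stands in for the listenerFactory/dialerFactory interfaces.
type factory interface {
	Valid() error
	New() string
}

// deprecatedFactory embeds the interface with a nil value: only Valid is
// implemented, and because callers check Valid first, New is never
// reached (calling it would panic on the nil embedded value).
type deprecatedFactory struct {
	factory
}

func (deprecatedFactory) Valid() error { return errDeprecated }

func main() {
	var f factory = deprecatedFactory{}
	if err := f.Valid(); err != nil {
		fmt.Println("skipping:", err) // skipping: deprecated protocol
		return
	}
	fmt.Println(f.New())
}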

View File

@ -1,112 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package connections
import (
"crypto/tls"
"net/url"
"time"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/xtaci/kcp-go"
"github.com/xtaci/smux"
)
func init() {
factory := &kcpDialerFactory{}
for _, scheme := range []string{"kcp", "kcp4", "kcp6"} {
dialers[scheme] = factory
}
}
type kcpDialer struct {
cfg *config.Wrapper
tlsCfg *tls.Config
}
func (d *kcpDialer) Dial(id protocol.DeviceID, uri *url.URL) (internalConn, error) {
uri = fixupPort(uri, config.DefaultKCPPort)
var conn *kcp.UDPSession
var err error
// Try to dial via an existing listening connection
// giving better chances of punching through NAT.
if f := getDialingFilter(); f != nil {
conn, err = kcp.NewConn(uri.Host, nil, 0, 0, f.NewConn(kcpConversationFilterPriority, &kcpConversationFilter{}))
l.Debugf("dial %s using existing conn on %s", uri.String(), conn.LocalAddr())
} else {
conn, err = kcp.DialWithOptions(uri.Host, nil, 0, 0)
}
if err != nil {
return internalConn{}, err
}
opts := d.cfg.Options()
conn.SetStreamMode(true)
conn.SetACKNoDelay(false)
conn.SetWindowSize(opts.KCPSendWindowSize, opts.KCPReceiveWindowSize)
conn.SetNoDelay(boolInt(opts.KCPNoDelay), opts.KCPUpdateIntervalMs, boolInt(opts.KCPFastResend), boolInt(!opts.KCPCongestionControl))
ses, err := smux.Client(conn, smuxConfig)
if err != nil {
conn.Close()
return internalConn{}, err
}
ses.SetDeadline(time.Now().Add(10 * time.Second))
stream, err := ses.OpenStream()
if err != nil {
ses.Close()
return internalConn{}, err
}
ses.SetDeadline(time.Time{})
tc := tls.Client(&sessionClosingStream{stream, ses}, d.tlsCfg)
tc.SetDeadline(time.Now().Add(time.Second * 10))
err = tc.Handshake()
if err != nil {
tc.Close()
return internalConn{}, err
}
tc.SetDeadline(time.Time{})
return internalConn{tc, connTypeKCPClient, kcpPriority}, nil
}
func (d *kcpDialer) RedialFrequency() time.Duration {
// For restricted NATs, the UDP mapping will potentially only be open for 20-30 seconds
// hence try dialing just as often.
return time.Duration(d.cfg.Options().StunKeepaliveS) * time.Second
}
type kcpDialerFactory struct{}
func (kcpDialerFactory) New(cfg *config.Wrapper, tlsCfg *tls.Config) genericDialer {
return &kcpDialer{
cfg: cfg,
tlsCfg: tlsCfg,
}
}
func (kcpDialerFactory) Priority() int {
return kcpPriority
}
func (kcpDialerFactory) AlwaysWAN() bool {
return false
}
func (kcpDialerFactory) Enabled(cfg config.Configuration) bool {
return true
}
func (kcpDialerFactory) String() string {
return "KCP Dialer"
}

View File

@ -1,326 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package connections
import (
"crypto/tls"
"net"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/AudriusButkevicius/pfilter"
"github.com/ccding/go-stun/stun"
"github.com/xtaci/kcp-go"
"github.com/xtaci/smux"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/nat"
)
const stunRetryInterval = 5 * time.Minute
func init() {
factory := &kcpListenerFactory{}
for _, scheme := range []string{"kcp", "kcp4", "kcp6"} {
listeners[scheme] = factory
}
}
type kcpListener struct {
onAddressesChangedNotifier
uri *url.URL
cfg *config.Wrapper
tlsCfg *tls.Config
stop chan struct{}
conns chan internalConn
factory listenerFactory
nat atomic.Value
address *url.URL
err error
mut sync.RWMutex
}
func (t *kcpListener) Serve() {
t.mut.Lock()
t.err = nil
t.mut.Unlock()
network := strings.Replace(t.uri.Scheme, "kcp", "udp", -1)
packetConn, err := net.ListenPacket(network, t.uri.Host)
if err != nil {
t.mut.Lock()
t.err = err
t.mut.Unlock()
l.Infoln("Listen (BEP/kcp):", err)
return
}
filterConn := pfilter.NewPacketFilter(packetConn)
kcpConn := filterConn.NewConn(kcpNoFilterPriority, nil)
stunConn := filterConn.NewConn(kcpStunFilterPriority, &stunFilter{
ids: make(map[string]time.Time),
})
filterConn.Start()
registerFilter(filterConn)
listener, err := kcp.ServeConn(nil, 0, 0, kcpConn)
if err != nil {
t.mut.Lock()
t.err = err
t.mut.Unlock()
l.Infoln("Listen (BEP/kcp):", err)
return
}
defer listener.Close()
defer stunConn.Close()
defer kcpConn.Close()
defer deregisterFilter(filterConn)
defer packetConn.Close()
l.Infof("KCP listener (%v) starting", kcpConn.LocalAddr())
defer l.Infof("KCP listener (%v) shutting down", kcpConn.LocalAddr())
go t.stunRenewal(stunConn)
for {
listener.SetDeadline(time.Now().Add(time.Second))
conn, err := listener.AcceptKCP()
select {
case <-t.stop:
if err == nil {
conn.Close()
}
return
default:
}
if err != nil {
if err, ok := err.(net.Error); !ok || !err.Timeout() {
l.Warnln("Listen (BEP/kcp): Accepting connection:", err)
}
continue
}
opts := t.cfg.Options()
conn.SetStreamMode(true)
conn.SetACKNoDelay(false)
conn.SetWindowSize(opts.KCPSendWindowSize, opts.KCPReceiveWindowSize)
conn.SetNoDelay(boolInt(opts.KCPNoDelay), opts.KCPUpdateIntervalMs, boolInt(opts.KCPFastResend), boolInt(!opts.KCPCongestionControl))
l.Debugln("connect from", conn.RemoteAddr())
ses, err := smux.Server(conn, smuxConfig)
if err != nil {
l.Debugln("smux server:", err)
conn.Close()
continue
}
ses.SetDeadline(time.Now().Add(10 * time.Second))
stream, err := ses.AcceptStream()
if err != nil {
l.Debugln("smux accept:", err)
ses.Close()
continue
}
ses.SetDeadline(time.Time{})
tc := tls.Server(&sessionClosingStream{stream, ses}, t.tlsCfg)
tc.SetDeadline(time.Now().Add(time.Second * 10))
err = tc.Handshake()
if err != nil {
l.Debugln("TLS handshake (BEP/kcp):", err)
tc.Close()
continue
}
tc.SetDeadline(time.Time{})
t.conns <- internalConn{tc, connTypeKCPServer, kcpPriority}
}
}
func (t *kcpListener) Stop() {
close(t.stop)
}
func (t *kcpListener) URI() *url.URL {
return t.uri
}
func (t *kcpListener) WANAddresses() []*url.URL {
uris := t.LANAddresses()
t.mut.RLock()
if t.address != nil {
uris = append(uris, t.address)
}
t.mut.RUnlock()
return uris
}
func (t *kcpListener) LANAddresses() []*url.URL {
return []*url.URL{t.uri}
}
func (t *kcpListener) Error() error {
t.mut.RLock()
err := t.err
t.mut.RUnlock()
return err
}
func (t *kcpListener) String() string {
return t.uri.String()
}
func (t *kcpListener) Factory() listenerFactory {
return t.factory
}
func (t *kcpListener) NATType() string {
v := t.nat.Load().(stun.NATType)
if v == stun.NATUnknown || v == stun.NATError {
return "unknown"
}
return v.String()
}
func (t *kcpListener) stunRenewal(listener net.PacketConn) {
client := stun.NewClientWithConnection(listener)
client.SetSoftwareName("syncthing")
var natType stun.NATType
var extAddr *stun.Host
var udpAddr *net.UDPAddr
var err error
oldType := stun.NATUnknown
for {
disabled:
if t.cfg.Options().StunKeepaliveS < 1 {
time.Sleep(time.Second)
oldType = stun.NATUnknown
t.nat.Store(stun.NATUnknown)
t.mut.Lock()
t.address = nil
t.mut.Unlock()
continue
}
for _, addr := range t.cfg.StunServers() {
// Resolve the address, so that in case the server advertises two
// IPs, we always hit the same one, as otherwise, the mapping might
// expire as we hit the other address, and cause us to flip flop
// between servers/external addresses, as a result flooding discovery
// servers.
udpAddr, err = net.ResolveUDPAddr("udp", addr)
if err != nil {
l.Debugf("%s stun addr resolution on %s: %s", t.uri, addr, err)
continue
}
client.SetServerAddr(udpAddr.String())
natType, extAddr, err = client.Discover()
if err != nil || extAddr == nil {
l.Debugf("%s stun discovery on %s: %s", t.uri, addr, err)
continue
}
// The stun server is most likely borked, try another one.
if natType == stun.NATError || natType == stun.NATUnknown || natType == stun.NATBlocked {
l.Debugf("%s stun discovery on %s resolved to %s", t.uri, addr, natType)
continue
}
if oldType != natType {
l.Infof("%s detected NAT type: %s", t.uri, natType)
t.nat.Store(natType)
oldType = natType
}
// We can't punch through this one, so no point doing keepalives
// and such, just try again in a minute and hope that the NAT type changes.
if !isPunchable(natType) {
break
}
for {
changed := false
uri := *t.uri
uri.Host = extAddr.TransportAddr()
t.mut.Lock()
if t.address == nil || t.address.String() != uri.String() {
l.Infof("%s resolved external address %s (via %s)", t.uri, uri.String(), addr)
t.address = &uri
changed = true
}
t.mut.Unlock()
// This will most likely result in a call to WANAddresses() which tries to
// get t.mut, so notify while unlocked.
if changed {
t.notifyAddressesChanged(t)
}
select {
case <-time.After(time.Duration(t.cfg.Options().StunKeepaliveS) * time.Second):
case <-t.stop:
return
}
if t.cfg.Options().StunKeepaliveS < 1 {
goto disabled
}
extAddr, err = client.Keepalive()
if err != nil {
l.Debugf("%s stun keepalive on %s: %s (%v)", t.uri, addr, err, extAddr)
break
}
}
}
// We failed to contact all provided stun servers or the nat is not punchable.
// Chillout for a while.
time.Sleep(stunRetryInterval)
}
}
type kcpListenerFactory struct{}
func (f *kcpListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
l := &kcpListener{
uri: fixupPort(uri, config.DefaultKCPPort),
cfg: cfg,
tlsCfg: tlsCfg,
conns: conns,
stop: make(chan struct{}),
factory: f,
}
l.nat.Store(stun.NATUnknown)
return l
}
func (kcpListenerFactory) Enabled(cfg config.Configuration) bool {
return true
}
func isPunchable(natType stun.NATType) bool {
return natType == stun.NATNone || natType == stun.NATPortRestricted || natType == stun.NATRestricted || natType == stun.NATFull
}

View File

@ -1,194 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package connections
import (
"bytes"
"encoding/binary"
"net"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/AudriusButkevicius/pfilter"
"github.com/xtaci/kcp-go"
"github.com/xtaci/smux"
)
var (
mut sync.Mutex
filters filterList
)
func init() {
kcp.BlacklistDuration = 10 * time.Minute
}
type filterList []*pfilter.PacketFilter
// Sort connections by whether they are unspecified or not, as connections
// listening on all addresses are more useful.
func (f filterList) Len() int { return len(f) }
func (f filterList) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f filterList) Less(i, j int) bool {
iIsUnspecified := false
jIsUnspecified := false
if host, _, err := net.SplitHostPort(f[i].LocalAddr().String()); err == nil {
iIsUnspecified = net.ParseIP(host).IsUnspecified()
}
if host, _, err := net.SplitHostPort(f[j].LocalAddr().String()); err == nil {
jIsUnspecified = net.ParseIP(host).IsUnspecified()
}
return (iIsUnspecified && !jIsUnspecified) || (iIsUnspecified && jIsUnspecified)
}
// As we open listen KCP connections, we register them here, so that Dial calls through
// KCP could reuse them. This way we will hopefully work around restricted NATs by
// dialing via the same connection we are listening on, creating a mapping on our NAT
// to that IP, and hoping that the other end will try to dial our listen address and
// using the mapping we've established when we dialed.
func getDialingFilter() *pfilter.PacketFilter {
mut.Lock()
defer mut.Unlock()
if len(filters) == 0 {
return nil
}
return filters[0]
}
func registerFilter(filter *pfilter.PacketFilter) {
mut.Lock()
defer mut.Unlock()
filters = append(filters, filter)
sort.Sort(filterList(filters))
}
func deregisterFilter(filter *pfilter.PacketFilter) {
mut.Lock()
defer mut.Unlock()
for i, f := range filters {
if f == filter {
copy(filters[i:], filters[i+1:])
filters[len(filters)-1] = nil
filters = filters[:len(filters)-1]
break
}
}
sort.Sort(filterList(filters))
}
// Filters
type kcpConversationFilter struct {
convID uint32
}
func (f *kcpConversationFilter) Outgoing(out []byte, addr net.Addr) {
if !f.isKCPConv(out) {
panic("not a kcp conversation")
}
atomic.StoreUint32(&f.convID, binary.LittleEndian.Uint32(out[:4]))
}
func (kcpConversationFilter) isKCPConv(data []byte) bool {
// Need at least 5 bytes
if len(data) < 5 {
return false
}
// First 4 bytes convID
// 5th byte is cmd
// IKCP_CMD_PUSH = 81 // cmd: push data
// IKCP_CMD_ACK = 82 // cmd: ack
// IKCP_CMD_WASK = 83 // cmd: window probe (ask)
// IKCP_CMD_WINS = 84 // cmd: window size (tell)
return 80 < data[4] && data[4] < 85
}
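As a self-contained restatement of the check above (the sample bytes are illustrative): a packet is treated as a KCP segment when it carries at least the 4-byte conversation ID plus a command byte, and that command byte is one of PUSH/ACK/WASK/WINS (81..84).

package main

import (
	"encoding/binary"
	"fmt"
)

// isKCPConv mirrors the removed filter: at least 5 bytes, with the fifth
// byte being a KCP command in the range 81..84.
func isKCPConv(data []byte) bool {
	if len(data) < 5 {
		return false
	}
	return 80 < data[4] && data[4] < 85
}

func main() {
	seg := make([]byte, 24)
	binary.LittleEndian.PutUint32(seg[:4], 0xdeadbeef) // conversation ID
	seg[4] = 81                                        // IKCP_CMD_PUSH

	fmt.Println(isKCPConv(seg))                   // true
	fmt.Println(isKCPConv([]byte{1, 2, 3, 4}))    // false: too short
	fmt.Println(isKCPConv([]byte{0, 0, 0, 0, 1})) // false: cmd byte out of range
}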
func (f *kcpConversationFilter) ClaimIncoming(in []byte, addr net.Addr) bool {
if f.isKCPConv(in) {
convID := atomic.LoadUint32(&f.convID)
return convID != 0 && binary.LittleEndian.Uint32(in[:4]) == convID
}
return false
}
type stunFilter struct {
ids map[string]time.Time
mut sync.Mutex
}
func (f *stunFilter) Outgoing(out []byte, addr net.Addr) {
if !f.isStunPayload(out) {
panic("not a stun payload")
}
id := string(out[8:20])
f.mut.Lock()
f.ids[id] = time.Now().Add(time.Minute)
f.reap()
f.mut.Unlock()
}
func (f *stunFilter) ClaimIncoming(in []byte, addr net.Addr) bool {
if f.isStunPayload(in) {
id := string(in[8:20])
f.mut.Lock()
_, ok := f.ids[id]
f.reap()
f.mut.Unlock()
return ok
}
return false
}
func (f *stunFilter) isStunPayload(data []byte) bool {
// Need at least 20 bytes
if len(data) < 20 {
return false
}
// First two bits always unset, and should always send magic cookie.
return data[0]&0xc0 == 0 && bytes.Equal(data[4:8], []byte{0x21, 0x12, 0xA4, 0x42})
}
func (f *stunFilter) reap() {
now := time.Now()
for id, timeout := range f.ids {
if timeout.Before(now) {
delete(f.ids, id)
}
}
}
type sessionClosingStream struct {
*smux.Stream
session *smux.Session
}
func (w *sessionClosingStream) Close() error {
err1 := w.Stream.Close()
deadline := time.Now().Add(5 * time.Second)
for w.session.NumStreams() > 0 && time.Now().Before(deadline) {
time.Sleep(200 * time.Millisecond)
}
err2 := w.session.Close()
if err1 != nil {
return err1
}
return err2
}
func boolInt(b bool) int {
if b {
return 1
}
return 0
}

View File

@ -85,8 +85,11 @@ func (relayDialerFactory) AlwaysWAN() bool {
return true
}
func (relayDialerFactory) Enabled(cfg config.Configuration) bool {
return cfg.Options.RelaysEnabled
func (relayDialerFactory) Valid(cfg config.Configuration) error {
if !cfg.Options.RelaysEnabled {
return errDisabled
}
return nil
}
func (relayDialerFactory) String() string {

View File

@ -190,6 +190,9 @@ func (f *relayListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tl
}
}
func (relayListenerFactory) Enabled(cfg config.Configuration) bool {
return cfg.Options.RelaysEnabled
func (relayListenerFactory) Valid(cfg config.Configuration) error {
if !cfg.Options.RelaysEnabled {
return errDisabled
}
return nil
}

View File

@ -39,6 +39,11 @@ var (
listeners = make(map[string]listenerFactory, 0)
)
var (
errDisabled = errors.New("disabled by configuration")
errDeprecated = errors.New("deprecated protocol")
)
const (
perDeviceWarningIntv = 15 * time.Minute
tlsHandshakeTimeout = 10 * time.Second
@ -149,10 +154,6 @@ func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *
return service
}
var (
errDisabled = errors.New("disabled by configuration")
)
func (s *Service) handle() {
next:
for c := range s.conns {
@ -293,7 +294,7 @@ func (s *Service) connect() {
bestDialerPrio := 1<<31 - 1 // worse prio won't build on 32 bit
for _, df := range dialers {
if !df.Enabled(cfg) {
if df.Valid(cfg) != nil {
continue
}
if prio := df.Priority(); prio < bestDialerPrio {
@ -367,13 +368,18 @@ func (s *Service) connect() {
}
}
dialerFactory, err := s.getDialerFactory(cfg, uri)
if err == errDisabled {
l.Debugln(dialerFactory, "for", uri, "is disabled")
dialerFactory, err := getDialerFactory(cfg, uri)
switch err {
case nil:
// all good
case errDisabled:
l.Debugln("Dialer for", uri, "is disabled")
continue
}
if err != nil {
l.Infof("%v for %v: %v", dialerFactory, uri, err)
case errDeprecated:
l.Debugln("Dialer for", uri, "is deprecated")
continue
default:
l.Infof("Dialer for %v: %v", uri, err)
continue
}
@ -537,13 +543,18 @@ func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
continue
}
factory, err := s.getListenerFactory(to, uri)
if err == errDisabled {
factory, err := getListenerFactory(to, uri)
switch err {
case nil:
// all good
case errDisabled:
l.Debugln("Listener for", uri, "is disabled")
continue
}
if err != nil {
l.Infof("Getting listener factory for %v: %v", uri, err)
case errDeprecated:
l.Debugln("Listener for", uri, "is deprecated")
continue
default:
l.Infof("Listener for %v: %v", uri, err)
continue
}
@ -552,7 +563,7 @@ func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
}
for addr, listener := range s.listeners {
if _, ok := seen[addr]; !ok || !listener.Factory().Enabled(to) {
if _, ok := seen[addr]; !ok || listener.Factory().Valid(to) != nil {
l.Debugln("Stopping listener", addr)
s.listenerSupervisor.Remove(s.listenerTokens[addr])
delete(s.listenerTokens, addr)
@ -633,27 +644,25 @@ func (s *Service) NATType() string {
return "unknown"
}
func (s *Service) getDialerFactory(cfg config.Configuration, uri *url.URL) (dialerFactory, error) {
func getDialerFactory(cfg config.Configuration, uri *url.URL) (dialerFactory, error) {
dialerFactory, ok := dialers[uri.Scheme]
if !ok {
return nil, fmt.Errorf("unknown address scheme %q", uri.Scheme)
}
if !dialerFactory.Enabled(cfg) {
return nil, errDisabled
if err := dialerFactory.Valid(cfg); err != nil {
return nil, err
}
return dialerFactory, nil
}
func (s *Service) getListenerFactory(cfg config.Configuration, uri *url.URL) (listenerFactory, error) {
func getListenerFactory(cfg config.Configuration, uri *url.URL) (listenerFactory, error) {
listenerFactory, ok := listeners[uri.Scheme]
if !ok {
return nil, fmt.Errorf("unknown address scheme %q", uri.Scheme)
}
if !listenerFactory.Enabled(cfg) {
return nil, errDisabled
if err := listenerFactory.Valid(cfg); err != nil {
return nil, err
}
return listenerFactory, nil

View File

@ -54,8 +54,6 @@ const (
connTypeRelayServer
connTypeTCPClient
connTypeTCPServer
connTypeKCPClient
connTypeKCPServer
)
func (t connType) String() string {
@ -68,10 +66,6 @@ func (t connType) String() string {
return "tcp-client"
case connTypeTCPServer:
return "tcp-server"
case connTypeKCPClient:
return "kcp-client"
case connTypeKCPServer:
return "kcp-server"
default:
return "unknown-type"
}
@ -83,8 +77,6 @@ func (t connType) Transport() string {
return "relay"
case connTypeTCPClient, connTypeTCPServer:
return "tcp"
case connTypeKCPClient, connTypeKCPServer:
return "kcp"
default:
return "unknown"
}
@ -122,7 +114,7 @@ type dialerFactory interface {
New(*config.Wrapper, *tls.Config) genericDialer
Priority() int
AlwaysWAN() bool
Enabled(config.Configuration) bool
Valid(config.Configuration) error
String() string
}
@ -133,7 +125,7 @@ type genericDialer interface {
type listenerFactory interface {
New(*url.URL, *config.Wrapper, *tls.Config, chan internalConn, *nat.Service) genericListener
Enabled(config.Configuration) bool
Valid(config.Configuration) error
}
type genericListener interface {

View File

@ -77,8 +77,9 @@ func (tcpDialerFactory) AlwaysWAN() bool {
return false
}
func (tcpDialerFactory) Enabled(cfg config.Configuration) bool {
return true
func (tcpDialerFactory) Valid(_ config.Configuration) error {
// Always valid
return nil
}
func (tcpDialerFactory) String() string {

View File

@ -193,6 +193,7 @@ func (f *tcpListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.
}
}
func (tcpListenerFactory) Enabled(cfg config.Configuration) bool {
return true
func (tcpListenerFactory) Valid(_ config.Configuration) error {
// Always valid
return nil
}

View File

@ -9,7 +9,6 @@ import (
"testing"
"github.com/syncthing/syncthing/lib/dialer"
"github.com/xtaci/kcp-go"
)
func BenchmarkRequestsRawTCP(b *testing.B) {
@ -29,25 +28,6 @@ func BenchmarkRequestsRawTCP(b *testing.B) {
benchmarkRequestsConnPair(b, conn0, conn1)
}
func BenchmarkRequestsRawKCP(b *testing.B) {
b.Skip("KCP broken")
// Benchmarks the rate at which we can serve requests over a single,
// unencrypted KCP channel over the loopback interface.
// Get a connected KCP pair
conn0, conn1, err := getKCPConnectionPair()
if err != nil {
b.Fatal(err)
}
defer conn0.Close()
defer conn1.Close()
// Bench it
benchmarkRequestsConnPair(b, conn0, conn1)
}
func BenchmarkRequestsTLSoTCP(b *testing.B) {
conn0, conn1, err := getTCPConnectionPair()
if err != nil {
@ -58,18 +38,6 @@ func BenchmarkRequestsTLSoTCP(b *testing.B) {
benchmarkRequestsTLS(b, conn0, conn1)
}
func BenchmarkRequestsTLSoKCP(b *testing.B) {
b.Skip("KCP broken")
conn0, conn1, err := getKCPConnectionPair()
if err != nil {
b.Fatal(err)
}
defer conn0.Close()
defer conn1.Close()
benchmarkRequestsTLS(b, conn0, conn1)
}
func benchmarkRequestsTLS(b *testing.B, conn0, conn1 net.Conn) {
// Benchmarks the rate at which we can serve requests over a single,
// TLS encrypted channel over the loopback interface.
@ -170,35 +138,6 @@ func getTCPConnectionPair() (net.Conn, net.Conn, error) {
return conn0, conn1, nil
}
func getKCPConnectionPair() (net.Conn, net.Conn, error) {
lst, err := kcp.Listen("127.0.0.1:0")
if err != nil {
return nil, nil, err
}
var conn0 net.Conn
var err0 error
done := make(chan struct{})
go func() {
conn0, err0 = lst.Accept()
close(done)
}()
// Dial the connection
conn1, err := kcp.Dial(lst.Addr().String())
if err != nil {
return nil, nil, err
}
// Check any error from accept
<-done
if err0 != nil {
return nil, nil, err0
}
return conn0, conn1, nil
}
func negotiateTLS(cert tls.Certificate, conn0, conn1 net.Conn) (net.Conn, net.Conn) {
cfg := &tls.Config{
Certificates: []tls.Certificate{cert},

View File

@ -44,22 +44,27 @@
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22004</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22001</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="MRIW7OK-NETT3M4-N6SBWME-N25O76W-YJKVXPH-FUMQJ3S-P57B74J-GBITBAC" name="s2" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22002</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22003</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="7PBCTLL-JJRYBSA-MOWZRKL-MSDMN4N-4US4OMX-SYEXUS4-HSBGNRY-CZXRXAT" name="s4" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22004</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<gui enabled="true" tls="false" debugging="true">
<address>127.0.0.1:8081</address>
@ -85,7 +90,7 @@
<natLeaseMinutes>0</natLeaseMinutes>
<natRenewalMinutes>30</natRenewalMinutes>
<natTimeoutSeconds>10</natTimeoutSeconds>
<urAccepted>-1</urAccepted>
<urAccepted>3</urAccepted>
<urSeen>2</urSeen>
<urUniqueID>tmwxxCqi</urUniqueID>
<urURL>https://data.syncthing.net/newdata</urURL>
@ -104,18 +109,8 @@
<tempIndexMinBlocks>10</tempIndexMinBlocks>
<trafficClass>0</trafficClass>
<weakHashSelectionMethod>auto</weakHashSelectionMethod>
<stunServer>default</stunServer>
<stunKeepaliveSeconds>24</stunKeepaliveSeconds>
<kcpNoDelay>false</kcpNoDelay>
<kcpUpdateIntervalMs>25</kcpUpdateIntervalMs>
<kcpFastResend>false</kcpFastResend>
<kcpCongestionControl>true</kcpCongestionControl>
<kcpSendWindowSize>128</kcpSendWindowSize>
<kcpReceiveWindowSize>128</kcpReceiveWindowSize>
<defaultFolderPath>~</defaultFolderPath>
<upnpEnabled>true</upnpEnabled>
<upnpRenewalMinutes>30</upnpRenewalMinutes>
<upnpTimeoutSeconds>10</upnpTimeoutSeconds>
<setLowPriority>true</setLowPriority>
<minHomeDiskFreePct>0</minHomeDiskFreePct>
</options>
</configuration>

View File

@ -20,6 +20,26 @@
<weakHashThresholdPct>25</weakHashThresholdPct>
<markerName>.stfolder</markerName>
</folder>
<folder id="s23" label="" path="s23-2" type="readwrite" rescanIntervalS="15" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
<filesystemType>basic</filesystemType>
<device id="MRIW7OK-NETT3M4-N6SBWME-N25O76W-YJKVXPH-FUMQJ3S-P57B74J-GBITBAC" introducedBy=""></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" introducedBy=""></device>
<minDiskFree unit="%">1</minDiskFree>
<versioning></versioning>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
<ignoreDelete>false</ignoreDelete>
<scanProgressIntervalS>0</scanProgressIntervalS>
<pullerPauseS>0</pullerPauseS>
<maxConflicts>-1</maxConflicts>
<disableSparseFiles>false</disableSparseFiles>
<disableTempIndexes>false</disableTempIndexes>
<paused>false</paused>
<weakHashThresholdPct>25</weakHashThresholdPct>
<markerName>.stfolder</markerName>
</folder>
<folder id="¯\_(ツ)_/¯ Räksmörgås 动作 Адрес" label="" path="s12-2" type="readwrite" rescanIntervalS="15" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
<filesystemType>basic</filesystemType>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" introducedBy=""></device>
@ -40,37 +60,20 @@
<weakHashThresholdPct>25</weakHashThresholdPct>
<markerName>.stfolder</markerName>
</folder>
<folder id="s23" label="" path="s23-2" type="readwrite" rescanIntervalS="15" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
<filesystemType>basic</filesystemType>
<device id="MRIW7OK-NETT3M4-N6SBWME-N25O76W-YJKVXPH-FUMQJ3S-P57B74J-GBITBAC" introducedBy=""></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" introducedBy=""></device>
<minDiskFree unit="%">1</minDiskFree>
<versioning></versioning>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
<ignoreDelete>false</ignoreDelete>
<scanProgressIntervalS>0</scanProgressIntervalS>
<pullerPauseS>0</pullerPauseS>
<maxConflicts>-1</maxConflicts>
<disableSparseFiles>false</disableSparseFiles>
<disableTempIndexes>false</disableTempIndexes>
<paused>false</paused>
<weakHashThresholdPct>25</weakHashThresholdPct>
<markerName>.stfolder</markerName>
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22001</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="MRIW7OK-NETT3M4-N6SBWME-N25O76W-YJKVXPH-FUMQJ3S-P57B74J-GBITBAC" name="s2" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22002</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22003</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<gui enabled="true" tls="false" debugging="true">
<address>127.0.0.1:8082</address>
@ -95,7 +98,7 @@
<natLeaseMinutes>0</natLeaseMinutes>
<natRenewalMinutes>1</natRenewalMinutes>
<natTimeoutSeconds>10</natTimeoutSeconds>
<urAccepted>-1</urAccepted>
<urAccepted>3</urAccepted>
<urSeen>2</urSeen>
<urUniqueID>x7AWqz5k</urUniqueID>
<urURL>https://data.syncthing.net/newdata</urURL>
@ -114,15 +117,8 @@
<tempIndexMinBlocks>10</tempIndexMinBlocks>
<trafficClass>0</trafficClass>
<weakHashSelectionMethod>auto</weakHashSelectionMethod>
<stunServer>default</stunServer>
<stunKeepaliveSeconds>24</stunKeepaliveSeconds>
<kcpNoDelay>false</kcpNoDelay>
<kcpUpdateIntervalMs>25</kcpUpdateIntervalMs>
<kcpFastResend>false</kcpFastResend>
<kcpCongestionControl>true</kcpCongestionControl>
<kcpSendWindowSize>128</kcpSendWindowSize>
<kcpReceiveWindowSize>128</kcpReceiveWindowSize>
<defaultFolderPath>~</defaultFolderPath>
<setLowPriority>true</setLowPriority>
<minHomeDiskFreePct>0</minHomeDiskFreePct>
</options>
</configuration>

View File

@ -1,24 +1,4 @@
<configuration version="26">
<folder id="s23" label="" path="s23-3" type="readwrite" rescanIntervalS="20" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
<filesystemType>basic</filesystemType>
<device id="MRIW7OK-NETT3M4-N6SBWME-N25O76W-YJKVXPH-FUMQJ3S-P57B74J-GBITBAC" introducedBy=""></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" introducedBy=""></device>
<minDiskFree unit="%">1</minDiskFree>
<versioning></versioning>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
<ignoreDelete>false</ignoreDelete>
<scanProgressIntervalS>0</scanProgressIntervalS>
<pullerPauseS>0</pullerPauseS>
<maxConflicts>-1</maxConflicts>
<disableSparseFiles>false</disableSparseFiles>
<disableTempIndexes>false</disableTempIndexes>
<paused>false</paused>
<weakHashThresholdPct>25</weakHashThresholdPct>
<markerName>.stfolder</markerName>
</folder>
<folder id="default" label="" path="s3" type="readwrite" rescanIntervalS="20" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
<filesystemType>basic</filesystemType>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" introducedBy=""></device>
@ -42,17 +22,40 @@
<weakHashThresholdPct>25</weakHashThresholdPct>
<markerName>.stfolder</markerName>
</folder>
<folder id="s23" label="" path="s23-3" type="readwrite" rescanIntervalS="20" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
<filesystemType>basic</filesystemType>
<device id="MRIW7OK-NETT3M4-N6SBWME-N25O76W-YJKVXPH-FUMQJ3S-P57B74J-GBITBAC" introducedBy=""></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" introducedBy=""></device>
<minDiskFree unit="%">1</minDiskFree>
<versioning></versioning>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
<ignoreDelete>false</ignoreDelete>
<scanProgressIntervalS>0</scanProgressIntervalS>
<pullerPauseS>0</pullerPauseS>
<maxConflicts>-1</maxConflicts>
<disableSparseFiles>false</disableSparseFiles>
<disableTempIndexes>false</disableTempIndexes>
<paused>false</paused>
<weakHashThresholdPct>25</weakHashThresholdPct>
<markerName>.stfolder</markerName>
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22001</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="MRIW7OK-NETT3M4-N6SBWME-N25O76W-YJKVXPH-FUMQJ3S-P57B74J-GBITBAC" name="s2" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22002</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>tcp://127.0.0.1:22003</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<gui enabled="true" tls="false" debugging="true">
<address>127.0.0.1:8083</address>
@ -77,7 +80,7 @@
<natLeaseMinutes>0</natLeaseMinutes>
<natRenewalMinutes>30</natRenewalMinutes>
<natTimeoutSeconds>10</natTimeoutSeconds>
<urAccepted>-1</urAccepted>
<urAccepted>3</urAccepted>
<urSeen>2</urSeen>
<urUniqueID>UL4yowgK</urUniqueID>
<urURL>https://data.syncthing.net/newdata</urURL>
@ -96,15 +99,8 @@
<tempIndexMinBlocks>10</tempIndexMinBlocks>
<trafficClass>0</trafficClass>
<weakHashSelectionMethod>auto</weakHashSelectionMethod>
<stunServer>default</stunServer>
<stunKeepaliveSeconds>24</stunKeepaliveSeconds>
<kcpNoDelay>false</kcpNoDelay>
<kcpUpdateIntervalMs>25</kcpUpdateIntervalMs>
<kcpFastResend>false</kcpFastResend>
<kcpCongestionControl>true</kcpCongestionControl>
<kcpSendWindowSize>128</kcpSendWindowSize>
<kcpReceiveWindowSize>128</kcpReceiveWindowSize>
<defaultFolderPath>~</defaultFolderPath>
<setLowPriority>true</setLowPriority>
<minHomeDiskFreePct>0</minHomeDiskFreePct>
</options>
</configuration>

View File

@ -21,6 +21,7 @@
<device id="7PBCTLL-JJRYBSA-MOWZRKL-MSDMN4N-4US4OMX-SYEXUS4-HSBGNRY-CZXRXAT" name="s4" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
<address>dynamic</address>
<paused>false</paused>
<autoAcceptFolders>false</autoAcceptFolders>
</device>
<gui enabled="true" tls="false" debugging="true">
<address>127.0.0.1:8084</address>
@ -45,9 +46,9 @@
<natLeaseMinutes>60</natLeaseMinutes>
<natRenewalMinutes>30</natRenewalMinutes>
<natTimeoutSeconds>10</natTimeoutSeconds>
<urAccepted>-1</urAccepted>
<urAccepted>3</urAccepted>
<urSeen>2</urSeen>
<urUniqueID></urUniqueID>
<urUniqueID>vF5srHmT</urUniqueID>
<urURL>https://data.syncthing.net/newdata</urURL>
<urPostInsecurely>false</urPostInsecurely>
<urInitialDelayS>1800</urInitialDelayS>
@ -64,15 +65,8 @@
<tempIndexMinBlocks>10</tempIndexMinBlocks>
<trafficClass>0</trafficClass>
<weakHashSelectionMethod>auto</weakHashSelectionMethod>
<stunServer>default</stunServer>
<stunKeepaliveSeconds>24</stunKeepaliveSeconds>
<kcpNoDelay>false</kcpNoDelay>
<kcpUpdateIntervalMs>25</kcpUpdateIntervalMs>
<kcpFastResend>false</kcpFastResend>
<kcpCongestionControl>true</kcpCongestionControl>
<kcpSendWindowSize>128</kcpSendWindowSize>
<kcpReceiveWindowSize>128</kcpReceiveWindowSize>
<defaultFolderPath>~</defaultFolderPath>
<setLowPriority>true</setLowPriority>
<minHomeDiskFreePct>0</minHomeDiskFreePct>
</options>
</configuration>

View File

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,56 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// author: Cong Ding <dinggnu@gmail.com>
package main
import (
"flag"
"fmt"
"github.com/ccding/go-stun/stun"
)
func main() {
var serverAddr = flag.String("s", stun.DefaultServerAddr, "STUN server address")
var v = flag.Bool("v", false, "verbose mode")
var vv = flag.Bool("vv", false, "double verbose mode (includes -v)")
var vvv = flag.Bool("vvv", false, "triple verbose mode (includes -v and -vv)")
flag.Parse()
// Creates a STUN client. NewClientWithConnection can also be used if
// you want to handle the UDP listener by yourself.
client := stun.NewClient()
// The default addr (stun.DefaultServerAddr) will be used unless we
// call SetServerAddr.
client.SetServerAddr(*serverAddr)
// Non-verbose mode will be used by default unless we call
// SetVerbose(true) or SetVVerbose(true).
client.SetVerbose(*v || *vv || *vvv)
client.SetVVerbose(*vv || *vvv)
// Discover the NAT and return the result.
nat, host, err := client.Discover()
if err != nil {
fmt.Println(err)
return
}
fmt.Println("NAT Type:", nat)
if host != nil {
fmt.Println("External IP Family:", host.Family())
fmt.Println("External IP:", host.IP())
fmt.Println("External Port:", host.Port())
}
}
@@ -1,106 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"encoding/binary"
"hash/crc32"
"net"
)
type attribute struct {
types uint16
length uint16
value []byte
}
func newAttribute(types uint16, value []byte) *attribute {
att := new(attribute)
att.types = types
att.value = padding(value)
att.length = uint16(len(att.value))
return att
}
func newFingerprintAttribute(packet *packet) *attribute {
crc := crc32.ChecksumIEEE(packet.bytes()) ^ fingerprint
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, crc)
return newAttribute(attributeFingerprint, buf)
}
func newSoftwareAttribute(name string) *attribute {
return newAttribute(attributeSoftware, []byte(name))
}
func newChangeReqAttribute(changeIP bool, changePort bool) *attribute {
value := make([]byte, 4)
if changeIP {
value[3] |= 0x04
}
if changePort {
value[3] |= 0x02
}
return newAttribute(attributeChangeRequest, value)
}
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |x x x x x x x x| Family | X-Port |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | X-Address (Variable)
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// Figure 6: Format of XOR-MAPPED-ADDRESS Attribute
func (v *attribute) xorAddr(transID []byte) *Host {
xorIP := make([]byte, 16)
for i := 0; i < len(v.value)-4; i++ {
xorIP[i] = v.value[i+4] ^ transID[i]
}
family := uint16(v.value[1])
port := binary.BigEndian.Uint16(v.value[2:4])
// Truncate if IPv4, otherwise net.IP sometimes renders it as an IPv6 address.
if family == attributeFamilyIPv4 {
xorIP = xorIP[:4]
}
x := binary.BigEndian.Uint16(transID[:2])
return &Host{family, net.IP(xorIP).String(), port ^ x}
}
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |0 0 0 0 0 0 0 0| Family | Port |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | |
// | Address (32 bits or 128 bits) |
// | |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// Figure 5: Format of MAPPED-ADDRESS Attribute
func (v *attribute) rawAddr() *Host {
host := new(Host)
host.family = uint16(v.value[1])
host.port = binary.BigEndian.Uint16(v.value[2:4])
// Truncate if IPv4, otherwise net.IP sometimes renders it as an IPv6 address.
if host.family == attributeFamilyIPv4 {
v.value = v.value[:8]
}
host.ip = net.IP(v.value[4:]).String()
return host
}
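Editorial aside, not part of the deleted file: a minimal, self-contained sketch of the XOR step from Figure 6 above. Because the transaction ID begins with the magic cookie (0x2112A442), X-Port is simply the mapped port XORed with the cookie's top 16 bits; the concrete values below are illustrative only.

    package main

    import "fmt"

    func main() {
        const magicCookie = 0x2112A442
        xPort := uint16(0xF523)                 // X-Port as carried in the attribute
        port := xPort ^ uint16(magicCookie>>16) // undo the XOR with 0x2112
        fmt.Println(port)                       // 54321 (0xD431), the real mapped port
    }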
@@ -1,126 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"errors"
"net"
"strconv"
)
// Client is a STUN client, which can be assigned a STUN server address and is
// used to discover the NAT type.
type Client struct {
serverAddr string
softwareName string
conn net.PacketConn
logger *Logger
}
// NewClient returns a client without a network connection. The network
// connection will be built when calling the Discover function.
func NewClient() *Client {
c := new(Client)
c.SetSoftwareName(DefaultSoftwareName)
c.logger = NewLogger()
return c
}
// NewClientWithConnection returns a client which uses the given connection.
// Please note the connection should be acquired via net.Listen* method.
func NewClientWithConnection(conn net.PacketConn) *Client {
c := new(Client)
c.conn = conn
c.SetSoftwareName(DefaultSoftwareName)
c.logger = NewLogger()
return c
}
// SetVerbose sets the client to be in the verbose mode, which prints
// information in the discover process.
func (c *Client) SetVerbose(v bool) {
c.logger.SetDebug(v)
}
// SetVVerbose sets the client to be in the double verbose mode, which prints
// information and packet in the discover process.
func (c *Client) SetVVerbose(v bool) {
c.logger.SetInfo(v)
}
// SetServerHost allows user to set the STUN hostname and port.
func (c *Client) SetServerHost(host string, port int) {
c.serverAddr = net.JoinHostPort(host, strconv.Itoa(port))
}
// SetServerAddr allows user to set the transport layer STUN server address.
func (c *Client) SetServerAddr(address string) {
c.serverAddr = address
}
// SetSoftwareName allows user to set the name of the software, which is used
// for logging purpose (NOT used in the current implementation).
func (c *Client) SetSoftwareName(name string) {
c.softwareName = name
}
// Discover contacts the STUN server and returns the discovered NAT type and
// the external host, for use in UDP hole punching.
func (c *Client) Discover() (NATType, *Host, error) {
if c.serverAddr == "" {
c.SetServerAddr(DefaultServerAddr)
}
serverUDPAddr, err := net.ResolveUDPAddr("udp", c.serverAddr)
if err != nil {
return NATError, nil, err
}
// Use the connection passed to the client if it is not nil, otherwise
// create a connection and close it at the end.
conn := c.conn
if conn == nil {
conn, err = net.ListenUDP("udp", nil)
if err != nil {
return NATError, nil, err
}
defer conn.Close()
}
return c.discover(conn, serverUDPAddr)
}
// Keepalive sends and receives a bind request, which ensures the mapping stays open.
// Only applicable when the client was created with a connection.
func (c *Client) Keepalive() (*Host, error) {
if c.conn == nil {
return nil, errors.New("no connection available")
}
if c.serverAddr == "" {
c.SetServerAddr(DefaultServerAddr)
}
serverUDPAddr, err := net.ResolveUDPAddr("udp", c.serverAddr)
if err != nil {
return nil, err
}
resp, err := c.test1(c.conn, serverUDPAddr)
if err != nil {
return nil, err
}
if resp == nil || resp.packet == nil {
return nil, errors.New("failed to contact")
}
return resp.mappedAddr, nil
}
@@ -1,178 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
// Default server address and client name.
const (
DefaultServerAddr = "stun.ekiga.net:3478"
DefaultSoftwareName = "StunClient"
)
const (
magicCookie = 0x2112A442
fingerprint = 0x5354554e
)
// NATType is the type of NAT described by int.
type NATType int
// NAT types.
const (
NATError NATType = iota
NATUnknown
NATNone
NATBlocked
NATFull
NATSymmetric
NATRestricted
NATPortRestricted
NATSymmetricUDPFirewall
// Deprecated spellings of these constants
NATSymetric = NATSymmetric
NATSymetricUDPFirewall = NATSymmetricUDPFirewall
)
var natStr map[NATType]string
func init() {
natStr = map[NATType]string{
NATError: "Test failed",
NATUnknown: "Unexpected response from the STUN server",
NATBlocked: "UDP is blocked",
NATFull: "Full cone NAT",
NATSymmetric: "Symmetric NAT",
NATRestricted: "Restricted NAT",
NATPortRestricted: "Port restricted NAT",
NATNone: "Not behind a NAT",
NATSymmetricUDPFirewall: "Symmetric UDP firewall",
}
}
func (nat NATType) String() string {
if s, ok := natStr[nat]; ok {
return s
}
return "Unknown"
}
const (
errorTryAlternate = 300
errorBadRequest = 400
errorUnauthorized = 401
errorUnassigned402 = 402
errorForbidden = 403
errorUnknownAttribute = 420
errorAllocationMismatch = 437
errorStaleNonce = 438
errorUnassigned439 = 439
errorAddressFamilyNotSupported = 440
errorWrongCredentials = 441
errorUnsupportedTransportProtocol = 442
errorPeerAddressFamilyMismatch = 443
errorConnectionAlreadyExists = 446
errorConnectionTimeoutOrFailure = 447
errorAllocationQuotaReached = 486
errorRoleConflict = 487
errorServerError = 500
errorInsufficientCapacity = 508
)
const (
attributeFamilyIPv4 = 0x01
attributeFamilyIPV6 = 0x02
)
const (
attributeMappedAddress = 0x0001
attributeResponseAddress = 0x0002
attributeChangeRequest = 0x0003
attributeSourceAddress = 0x0004
attributeChangedAddress = 0x0005
attributeUsername = 0x0006
attributePassword = 0x0007
attributeMessageIntegrity = 0x0008
attributeErrorCode = 0x0009
attributeUnknownAttributes = 0x000a
attributeReflectedFrom = 0x000b
attributeChannelNumber = 0x000c
attributeLifetime = 0x000d
attributeBandwidth = 0x0010
attributeXorPeerAddress = 0x0012
attributeData = 0x0013
attributeRealm = 0x0014
attributeNonce = 0x0015
attributeXorRelayedAddress = 0x0016
attributeRequestedAddressFamily = 0x0017
attributeEvenPort = 0x0018
attributeRequestedTransport = 0x0019
attributeDontFragment = 0x001a
attributeXorMappedAddress = 0x0020
attributeTimerVal = 0x0021
attributeReservationToken = 0x0022
attributePriority = 0x0024
attributeUseCandidate = 0x0025
attributePadding = 0x0026
attributeResponsePort = 0x0027
attributeConnectionID = 0x002a
attributeXorMappedAddressExp = 0x8020
attributeSoftware = 0x8022
attributeAlternateServer = 0x8023
attributeCacheTimeout = 0x8027
attributeFingerprint = 0x8028
attributeIceControlled = 0x8029
attributeIceControlling = 0x802a
attributeResponseOrigin = 0x802b
attributeOtherAddress = 0x802c
attributeEcnCheckStun = 0x802d
attributeCiscoFlowdata = 0xc000
)
const (
typeBindingRequest = 0x0001
typeBindingResponse = 0x0101
typeBindingErrorResponse = 0x0111
typeSharedSecretRequest = 0x0002
typeSharedSecretResponse = 0x0102
typeSharedErrorResponse = 0x0112
typeAllocate = 0x0003
typeAllocateResponse = 0x0103
typeAllocateErrorResponse = 0x0113
typeRefresh = 0x0004
typeRefreshResponse = 0x0104
typeRefreshErrorResponse = 0x0114
typeSend = 0x0006
typeSendResponse = 0x0106
typeSendErrorResponse = 0x0116
typeData = 0x0007
typeDataResponse = 0x0107
typeDataErrorResponse = 0x0117
typeCreatePermisiion = 0x0008
typeCreatePermisiionResponse = 0x0108
typeCreatePermisiionErrorResponse = 0x0118
typeChannelBinding = 0x0009
typeChannelBindingResponse = 0x0109
typeChannelBindingErrorResponse = 0x0119
typeConnect = 0x000a
typeConnectResponse = 0x010a
typeConnectErrorResponse = 0x011a
typeConnectionBind = 0x000b
typeConnectionBindResponse = 0x010b
typeConnectionBindErrorResponse = 0x011b
typeConnectionAttempt = 0x000c
typeConnectionAttemptResponse = 0x010c
typeConnectionAttemptErrorResponse = 0x011c
)
@@ -1,165 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"errors"
"net"
)
// Follow RFC 3489 and RFC 5389.
// Figure 2: Flow for type discovery process (from RFC 3489).
// +--------+
// | Test |
// | I |
// +--------+
// |
// |
// V
// /\ /\
// N / \ Y / \ Y +--------+
// UDP <-------/Resp\--------->/ IP \------------->| Test |
// Blocked \ ? / \Same/ | II |
// \ / \? / +--------+
// \/ \/ |
// | N |
// | V
// V /\
// +--------+ Sym. N / \
// | Test | UDP <---/Resp\
// | II | Firewall \ ? /
// +--------+ \ /
// | \/
// V |Y
// /\ /\ |
// Symmetric N / \ +--------+ N / \ V
// NAT <--- / IP \<-----| Test |<--- /Resp\ Open
// \Same/ | I | \ ? / Internet
// \? / +--------+ \ /
// \/ \/
// |Y |Y
// | |
// | V
// | Full
// | Cone
// V /\
// +--------+ / \ Y
// | Test |------>/Resp\---->Restricted
// | III | \ ? /
// +--------+ \ /
// \/
// |N
// | Port
// +------>Restricted
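// (Editorial summary of the flow above, not part of the original file.)
// In short: no response to test1 means UDP is blocked. If the mapped address
// equals the local address, a response to test2 means the open Internet and
// no response means a symmetric UDP firewall. Otherwise, a response to test2
// means a full-cone NAT; a different mapping when test1 is repeated against
// the changed address means a symmetric NAT; and test3 distinguishes
// restricted (response) from port-restricted (no response) NAT.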
func (c *Client) discover(conn net.PacketConn, addr *net.UDPAddr) (NATType, *Host, error) {
// Perform test1 to check if it is under NAT.
c.logger.Debugln("Do Test1")
c.logger.Debugln("Send To:", addr)
resp, err := c.test1(conn, addr)
if err != nil {
return NATError, nil, err
}
c.logger.Debugln("Received:", resp)
if resp == nil {
return NATBlocked, nil, nil
}
// identical is used to check whether the client is on the open Internet or not.
identical := resp.identical
// changedAddr is used to perform test1 a second time, as well as test3.
changedAddr := resp.changedAddr
// mappedAddr is used as the return value; its IP is used for the following tests
mappedAddr := resp.mappedAddr
// Make sure IP and port are not changed.
if resp.serverAddr.IP() != addr.IP.String() ||
resp.serverAddr.Port() != uint16(addr.Port) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
// if changedAddr is not available, use otherAddr as changedAddr,
// which is updated in RFC 5780
if changedAddr == nil {
changedAddr = resp.otherAddr
}
// changedAddr shall not be nil
if changedAddr == nil {
return NATError, mappedAddr, errors.New("Server error: no changed address.")
}
// Perform test2 to see if the client can receive packet sent from
// another IP and port.
c.logger.Debugln("Do Test2")
c.logger.Debugln("Send To:", addr)
resp, err = c.test2(conn, addr)
if err != nil {
return NATError, mappedAddr, err
}
c.logger.Debugln("Received:", resp)
// Make sure IP and port are changed.
if resp != nil &&
(resp.serverAddr.IP() == addr.IP.String() ||
resp.serverAddr.Port() == uint16(addr.Port)) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
if identical {
if resp == nil {
return NATSymmetricUDPFirewall, mappedAddr, nil
}
return NATNone, mappedAddr, nil
}
if resp != nil {
return NATFull, mappedAddr, nil
}
// Perform test1 against another IP and port to see if the NAT uses the same
// external IP.
c.logger.Debugln("Do Test1")
c.logger.Debugln("Send To:", changedAddr)
caddr, err := net.ResolveUDPAddr("udp", changedAddr.String())
resp, err = c.test1(conn, caddr)
if err != nil {
return NATError, mappedAddr, err
}
c.logger.Debugln("Received:", resp)
if resp == nil {
// It should be NAT_BLOCKED, but will be detected in the first
// step. So this will never happen.
return NATUnknown, mappedAddr, nil
}
// Make sure IP/port is not changed.
if resp.serverAddr.IP() != caddr.IP.String() ||
resp.serverAddr.Port() != uint16(caddr.Port) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
if mappedAddr.IP() == resp.mappedAddr.IP() && mappedAddr.Port() == resp.mappedAddr.Port() {
// Perform test3 to see if the client can receive packet sent
// from another port.
c.logger.Debugln("Do Test3")
c.logger.Debugln("Send To:", caddr)
resp, err = c.test3(conn, caddr)
if err != nil {
return NATError, mappedAddr, err
}
c.logger.Debugln("Received:", resp)
if resp == nil {
return NATPortRestricted, mappedAddr, nil
}
// Make sure IP is not changed, and port is changed.
if resp.serverAddr.IP() != caddr.IP.String() ||
resp.serverAddr.Port() == uint16(caddr.Port) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
return NATRestricted, mappedAddr, nil
}
return NATSymmetric, mappedAddr, nil
}
@@ -1,25 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
// Package stun is a STUN (RFC 3489 and RFC 5389) client implementation in
// golang.
//
// It is extremely easy to use -- just one line of code.
//
// nat, host, err := stun.NewClient().Discover()
//
// More details please go to `main.go`.
package stun
@@ -1,70 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"net"
"strconv"
)
// Host defines the network address including address family, IP address and port.
type Host struct {
family uint16
ip string
port uint16
}
func newHostFromStr(s string) *Host {
udpAddr, err := net.ResolveUDPAddr("udp", s)
if err != nil {
return nil
}
host := new(Host)
if udpAddr.IP.To4() != nil {
host.family = attributeFamilyIPv4
} else {
host.family = attributeFamilyIPV6
}
host.ip = udpAddr.IP.String()
host.port = uint16(udpAddr.Port)
return host
}
// Family returns the family type of a host (IPv4 or IPv6).
func (h *Host) Family() uint16 {
return h.family
}
// IP returns the internet protocol address of the host.
func (h *Host) IP() string {
return h.ip
}
// Port returns the port number of the host.
func (h *Host) Port() uint16 {
return h.port
}
// TransportAddr returns the transport layer address of the host.
func (h *Host) TransportAddr() string {
return net.JoinHostPort(h.ip, strconv.Itoa(int(h.port)))
}
// String returns the string representation of the host address.
func (h *Host) String() string {
return h.TransportAddr()
}
@@ -1,87 +0,0 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"log"
"os"
)
// Logger is a simple logger specified for this STUN client.
type Logger struct {
log.Logger
debug bool
info bool
}
// NewLogger creates a default logger.
func NewLogger() *Logger {
logger := &Logger{*log.New(os.Stdout, "", log.LstdFlags), false, false}
return logger
}
// SetDebug sets the logger running in debug mode or not.
func (l *Logger) SetDebug(v bool) {
l.debug = v
}
// SetInfo sets the logger running in info mode or not.
func (l *Logger) SetInfo(v bool) {
l.info = v
}
// Debug outputs the log in the format of log.Print.
func (l *Logger) Debug(v ...interface{}) {
if l.debug {
l.Print(v...)
}
}
// Debugf outputs the log in the format of log.Printf.
func (l *Logger) Debugf(format string, v ...interface{}) {
if l.debug {
l.Printf(format, v...)
}
}
// Debugln outputs the log in the format of log.Println.
func (l *Logger) Debugln(v ...interface{}) {
if l.debug {
l.Println(v...)
}
}
// Info outputs the log in the format of log.Print.
func (l *Logger) Info(v ...interface{}) {
if l.info {
l.Print(v...)
}
}
// Infof outputs the log in the format of log.Printf.
func (l *Logger) Infof(format string, v ...interface{}) {
if l.info {
l.Printf(format, v...)
}
}
// Infoln outputs the log in the format of log.Println.
func (l *Logger) Infoln(v ...interface{}) {
if l.info {
l.Println(v...)
}
}
@@ -1,102 +0,0 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"bytes"
"encoding/hex"
"errors"
"net"
"time"
)
const (
numRetransmit = 9
defaultTimeout = 100
maxTimeout = 1600
maxPacketSize = 1024
)
func (c *Client) sendBindingReq(conn net.PacketConn, addr net.Addr, changeIP bool, changePort bool) (*response, error) {
// Construct packet.
pkt, err := newPacket()
if err != nil {
return nil, err
}
pkt.types = typeBindingRequest
attribute := newSoftwareAttribute(c.softwareName)
pkt.addAttribute(*attribute)
if changeIP || changePort {
attribute = newChangeReqAttribute(changeIP, changePort)
pkt.addAttribute(*attribute)
}
attribute = newFingerprintAttribute(pkt)
pkt.addAttribute(*attribute)
// Send packet.
return c.send(pkt, conn, addr)
}
// RFC 3489: Clients SHOULD retransmit the request starting with an interval
// of 100ms, doubling every retransmit until the interval reaches 1.6s.
// Retransmissions continue with intervals of 1.6s until a response is
// received, or a total of 9 requests have been sent.
func (c *Client) send(pkt *packet, conn net.PacketConn, addr net.Addr) (*response, error) {
c.logger.Info("\n" + hex.Dump(pkt.bytes()))
timeout := defaultTimeout
packetBytes := make([]byte, maxPacketSize)
for i := 0; i < numRetransmit; i++ {
// Send packet to the server.
length, err := conn.WriteTo(pkt.bytes(), addr)
if err != nil {
return nil, err
}
if length != len(pkt.bytes()) {
return nil, errors.New("Error in sending data.")
}
err = conn.SetReadDeadline(time.Now().Add(time.Duration(timeout) * time.Millisecond))
if err != nil {
return nil, err
}
if timeout < maxTimeout {
timeout *= 2
}
for {
// Read from the port.
length, raddr, err := conn.ReadFrom(packetBytes)
if err != nil {
if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
break
}
return nil, err
}
p, err := newPacketFromBytes(packetBytes[0:length])
if err != nil {
return nil, err
}
// If the transID mismatches, keep reading until we get a
// matching packet or time out.
if !bytes.Equal(pkt.transID, p.transID) {
continue
}
c.logger.Info("\n" + hex.Dump(packetBytes[0:length]))
resp := newResponse(p, conn)
resp.serverAddr = newHostFromStr(raddr.String())
return resp, err
}
}
return nil, nil
}
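Editorial sketch, not part of the deleted file: the retransmission schedule that the loop above implements, per the RFC 3489 note — 100 ms, doubling up to a 1.6 s cap, for at most 9 attempts, i.e. roughly 9.5 s of worst-case waiting.

    package main

    import "fmt"

    func main() {
        timeout, total := 100, 0 // defaultTimeout, in milliseconds
        for i := 0; i < 9; i++ { // numRetransmit
            fmt.Printf("attempt %d: wait up to %d ms\n", i+1, timeout)
            total += timeout
            if timeout < 1600 { // maxTimeout
                timeout *= 2
            }
        }
        fmt.Println("worst-case total:", total, "ms") // 9500 ms
    }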
@@ -1,129 +0,0 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"crypto/rand"
"encoding/binary"
"errors"
)
type packet struct {
types uint16
length uint16
transID []byte // 4 bytes magic cookie + 12 bytes transaction id
attributes []attribute
}
func newPacket() (*packet, error) {
v := new(packet)
v.transID = make([]byte, 16)
binary.BigEndian.PutUint32(v.transID[:4], magicCookie)
_, err := rand.Read(v.transID[4:])
if err != nil {
return nil, err
}
v.attributes = make([]attribute, 0, 10)
v.length = 0
return v, nil
}
func newPacketFromBytes(packetBytes []byte) (*packet, error) {
if len(packetBytes) < 24 {
return nil, errors.New("Received data length too short.")
}
pkt := new(packet)
pkt.types = binary.BigEndian.Uint16(packetBytes[0:2])
pkt.length = binary.BigEndian.Uint16(packetBytes[2:4])
pkt.transID = packetBytes[4:20]
pkt.attributes = make([]attribute, 0, 10)
for pos := uint16(20); pos < uint16(len(packetBytes)); {
types := binary.BigEndian.Uint16(packetBytes[pos : pos+2])
length := binary.BigEndian.Uint16(packetBytes[pos+2 : pos+4])
if pos+4+length > uint16(len(packetBytes)) {
return nil, errors.New("Received data format mismatch.")
}
value := packetBytes[pos+4 : pos+4+length]
attribute := newAttribute(types, value)
pkt.addAttribute(*attribute)
pos += align(length) + 4
}
return pkt, nil
}
func (v *packet) addAttribute(a attribute) {
v.attributes = append(v.attributes, a)
v.length += align(a.length) + 4
}
func (v *packet) bytes() []byte {
packetBytes := make([]byte, 4)
binary.BigEndian.PutUint16(packetBytes[0:2], v.types)
binary.BigEndian.PutUint16(packetBytes[2:4], v.length)
packetBytes = append(packetBytes, v.transID...)
for _, a := range v.attributes {
buf := make([]byte, 2)
binary.BigEndian.PutUint16(buf, a.types)
packetBytes = append(packetBytes, buf...)
binary.BigEndian.PutUint16(buf, a.length)
packetBytes = append(packetBytes, buf...)
packetBytes = append(packetBytes, a.value...)
}
return packetBytes
}
func (v *packet) getSourceAddr() *Host {
return v.getRawAddr(attributeSourceAddress)
}
func (v *packet) getMappedAddr() *Host {
return v.getRawAddr(attributeMappedAddress)
}
func (v *packet) getChangedAddr() *Host {
return v.getRawAddr(attributeChangedAddress)
}
func (v *packet) getOtherAddr() *Host {
return v.getRawAddr(attributeOtherAddress)
}
func (v *packet) getRawAddr(attribute uint16) *Host {
for _, a := range v.attributes {
if a.types == attribute {
return a.rawAddr()
}
}
return nil
}
func (v *packet) getXorMappedAddr() *Host {
addr := v.getXorAddr(attributeXorMappedAddress)
if addr == nil {
addr = v.getXorAddr(attributeXorMappedAddressExp)
}
return addr
}
func (v *packet) getXorAddr(attribute uint16) *Host {
for _, a := range v.attributes {
if a.types == attribute {
return a.xorAddr(v.transID)
}
}
return nil
}
@@ -1,78 +0,0 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"fmt"
"net"
)
type response struct {
packet *packet // the original packet from the server
serverAddr *Host // the address the packet was received from
changedAddr *Host // parsed from packet
mappedAddr *Host // parsed from packet, external addr of client NAT
otherAddr *Host // parsed from packet, to replace changedAddr in RFC 5780
identical bool // if mappedAddr is in local addr list
}
func newResponse(pkt *packet, conn net.PacketConn) *response {
resp := &response{pkt, nil, nil, nil, nil, false}
if pkt == nil {
return resp
}
// RFC 3489 doesn't require the server to return the XOR mapped address.
mappedAddr := pkt.getXorMappedAddr()
if mappedAddr == nil {
mappedAddr = pkt.getMappedAddr()
}
resp.mappedAddr = mappedAddr
// compute identical
localAddrStr := conn.LocalAddr().String()
if mappedAddr != nil {
mappedAddrStr := mappedAddr.String()
resp.identical = isLocalAddress(localAddrStr, mappedAddrStr)
}
// compute changedAddr
changedAddr := pkt.getChangedAddr()
if changedAddr != nil {
changedAddrHost := newHostFromStr(changedAddr.String())
resp.changedAddr = changedAddrHost
}
// compute otherAddr
otherAddr := pkt.getOtherAddr()
if otherAddr != nil {
otherAddrHost := newHostFromStr(otherAddr.String())
resp.otherAddr = otherAddrHost
}
return resp
}
// String is only used for verbose mode output.
func (r *response) String() string {
if r == nil {
return "Nil"
}
return fmt.Sprintf("{packet nil: %v, local: %v, remote: %v, changed: %v, other: %v, identical: %v}",
r.packet == nil,
r.mappedAddr,
r.serverAddr,
r.changedAddr,
r.otherAddr,
r.identical)
}
@@ -1,33 +0,0 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"net"
)
func (c *Client) test1(conn net.PacketConn, addr net.Addr) (*response, error) {
return c.sendBindingReq(conn, addr, false, false)
}
func (c *Client) test2(conn net.PacketConn, addr net.Addr) (*response, error) {
return c.sendBindingReq(conn, addr, true, true)
}
func (c *Client) test3(conn net.PacketConn, addr net.Addr) (*response, error) {
return c.sendBindingReq(conn, addr, false, true)
}
@@ -1,63 +0,0 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"net"
)
// padding pads the length of the byte slice to a multiple of 4.
func padding(bytes []byte) []byte {
length := uint16(len(bytes))
return append(bytes, make([]byte, align(length)-length)...)
}
// align rounds the uint16 number up to the smallest multiple of 4 that is
// larger than or equal to it.
func align(n uint16) uint16 {
return (n + 3) & 0xfffc
}
// isLocalAddress checks whether localRemote is a local address.
func isLocalAddress(local, localRemote string) bool {
// Resolve the IP returned by the STUN server first.
localRemoteAddr, err := net.ResolveUDPAddr("udp", localRemote)
if err != nil {
return false
}
// Try comparing with the local address on the socket first, but only if
// it's actually specified.
addr, err := net.ResolveUDPAddr("udp", local)
if err == nil && addr.IP != nil && !addr.IP.IsUnspecified() {
return addr.IP.Equal(localRemoteAddr.IP)
}
// Fallback to checking IPs of all interfaces
addrs, err := net.InterfaceAddrs()
if err != nil {
return false
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if ip.Equal(localRemoteAddr.IP) {
return true
}
}
return false
}
@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Daniel Fu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,69 +0,0 @@
package kcp
import (
"sync"
"time"
)
var (
// BlacklistDuration sets a duration for which a session is blacklisted
// once it's established. This is similar to the TIME_WAIT state in TCP, whereby
// any connection attempt with the same session parameters is ignored for
// some amount of time.
//
// This is only useful when dial attempts happen from a pre-determined port,
// for example when you are dialing from the same connection you are listening on
// to punch through NAT, and helps with the fact that KCP is stateless.
// It helps deal with scenarios where a process on one side (A) gets restarted,
// and stray packets from the other side (B) make it look as if someone is
// trying to connect to A. Even if the session dies on B, new stray reply
// packets from A resurrect the session on B, causing the session to stay
// alive forever.
BlacklistDuration time.Duration
blacklist = blacklistMap{
entries: make(map[sessionKey]time.Time),
}
)
// a global map for blacklisting conversations
type blacklistMap struct {
entries map[sessionKey]time.Time
reapAt time.Time
mut sync.Mutex
}
func (m *blacklistMap) add(address string, conv uint32) {
if BlacklistDuration == 0 {
return
}
m.mut.Lock()
timeout := time.Now().Add(BlacklistDuration)
m.entries[sessionKey{
addr: address,
convID: conv,
}] = timeout
m.reap()
m.mut.Unlock()
}
func (m *blacklistMap) has(address string, conv uint32) bool {
if BlacklistDuration == 0 {
return false
}
m.mut.Lock()
t, ok := m.entries[sessionKey{
addr: address,
convID: conv,
}]
m.mut.Unlock()
return ok && t.After(time.Now())
}
func (m *blacklistMap) reap() {
now := time.Now()
for k, t := range m.entries {
if t.Before(now) {
delete(m.entries, k)
}
}
}
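Editorial sketch, not part of the deleted file: the only knob a consumer needs to touch is the exported BlacklistDuration variable; the import path below assumes the package is used as vendored here, and the one-minute value is illustrative.

    package main

    import (
        "time"

        kcp "github.com/xtaci/kcp-go"
    )

    func main() {
        // Ignore stray packets for a just-closed conversation for one minute,
        // similar in spirit to TCP's TIME_WAIT.
        kcp.BlacklistDuration = time.Minute
    }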
@@ -1,288 +0,0 @@
package kcp
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/sha1"
"github.com/templexxx/xor"
"github.com/tjfoc/gmsm/sm4"
"golang.org/x/crypto/blowfish"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/salsa20"
"golang.org/x/crypto/tea"
"golang.org/x/crypto/twofish"
"golang.org/x/crypto/xtea"
)
var (
initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
saltxor = `sH3CIVoF#rWLtJo6`
)
// BlockCrypt defines encryption/decryption methods for a given byte slice.
// Notes on implementing: the data to be encrypted contains a built-in
// nonce in the first 16 bytes
type BlockCrypt interface {
// Encrypt encrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Encrypt(dst, src []byte)
// Decrypt decrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Decrypt(dst, src []byte)
}
type salsa20BlockCrypt struct {
key [32]byte
}
// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(salsa20BlockCrypt)
copy(c.key[:], key)
return c, nil
}
func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
type sm4BlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewSM4BlockCrypt https://github.com/tjfoc/gmsm/tree/master/sm4
func NewSM4BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(sm4BlockCrypt)
block, err := sm4.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, sm4.BlockSize)
c.decbuf = make([]byte, 2*sm4.BlockSize)
return c, nil
}
func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type twofishBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(twofishBlockCrypt)
block, err := twofish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, twofish.BlockSize)
c.decbuf = make([]byte, 2*twofish.BlockSize)
return c, nil
}
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type tripleDESBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(tripleDESBlockCrypt)
block, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, des.BlockSize)
c.decbuf = make([]byte, 2*des.BlockSize)
return c, nil
}
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type cast5BlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(cast5BlockCrypt)
block, err := cast5.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, cast5.BlockSize)
c.decbuf = make([]byte, 2*cast5.BlockSize)
return c, nil
}
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type blowfishBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(blowfishBlockCrypt)
block, err := blowfish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, blowfish.BlockSize)
c.decbuf = make([]byte, 2*blowfish.BlockSize)
return c, nil
}
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type aesBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(aesBlockCrypt)
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, aes.BlockSize)
c.decbuf = make([]byte, 2*aes.BlockSize)
return c, nil
}
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type teaBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(teaBlockCrypt)
block, err := tea.NewCipherWithRounds(key, 16)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, tea.BlockSize)
c.decbuf = make([]byte, 2*tea.BlockSize)
return c, nil
}
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type xteaBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(xteaBlockCrypt)
block, err := xtea.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, xtea.BlockSize)
c.decbuf = make([]byte, 2*xtea.BlockSize)
return c, nil
}
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type simpleXORBlockCrypt struct {
xortbl []byte
}
// NewSimpleXORBlockCrypt implements a simple XOR cipher with key expansion
func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(simpleXORBlockCrypt)
c.xortbl = pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
return c, nil
}
func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
type noneBlockCrypt struct{}
// NewNoneBlockCrypt does nothing but copy the data
func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
return new(noneBlockCrypt), nil
}
func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }
func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
// packet encryption with local CFB mode
func encrypt(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize()
tbl := buf[:blocksize]
block.Encrypt(tbl, initialVector)
n := len(src) / blocksize
base := 0
for i := 0; i < n; i++ {
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
}
xor.BytesSrc0(dst[base:], src[base:], tbl)
}
func decrypt(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize()
tbl := buf[:blocksize]
next := buf[blocksize:]
block.Encrypt(tbl, initialVector)
n := len(src) / blocksize
base := 0
for i := 0; i < n; i++ {
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
}
xor.BytesSrc0(dst[base:], src[base:], tbl)
}
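Editorial sketch, not part of the deleted file: round-tripping a buffer through one of the BlockCrypt implementations above. The passphrase, salt and iteration count are illustrative only; the 32-byte key satisfies aes.NewCipher, and the first 16 bytes of the buffer act as the nonce described in the interface comment.

    package main

    import (
        "bytes"
        "crypto/sha1"
        "fmt"

        kcp "github.com/xtaci/kcp-go"
        "golang.org/x/crypto/pbkdf2"
    )

    func main() {
        key := pbkdf2.Key([]byte("passphrase"), []byte("demo salt"), 4096, 32, sha1.New)
        crypt, err := kcp.NewAESBlockCrypt(key)
        if err != nil {
            panic(err)
        }
        src := []byte("0123456789abcdefhello, block crypt") // 16-byte nonce prefix + payload
        enc := make([]byte, len(src))
        dec := make([]byte, len(src))
        crypt.Encrypt(enc, src)
        crypt.Decrypt(dec, enc)
        fmt.Println(bytes.Equal(src, dec)) // true
    }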
311
vendor/github.com/xtaci/kcp-go/fec.go generated vendored
@@ -1,311 +0,0 @@
package kcp
import (
"encoding/binary"
"sync/atomic"
"github.com/klauspost/reedsolomon"
)
const (
fecHeaderSize = 6
fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
typeData = 0xf1
typeFEC = 0xf2
)
type (
// fecPacket is a decoded FEC packet
fecPacket struct {
seqid uint32
flag uint16
data []byte
}
// fecDecoder for decoding incoming packets
fecDecoder struct {
rxlimit int // queue size limit
dataShards int
parityShards int
shardSize int
rx []fecPacket // ordered receive queue
// caches
decodeCache [][]byte
flagCache []bool
// zeros
zeros []byte
// RS decoder
codec reedsolomon.Encoder
}
)
func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
if dataShards <= 0 || parityShards <= 0 {
return nil
}
if rxlimit < dataShards+parityShards {
return nil
}
dec := new(fecDecoder)
dec.rxlimit = rxlimit
dec.dataShards = dataShards
dec.parityShards = parityShards
dec.shardSize = dataShards + parityShards
codec, err := reedsolomon.New(dataShards, parityShards)
if err != nil {
return nil
}
dec.codec = codec
dec.decodeCache = make([][]byte, dec.shardSize)
dec.flagCache = make([]bool, dec.shardSize)
dec.zeros = make([]byte, mtuLimit)
return dec
}
// decodeBytes decodes a FEC packet from its raw bytes
func (dec *fecDecoder) decodeBytes(data []byte) fecPacket {
var pkt fecPacket
pkt.seqid = binary.LittleEndian.Uint32(data)
pkt.flag = binary.LittleEndian.Uint16(data[4:])
// allocate memory & copy
buf := xmitBuf.Get().([]byte)[:len(data)-6]
copy(buf, data[6:])
pkt.data = buf
return pkt
}
// decode a fec packet
func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
// insertion
n := len(dec.rx) - 1
insertIdx := 0
for i := n; i >= 0; i-- {
if pkt.seqid == dec.rx[i].seqid { // de-duplicate
xmitBuf.Put(pkt.data)
return nil
} else if _itimediff(pkt.seqid, dec.rx[i].seqid) > 0 { // insertion
insertIdx = i + 1
break
}
}
// insert into ordered rx queue
if insertIdx == n+1 {
dec.rx = append(dec.rx, pkt)
} else {
dec.rx = append(dec.rx, fecPacket{})
copy(dec.rx[insertIdx+1:], dec.rx[insertIdx:]) // shift right
dec.rx[insertIdx] = pkt
}
// shard range for current packet
shardBegin := pkt.seqid - pkt.seqid%uint32(dec.shardSize)
shardEnd := shardBegin + uint32(dec.shardSize) - 1
// max search range in ordered queue for current shard
searchBegin := insertIdx - int(pkt.seqid%uint32(dec.shardSize))
if searchBegin < 0 {
searchBegin = 0
}
searchEnd := searchBegin + dec.shardSize - 1
if searchEnd >= len(dec.rx) {
searchEnd = len(dec.rx) - 1
}
// re-construct datashards
if searchEnd-searchBegin+1 >= dec.dataShards {
var numshard, numDataShard, first, maxlen int
// zero cache
shards := dec.decodeCache
shardsflag := dec.flagCache
for k := range dec.decodeCache {
shards[k] = nil
shardsflag[k] = false
}
// shard assembly
for i := searchBegin; i <= searchEnd; i++ {
seqid := dec.rx[i].seqid
if _itimediff(seqid, shardEnd) > 0 {
break
} else if _itimediff(seqid, shardBegin) >= 0 {
shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data
shardsflag[seqid%uint32(dec.shardSize)] = true
numshard++
if dec.rx[i].flag == typeData {
numDataShard++
}
if numshard == 1 {
first = i
}
if len(dec.rx[i].data) > maxlen {
maxlen = len(dec.rx[i].data)
}
}
}
if numDataShard == dec.dataShards {
// case 1: no lost data shards
dec.rx = dec.freeRange(first, numshard, dec.rx)
} else if numshard >= dec.dataShards {
// case 2: data shard lost, but recoverable from parity shard
for k := range shards {
if shards[k] != nil {
dlen := len(shards[k])
shards[k] = shards[k][:maxlen]
copy(shards[k][dlen:], dec.zeros)
}
}
if err := dec.codec.ReconstructData(shards); err == nil {
for k := range shards[:dec.dataShards] {
if !shardsflag[k] {
recovered = append(recovered, shards[k])
}
}
}
dec.rx = dec.freeRange(first, numshard, dec.rx)
}
}
// keep rxlimit
if len(dec.rx) > dec.rxlimit {
if dec.rx[0].flag == typeData { // record unrecoverable data
atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
}
dec.rx = dec.freeRange(0, 1, dec.rx)
}
return
}
// free a range of fecPacket, and zero for GC recycling
func (dec *fecDecoder) freeRange(first, n int, q []fecPacket) []fecPacket {
for i := first; i < first+n; i++ { // free
xmitBuf.Put(q[i].data)
}
copy(q[first:], q[first+n:])
for i := 0; i < n; i++ { // dereference data
q[len(q)-1-i].data = nil
}
return q[:len(q)-n]
}
type (
// fecEncoder for encoding outgoing packets
fecEncoder struct {
dataShards int
parityShards int
shardSize int
paws uint32 // Protect Against Wrapped Sequence numbers
next uint32 // next seqid
shardCount int // count the number of datashards collected
maxSize int // record maximum data length in datashard
headerOffset int // FEC header offset
payloadOffset int // FEC payload offset
// caches
shardCache [][]byte
encodeCache [][]byte
// zeros
zeros []byte
// RS encoder
codec reedsolomon.Encoder
}
)
func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
if dataShards <= 0 || parityShards <= 0 {
return nil
}
enc := new(fecEncoder)
enc.dataShards = dataShards
enc.parityShards = parityShards
enc.shardSize = dataShards + parityShards
enc.paws = (0xffffffff/uint32(enc.shardSize) - 1) * uint32(enc.shardSize)
enc.headerOffset = offset
enc.payloadOffset = enc.headerOffset + fecHeaderSize
codec, err := reedsolomon.New(dataShards, parityShards)
if err != nil {
return nil
}
enc.codec = codec
// caches
enc.encodeCache = make([][]byte, enc.shardSize)
enc.shardCache = make([][]byte, enc.shardSize)
for k := range enc.shardCache {
enc.shardCache[k] = make([]byte, mtuLimit)
}
enc.zeros = make([]byte, mtuLimit)
return enc
}
// encode the packet and output parity shards once enough data shards have
// been collected; the content of the returned parity shards will change on
// the next encode
func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
enc.markData(b[enc.headerOffset:])
binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))
// copy data to fec datashards
sz := len(b)
enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
copy(enc.shardCache[enc.shardCount], b)
enc.shardCount++
// record max datashard length
if sz > enc.maxSize {
enc.maxSize = sz
}
// calculate Reed-Solomon Erasure Code
if enc.shardCount == enc.dataShards {
// bzero each datashard's tail
for i := 0; i < enc.dataShards; i++ {
shard := enc.shardCache[i]
slen := len(shard)
copy(shard[slen:enc.maxSize], enc.zeros)
}
// construct equal-sized slice with stripped header
cache := enc.encodeCache
for k := range cache {
cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
}
// rs encode
if err := enc.codec.Encode(cache); err == nil {
ps = enc.shardCache[enc.dataShards:]
for k := range ps {
enc.markFEC(ps[k][enc.headerOffset:])
ps[k] = ps[k][:enc.maxSize]
}
}
// reset counters to zero
enc.shardCount = 0
enc.maxSize = 0
}
return
}
func (enc *fecEncoder) markData(data []byte) {
binary.LittleEndian.PutUint32(data, enc.next)
binary.LittleEndian.PutUint16(data[4:], typeData)
enc.next++
}
func (enc *fecEncoder) markFEC(data []byte) {
binary.LittleEndian.PutUint32(data, enc.next)
binary.LittleEndian.PutUint16(data[4:], typeFEC)
enc.next = (enc.next + 1) % enc.paws
}
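The encoder and decoder above are internal to the package; as a rough illustration (not from the original file) of the underlying idea — any dataShards of the dataShards+parityShards pieces are enough to rebuild the payload — the vendored reedsolomon package can be used directly. Shard contents and sizes below are illustrative.

    package main

    import (
        "fmt"

        "github.com/klauspost/reedsolomon"
    )

    func main() {
        // 3 data shards + 2 parity shards; any 3 of the 5 can rebuild the data.
        enc, err := reedsolomon.New(3, 2)
        if err != nil {
            panic(err)
        }
        shards := [][]byte{
            []byte("pkt0"), []byte("pkt1"), []byte("pkt2"), // data shards
            make([]byte, 4), make([]byte, 4), // parity shards, filled by Encode
        }
        if err := enc.Encode(shards); err != nil {
            panic(err)
        }
        shards[1] = nil // simulate a lost data shard
        if err := enc.ReconstructData(shards); err != nil {
            panic(err)
        }
        fmt.Println(string(shards[1])) // "pkt1"
    }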
997
vendor/github.com/xtaci/kcp-go/kcp.go generated vendored
@@ -1,997 +0,0 @@
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp
import (
"encoding/binary"
"sync/atomic"
)
const (
IKCP_RTO_NDL = 30 // no delay min rto
IKCP_RTO_MIN = 100 // normal min rto
IKCP_RTO_DEF = 200
IKCP_RTO_MAX = 60000
IKCP_CMD_PUSH = 81 // cmd: push data
IKCP_CMD_ACK = 82 // cmd: ack
IKCP_CMD_WASK = 83 // cmd: window probe (ask)
IKCP_CMD_WINS = 84 // cmd: window size (tell)
IKCP_ASK_SEND = 1 // need to send IKCP_CMD_WASK
IKCP_ASK_TELL = 2 // need to send IKCP_CMD_WINS
IKCP_WND_SND = 32
IKCP_WND_RCV = 32
IKCP_MTU_DEF = 1400
IKCP_ACK_FAST = 3
IKCP_INTERVAL = 100
IKCP_OVERHEAD = 24
IKCP_DEADLINK = 20
IKCP_THRESH_INIT = 2
IKCP_THRESH_MIN = 2
IKCP_PROBE_INIT = 7000 // 7 secs to probe window size
IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// output_callback is a prototype which ought to capture conn and call conn.Write
type output_callback func(buf []byte, size int)
/* encode 8 bits unsigned int */
func ikcp_encode8u(p []byte, c byte) []byte {
p[0] = c
return p[1:]
}
/* decode 8 bits unsigned int */
func ikcp_decode8u(p []byte, c *byte) []byte {
*c = p[0]
return p[1:]
}
/* encode 16 bits unsigned int (lsb) */
func ikcp_encode16u(p []byte, w uint16) []byte {
binary.LittleEndian.PutUint16(p, w)
return p[2:]
}
/* decode 16 bits unsigned int (lsb) */
func ikcp_decode16u(p []byte, w *uint16) []byte {
*w = binary.LittleEndian.Uint16(p)
return p[2:]
}
/* encode 32 bits unsigned int (lsb) */
func ikcp_encode32u(p []byte, l uint32) []byte {
binary.LittleEndian.PutUint32(p, l)
return p[4:]
}
/* decode 32 bits unsigned int (lsb) */
func ikcp_decode32u(p []byte, l *uint32) []byte {
*l = binary.LittleEndian.Uint32(p)
return p[4:]
}
func _imin_(a, b uint32) uint32 {
if a <= b {
return a
}
return b
}
func _imax_(a, b uint32) uint32 {
if a >= b {
return a
}
return b
}
func _ibound_(lower, middle, upper uint32) uint32 {
return _imin_(_imax_(lower, middle), upper)
}
func _itimediff(later, earlier uint32) int32 {
return (int32)(later - earlier)
}
// segment defines a KCP segment
type segment struct {
conv uint32
cmd uint8
frg uint8
wnd uint16
ts uint32
sn uint32
una uint32
rto uint32
xmit uint32
resendts uint32
fastack uint32
data []byte
}
// encode a segment into buffer
func (seg *segment) encode(ptr []byte) []byte {
ptr = ikcp_encode32u(ptr, seg.conv)
ptr = ikcp_encode8u(ptr, seg.cmd)
ptr = ikcp_encode8u(ptr, seg.frg)
ptr = ikcp_encode16u(ptr, seg.wnd)
ptr = ikcp_encode32u(ptr, seg.ts)
ptr = ikcp_encode32u(ptr, seg.sn)
ptr = ikcp_encode32u(ptr, seg.una)
ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
atomic.AddUint64(&DefaultSnmp.OutSegs, 1)
return ptr
}
// KCP defines a single KCP connection
type KCP struct {
conv, mtu, mss, state uint32
snd_una, snd_nxt, rcv_nxt uint32
ssthresh uint32
rx_rttvar, rx_srtt int32
rx_rto, rx_minrto uint32
snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32
interval, ts_flush uint32
nodelay, updated uint32
ts_probe, probe_wait uint32
dead_link, incr uint32
fastresend int32
nocwnd, stream int32
snd_queue []segment
rcv_queue []segment
snd_buf []segment
rcv_buf []segment
acklist []ackItem
buffer []byte
output output_callback
}
type ackItem struct {
sn uint32
ts uint32
}
// NewKCP creates a new KCP control object. 'conv' must be equal at both
// endpoints of the same connection.
func NewKCP(conv uint32, output output_callback) *KCP {
kcp := new(KCP)
kcp.conv = conv
kcp.snd_wnd = IKCP_WND_SND
kcp.rcv_wnd = IKCP_WND_RCV
kcp.rmt_wnd = IKCP_WND_RCV
kcp.mtu = IKCP_MTU_DEF
kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
kcp.rx_rto = IKCP_RTO_DEF
kcp.rx_minrto = IKCP_RTO_MIN
kcp.interval = IKCP_INTERVAL
kcp.ts_flush = IKCP_INTERVAL
kcp.ssthresh = IKCP_THRESH_INIT
kcp.dead_link = IKCP_DEADLINK
kcp.output = output
return kcp
}
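// Editorial sketch, not part of the original file: constructing a raw KCP
// object whose output callback captures a UDP connection, as the
// output_callback comment above suggests. The address and conv value are
// arbitrary; conv must match at both endpoints.
//
//	conn, err := net.Dial("udp", "127.0.0.1:12345")
//	if err != nil {
//		log.Fatal(err)
//	}
//	k := NewKCP(0x11223344, func(buf []byte, size int) {
//		conn.Write(buf[:size]) // forward each flushed segment to the peer
//	})
//	k.Send([]byte("hello")) // queued; sent on the next flush cycle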
// newSegment creates a KCP segment
func (kcp *KCP) newSegment(size int) (seg segment) {
seg.data = xmitBuf.Get().([]byte)[:size]
return
}
// delSegment recycles a KCP segment
func (kcp *KCP) delSegment(seg segment) {
xmitBuf.Put(seg.data)
}
// PeekSize checks the size of the next message in the recv queue
func (kcp *KCP) PeekSize() (length int) {
if len(kcp.rcv_queue) == 0 {
return -1
}
seg := &kcp.rcv_queue[0]
if seg.frg == 0 {
return len(seg.data)
}
if len(kcp.rcv_queue) < int(seg.frg+1) {
return -1
}
for k := range kcp.rcv_queue {
seg := &kcp.rcv_queue[k]
length += len(seg.data)
if seg.frg == 0 {
break
}
}
return
}
// Recv is user/upper level recv: returns size, returns below zero for EAGAIN
func (kcp *KCP) Recv(buffer []byte) (n int) {
if len(kcp.rcv_queue) == 0 {
return -1
}
peeksize := kcp.PeekSize()
if peeksize < 0 {
return -2
}
if peeksize > len(buffer) {
return -3
}
var fast_recover bool
if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
fast_recover = true
}
// merge fragment
count := 0
for k := range kcp.rcv_queue {
seg := &kcp.rcv_queue[k]
copy(buffer, seg.data)
buffer = buffer[len(seg.data):]
n += len(seg.data)
count++
kcp.delSegment(*seg)
if seg.frg == 0 {
break
}
}
if count > 0 {
kcp.rcv_queue = kcp.remove_front(kcp.rcv_queue, count)
}
// move available data from rcv_buf -> rcv_queue
count = 0
for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
kcp.rcv_nxt++
count++
} else {
break
}
}
if count > 0 {
kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count)
}
// fast recover
if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
// ready to send back IKCP_CMD_WINS in ikcp_flush
// tell remote my window size
kcp.probe |= IKCP_ASK_TELL
}
return
}
// Send is the user/upper level send; it returns a value below zero on error
func (kcp *KCP) Send(buffer []byte) int {
var count int
if len(buffer) == 0 {
return -1
}
// append to previous segment in streaming mode (if possible)
if kcp.stream != 0 {
n := len(kcp.snd_queue)
if n > 0 {
seg := &kcp.snd_queue[n-1]
if len(seg.data) < int(kcp.mss) {
capacity := int(kcp.mss) - len(seg.data)
extend := capacity
if len(buffer) < capacity {
extend = len(buffer)
}
// grow slice, the underlying cap is guaranteed to
// be larger than kcp.mss
oldlen := len(seg.data)
seg.data = seg.data[:oldlen+extend]
copy(seg.data[oldlen:], buffer)
buffer = buffer[extend:]
}
}
if len(buffer) == 0 {
return 0
}
}
if len(buffer) <= int(kcp.mss) {
count = 1
} else {
count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
}
if count > 255 {
return -2
}
if count == 0 {
count = 1
}
for i := 0; i < count; i++ {
var size int
if len(buffer) > int(kcp.mss) {
size = int(kcp.mss)
} else {
size = len(buffer)
}
seg := kcp.newSegment(size)
copy(seg.data, buffer[:size])
if kcp.stream == 0 { // message mode
seg.frg = uint8(count - i - 1)
} else { // stream mode
seg.frg = 0
}
kcp.snd_queue = append(kcp.snd_queue, seg)
buffer = buffer[size:]
}
return 0
}
func (kcp *KCP) update_ack(rtt int32) {
// https://tools.ietf.org/html/rfc6298
var rto uint32
if kcp.rx_srtt == 0 {
kcp.rx_srtt = rtt
kcp.rx_rttvar = rtt >> 1
} else {
delta := rtt - kcp.rx_srtt
kcp.rx_srtt += delta >> 3
if delta < 0 {
delta = -delta
}
if rtt < kcp.rx_srtt-kcp.rx_rttvar {
// if the new RTT sample is below the bottom of the range of what an
// RTT measurement is expected to be, give it an 8x reduced weight
// versus its normal weighting
kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 5
} else {
kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 2
}
}
rto = uint32(kcp.rx_srtt) + _imax_(kcp.interval, uint32(kcp.rx_rttvar)<<2)
kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
}
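// Illustrative aside (not part of the original file): with rx_srtt = 100 ms,
// rx_rttvar = 20 ms and interval = 100 ms, the formula above yields
// rto = 100 + max(100, 4*20) = 200 ms, which _ibound_ then clamps to the
// range [rx_minrto, IKCP_RTO_MAX].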
func (kcp *KCP) shrink_buf() {
if len(kcp.snd_buf) > 0 {
seg := &kcp.snd_buf[0]
kcp.snd_una = seg.sn
} else {
kcp.snd_una = kcp.snd_nxt
}
}
func (kcp *KCP) parse_ack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if sn == seg.sn {
kcp.delSegment(*seg)
copy(kcp.snd_buf[k:], kcp.snd_buf[k+1:])
kcp.snd_buf[len(kcp.snd_buf)-1] = segment{}
kcp.snd_buf = kcp.snd_buf[:len(kcp.snd_buf)-1]
break
}
if _itimediff(sn, seg.sn) < 0 {
break
}
}
}
func (kcp *KCP) parse_fastack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if _itimediff(sn, seg.sn) < 0 {
break
} else if sn != seg.sn {
seg.fastack++
}
}
}
func (kcp *KCP) parse_una(una uint32) {
count := 0
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if _itimediff(una, seg.sn) > 0 {
kcp.delSegment(*seg)
count++
} else {
break
}
}
if count > 0 {
kcp.snd_buf = kcp.remove_front(kcp.snd_buf, count)
}
}
// ack append
func (kcp *KCP) ack_push(sn, ts uint32) {
kcp.acklist = append(kcp.acklist, ackItem{sn, ts})
}
func (kcp *KCP) parse_data(newseg segment) {
sn := newseg.sn
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
_itimediff(sn, kcp.rcv_nxt) < 0 {
kcp.delSegment(newseg)
return
}
n := len(kcp.rcv_buf) - 1
insert_idx := 0
repeat := false
for i := n; i >= 0; i-- {
seg := &kcp.rcv_buf[i]
if seg.sn == sn {
repeat = true
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
break
}
if _itimediff(sn, seg.sn) > 0 {
insert_idx = i + 1
break
}
}
if !repeat {
if insert_idx == n+1 {
kcp.rcv_buf = append(kcp.rcv_buf, newseg)
} else {
kcp.rcv_buf = append(kcp.rcv_buf, segment{})
copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
kcp.rcv_buf[insert_idx] = newseg
}
} else {
kcp.delSegment(newseg)
}
// move available data from rcv_buf -> rcv_queue
count := 0
for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
kcp.rcv_nxt++
count++
} else {
break
}
}
if count > 0 {
kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count)
}
}
// Input is called when a low-level packet (e.g. a UDP packet) has been received;
// 'regular' indicates that the packet was received directly rather than recovered by FEC
func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
una := kcp.snd_una
if len(data) < IKCP_OVERHEAD {
return -1
}
var maxack uint32
var lastackts uint32
var flag int
var inSegs uint64
for {
var ts, sn, length, una, conv uint32
var wnd uint16
var cmd, frg uint8
if len(data) < int(IKCP_OVERHEAD) {
break
}
data = ikcp_decode32u(data, &conv)
if conv != kcp.conv {
return -1
}
data = ikcp_decode8u(data, &cmd)
data = ikcp_decode8u(data, &frg)
data = ikcp_decode16u(data, &wnd)
data = ikcp_decode32u(data, &ts)
data = ikcp_decode32u(data, &sn)
data = ikcp_decode32u(data, &una)
data = ikcp_decode32u(data, &length)
if len(data) < int(length) {
return -2
}
if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
return -3
}
// only trust window updates from regular packets. i.e: latest update
if regular {
kcp.rmt_wnd = uint32(wnd)
}
kcp.parse_una(una)
kcp.shrink_buf()
if cmd == IKCP_CMD_ACK {
kcp.parse_ack(sn)
kcp.shrink_buf()
if flag == 0 {
flag = 1
maxack = sn
lastackts = ts
} else if _itimediff(sn, maxack) > 0 {
maxack = sn
lastackts = ts
}
} else if cmd == IKCP_CMD_PUSH {
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
kcp.ack_push(sn, ts)
if _itimediff(sn, kcp.rcv_nxt) >= 0 {
seg := kcp.newSegment(int(length))
seg.conv = conv
seg.cmd = cmd
seg.frg = frg
seg.wnd = wnd
seg.ts = ts
seg.sn = sn
seg.una = una
copy(seg.data, data[:length])
kcp.parse_data(seg)
} else {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
}
} else {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
}
} else if cmd == IKCP_CMD_WASK {
// ready to send back IKCP_CMD_WINS in Ikcp_flush
// tell remote my window size
kcp.probe |= IKCP_ASK_TELL
} else if cmd == IKCP_CMD_WINS {
// do nothing
} else {
return -3
}
inSegs++
data = data[length:]
}
atomic.AddUint64(&DefaultSnmp.InSegs, inSegs)
if flag != 0 && regular {
kcp.parse_fastack(maxack)
current := currentMs()
if _itimediff(current, lastackts) >= 0 {
kcp.update_ack(_itimediff(current, lastackts))
}
}
if _itimediff(kcp.snd_una, una) > 0 {
if kcp.cwnd < kcp.rmt_wnd {
mss := kcp.mss
if kcp.cwnd < kcp.ssthresh {
kcp.cwnd++
kcp.incr += mss
} else {
if kcp.incr < mss {
kcp.incr = mss
}
kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
if (kcp.cwnd+1)*mss <= kcp.incr {
kcp.cwnd++
}
}
if kcp.cwnd > kcp.rmt_wnd {
kcp.cwnd = kcp.rmt_wnd
kcp.incr = kcp.rmt_wnd * mss
}
}
}
if ackNoDelay && len(kcp.acklist) > 0 { // ack immediately
kcp.flush(true)
}
return 0
}
func (kcp *KCP) wnd_unused() uint16 {
if len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
return uint16(int(kcp.rcv_wnd) - len(kcp.rcv_queue))
}
return 0
}
// flush pending data
func (kcp *KCP) flush(ackOnly bool) {
var seg segment
seg.conv = kcp.conv
seg.cmd = IKCP_CMD_ACK
seg.wnd = kcp.wnd_unused()
seg.una = kcp.rcv_nxt
buffer := kcp.buffer
// flush acknowledges
ptr := buffer
for i, ack := range kcp.acklist {
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
// filter jitters caused by bufferbloat
if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i {
seg.sn, seg.ts = ack.sn, ack.ts
ptr = seg.encode(ptr)
}
}
kcp.acklist = kcp.acklist[0:0]
if ackOnly { // flush remaining ACK segments
size := len(buffer) - len(ptr)
if size > 0 {
kcp.output(buffer, size)
}
return
}
// probe window size (if remote window size equals zero)
if kcp.rmt_wnd == 0 {
current := currentMs()
if kcp.probe_wait == 0 {
kcp.probe_wait = IKCP_PROBE_INIT
kcp.ts_probe = current + kcp.probe_wait
} else {
if _itimediff(current, kcp.ts_probe) >= 0 {
if kcp.probe_wait < IKCP_PROBE_INIT {
kcp.probe_wait = IKCP_PROBE_INIT
}
kcp.probe_wait += kcp.probe_wait / 2
if kcp.probe_wait > IKCP_PROBE_LIMIT {
kcp.probe_wait = IKCP_PROBE_LIMIT
}
kcp.ts_probe = current + kcp.probe_wait
kcp.probe |= IKCP_ASK_SEND
}
}
} else {
kcp.ts_probe = 0
kcp.probe_wait = 0
}
// flush window probing commands
if (kcp.probe & IKCP_ASK_SEND) != 0 {
seg.cmd = IKCP_CMD_WASK
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr)
}
// flush window size reply (IKCP_CMD_WINS)
if (kcp.probe & IKCP_ASK_TELL) != 0 {
seg.cmd = IKCP_CMD_WINS
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr)
}
kcp.probe = 0
// calculate window size
cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
if kcp.nocwnd == 0 {
cwnd = _imin_(kcp.cwnd, cwnd)
}
// sliding window, controlled by snd_nxt && snd_una+cwnd
newSegsCount := 0
for k := range kcp.snd_queue {
if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
break
}
newseg := kcp.snd_queue[k]
newseg.conv = kcp.conv
newseg.cmd = IKCP_CMD_PUSH
newseg.sn = kcp.snd_nxt
kcp.snd_buf = append(kcp.snd_buf, newseg)
kcp.snd_nxt++
newSegsCount++
kcp.snd_queue[k].data = nil
}
if newSegsCount > 0 {
kcp.snd_queue = kcp.remove_front(kcp.snd_queue, newSegsCount)
}
// calculate resent
resent := uint32(kcp.fastresend)
if kcp.fastresend <= 0 {
resent = 0xffffffff
}
// check for retransmissions
current := currentMs()
var change, lost, lostSegs, fastRetransSegs, earlyRetransSegs uint64
for k := range kcp.snd_buf {
segment := &kcp.snd_buf[k]
needsend := false
if segment.xmit == 0 { // initial transmit
needsend = true
segment.rto = kcp.rx_rto
segment.resendts = current + segment.rto
} else if _itimediff(current, segment.resendts) >= 0 { // RTO
needsend = true
if kcp.nodelay == 0 {
segment.rto += kcp.rx_rto
} else {
segment.rto += kcp.rx_rto / 2
}
segment.resendts = current + segment.rto
lost++
lostSegs++
} else if segment.fastack >= resent { // fast retransmit
needsend = true
segment.fastack = 0
segment.rto = kcp.rx_rto
segment.resendts = current + segment.rto
change++
fastRetransSegs++
} else if segment.fastack > 0 && newSegsCount == 0 { // early retransmit
needsend = true
segment.fastack = 0
segment.rto = kcp.rx_rto
segment.resendts = current + segment.rto
change++
earlyRetransSegs++
}
if needsend {
segment.xmit++
segment.ts = current
segment.wnd = seg.wnd
segment.una = seg.una
size := len(buffer) - len(ptr)
need := IKCP_OVERHEAD + len(segment.data)
if size+need > int(kcp.mtu) {
kcp.output(buffer, size)
current = currentMs() // time update for a blocking call
ptr = buffer
}
ptr = segment.encode(ptr)
copy(ptr, segment.data)
ptr = ptr[len(segment.data):]
if segment.xmit >= kcp.dead_link {
kcp.state = 0xFFFFFFFF
}
}
}
// flush remaining segments
size := len(buffer) - len(ptr)
if size > 0 {
kcp.output(buffer, size)
}
// counter updates
sum := lostSegs
if lostSegs > 0 {
atomic.AddUint64(&DefaultSnmp.LostSegs, lostSegs)
}
if fastRetransSegs > 0 {
atomic.AddUint64(&DefaultSnmp.FastRetransSegs, fastRetransSegs)
sum += fastRetransSegs
}
if earlyRetransSegs > 0 {
atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, earlyRetransSegs)
sum += earlyRetransSegs
}
if sum > 0 {
atomic.AddUint64(&DefaultSnmp.RetransSegs, sum)
}
// update ssthresh
// rate halving, https://tools.ietf.org/html/rfc6937
if change > 0 {
inflight := kcp.snd_nxt - kcp.snd_una
kcp.ssthresh = inflight / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = kcp.ssthresh + resent
kcp.incr = kcp.cwnd * kcp.mss
}
// congestion control, https://tools.ietf.org/html/rfc5681
if lost > 0 {
kcp.ssthresh = cwnd / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = 1
kcp.incr = kcp.mss
}
if kcp.cwnd < 1 {
kcp.cwnd = 1
kcp.incr = kcp.mss
}
}
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// Check when to call it again (when there has been no Input/Send in between).
// The current timestamp in milliseconds is obtained internally via currentMs.
func (kcp *KCP) Update() {
var slap int32
current := currentMs()
if kcp.updated == 0 {
kcp.updated = 1
kcp.ts_flush = current
}
slap = _itimediff(current, kcp.ts_flush)
if slap >= 10000 || slap < -10000 {
kcp.ts_flush = current
slap = 0
}
if slap >= 0 {
kcp.ts_flush += kcp.interval
if _itimediff(current, kcp.ts_flush) >= 0 {
kcp.ts_flush = current + kcp.interval
}
kcp.flush(false)
}
}
// Check determines when you should next invoke Update: it returns the time,
// in milliseconds, at which Update should be called if there is no Input/Send
// in the meantime. You can call Update at that time instead of calling it
// repeatedly, which avoids unnecessary Update invocations; use it to schedule
// Update (e.g. when implementing an epoll-like mechanism, or to optimize
// Update when handling a large number of KCP connections).
func (kcp *KCP) Check() uint32 {
current := currentMs()
ts_flush := kcp.ts_flush
tm_flush := int32(0x7fffffff)
tm_packet := int32(0x7fffffff)
minimal := uint32(0)
if kcp.updated == 0 {
return current
}
if _itimediff(current, ts_flush) >= 10000 ||
_itimediff(current, ts_flush) < -10000 {
ts_flush = current
}
if _itimediff(current, ts_flush) >= 0 {
return current
}
tm_flush = _itimediff(ts_flush, current)
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
diff := _itimediff(seg.resendts, current)
if diff <= 0 {
return current
}
if diff < tm_packet {
tm_packet = diff
}
}
minimal = uint32(tm_packet)
if tm_packet >= tm_flush {
minimal = uint32(tm_flush)
}
if minimal >= kcp.interval {
minimal = kcp.interval
}
return current + minimal
}
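// Illustrative sketch (not part of the original file): a goroutine that owns a
// KCP object can sleep until the timestamp returned by Check and only then call
// Update, instead of waking on a fixed timer; time.Sleep is shown purely for
// illustration:
//
//	for {
//		if next := kcp.Check(); next > currentMs() {
//			time.Sleep(time.Duration(next-currentMs()) * time.Millisecond)
//		}
//		kcp.Update()
//	}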
// SetMtu changes MTU size, default is 1400
func (kcp *KCP) SetMtu(mtu int) int {
if mtu < 50 || mtu < IKCP_OVERHEAD {
return -1
}
buffer := make([]byte, (mtu+IKCP_OVERHEAD)*3)
if buffer == nil {
return -2
}
kcp.mtu = uint32(mtu)
kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = buffer
return 0
}
// NoDelay options
// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
// nodelay: 0:disable(default), 1:enable
// interval: internal update timer interval in millisec, default is 100ms
// resend: 0:disable fast resend(default), 1:enable fast resend
// nc: 0:normal congestion control(default), 1:disable congestion control
func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
if nodelay >= 0 {
kcp.nodelay = uint32(nodelay)
if nodelay != 0 {
kcp.rx_minrto = IKCP_RTO_NDL
} else {
kcp.rx_minrto = IKCP_RTO_MIN
}
}
if interval >= 0 {
if interval > 5000 {
interval = 5000
} else if interval < 10 {
interval = 10
}
kcp.interval = uint32(interval)
}
if resend >= 0 {
kcp.fastresend = int32(resend)
}
if nc >= 0 {
kcp.nocwnd = int32(nc)
}
return 0
}
// WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
if sndwnd > 0 {
kcp.snd_wnd = uint32(sndwnd)
}
if rcvwnd > 0 {
kcp.rcv_wnd = uint32(rcvwnd)
}
return 0
}
// WaitSnd returns how many packets are waiting to be sent
func (kcp *KCP) WaitSnd() int {
return len(kcp.snd_buf) + len(kcp.snd_queue)
}
// remove_front removes the front n elements from the queue
func (kcp *KCP) remove_front(q []segment, n int) []segment {
newn := copy(q, q[n:])
for i := newn; i < len(q); i++ {
q[i] = segment{} // manual set nil for GC
}
return q[:newn]
}
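To make the ARQ API removed above concrete, here is a minimal, self-contained sketch (not taken from the Syncthing tree) that wires two bare KCP objects back to back: each output callback feeds the other side's Input, standing in for the UDP socket that the UDPSession code elsewhere in this package normally provides. The conversation ID 42 is arbitrary, and the NoDelay values are the "fastest" profile from the NoDelay comment above.
package main

import (
	"fmt"
	"time"

	kcp "github.com/xtaci/kcp-go"
)

func main() {
	var alice, bob *kcp.KCP
	// Each side's output callback hands the raw segment bytes straight to the
	// other side's Input, in place of a real UDP socket.
	alice = kcp.NewKCP(42, func(buf []byte, size int) { bob.Input(buf[:size], true, false) })
	bob = kcp.NewKCP(42, func(buf []byte, size int) { alice.Input(buf[:size], true, false) })
	// "fastest" profile: nodelay on, 20 ms timer, fast resend after 2 dup ACKs,
	// congestion control off.
	alice.NoDelay(1, 20, 2, 1)
	bob.NoDelay(1, 20, 2, 1)

	alice.Send([]byte("hello over KCP"))
	// Drive both state machines so the segment is flushed, delivered and ACKed.
	for i := 0; i < 5; i++ {
		alice.Update()
		bob.Update()
		time.Sleep(20 * time.Millisecond)
	}

	buf := make([]byte, 4096)
	if n := bob.Recv(buf); n > 0 {
		fmt.Println(string(buf[:n])) // "hello over KCP"
	}
}
In the real transport the output callback instead writes to a net.PacketConn, as UDPSession.output does further down in this diff.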


@ -1,25 +0,0 @@
package kcp
import (
"crypto/md5"
"crypto/rand"
"io"
)
// nonceMD5 is a nonce generator for each packet header. It takes advantage of
// both MD5 and a CSPRNG (like /dev/urandom); benchmarks show it is faster than
// the previous CSPRNG-only method.
type nonceMD5 struct {
data [md5.Size]byte
}
// Fill writes a nonce of no more than md5.Size bytes into the provided slice;
// the entropy is refreshed whenever a leading zero byte appears
func (n *nonceMD5) Fill(nonce []byte) {
if n.data[0] == 0 { // 1/256 chance for entropy update
io.ReadFull(rand.Reader, n.data[:])
}
n.data = md5.Sum(n.data[:])
copy(nonce, n.data[:])
return
}


@ -1,964 +0,0 @@
package kcp
import (
"crypto/rand"
"encoding/binary"
"hash/crc32"
"net"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
)
type errTimeout struct {
error
}
func (errTimeout) Timeout() bool { return true }
func (errTimeout) Temporary() bool { return true }
func (errTimeout) Error() string { return "i/o timeout" }
const (
// 16-bytes magic number for each packet
nonceSize = 16
// 4-bytes packet checksum
crcSize = 4
// overall crypto header size
cryptHeaderSize = nonceSize + crcSize
// maximum packet size
mtuLimit = 1500
// FEC keeps rxFECMulti* (dataShard+parityShard) ordered packets in memory
rxFECMulti = 3
// accept backlog
acceptBacklog = 128
// prerouting(to session) queue
qlen = 128
)
const (
errBrokenPipe = "broken pipe"
errInvalidOperation = "invalid operation"
)
var (
// global packet buffer
// shared among sending/receiving/FEC
xmitBuf sync.Pool
)
func init() {
xmitBuf.New = func() interface{} {
return make([]byte, mtuLimit)
}
}
type (
// UDPSession defines a KCP session implemented by UDP
UDPSession struct {
updaterIdx int // record slice index in updater
conn net.PacketConn // the underlying packet connection
kcp *KCP // KCP ARQ protocol
l *Listener // point to the Listener if it's accepted by Listener
block BlockCrypt // block encryption
// kcp receiving is based on packets
// recvbuf turns packets into stream
recvbuf []byte
bufptr []byte
// extended output buffer(with header)
ext []byte
// FEC
fecDecoder *fecDecoder
fecEncoder *fecEncoder
// settings
remote net.Addr // remote peer address
rd time.Time // read deadline
wd time.Time // write deadline
headerSize int // the overall header size added before KCP frame
ackNoDelay bool // send ack immediately for each incoming packet
writeDelay bool // delay kcp.flush() for Write() for bulk transfer
dup int // duplicate udp packets
// notifications
die chan struct{} // notify session has Closed
chReadEvent chan struct{} // notify Read() can be called without blocking
chWriteEvent chan struct{} // notify Write() can be called without blocking
chErrorEvent chan error // notify Read() have an error
// nonce generator
nonce nonceMD5
isClosed bool // flag the session has Closed
mu sync.Mutex
}
setReadBuffer interface {
SetReadBuffer(bytes int) error
}
setWriteBuffer interface {
SetWriteBuffer(bytes int) error
}
)
// newUDPSession creates a new UDP session for client or server
func newUDPSession(conv uint32, dataShards, parityShards int, l *Listener, conn net.PacketConn, remote net.Addr, block BlockCrypt) *UDPSession {
sess := new(UDPSession)
sess.die = make(chan struct{})
sess.chReadEvent = make(chan struct{}, 1)
sess.chWriteEvent = make(chan struct{}, 1)
sess.chErrorEvent = make(chan error, 1)
sess.remote = remote
sess.conn = conn
sess.l = l
sess.block = block
sess.recvbuf = make([]byte, mtuLimit)
// FEC initialization
sess.fecDecoder = newFECDecoder(rxFECMulti*(dataShards+parityShards), dataShards, parityShards)
if sess.block != nil {
sess.fecEncoder = newFECEncoder(dataShards, parityShards, cryptHeaderSize)
} else {
sess.fecEncoder = newFECEncoder(dataShards, parityShards, 0)
}
// calculate header size
if sess.block != nil {
sess.headerSize += cryptHeaderSize
}
if sess.fecEncoder != nil {
sess.headerSize += fecHeaderSizePlus2
}
// only allocate extended packet buffer
// when the extra header is required
if sess.headerSize > 0 {
sess.ext = make([]byte, mtuLimit)
}
sess.kcp = NewKCP(conv, func(buf []byte, size int) {
if size >= IKCP_OVERHEAD {
sess.output(buf[:size])
}
})
sess.kcp.SetMtu(IKCP_MTU_DEF - sess.headerSize)
blacklist.add(remote.String(), conv)
// add current session to the global updater,
// which periodically calls sess.update()
updater.addSession(sess)
if sess.l == nil { // it's a client connection
go sess.readLoop()
atomic.AddUint64(&DefaultSnmp.ActiveOpens, 1)
} else {
atomic.AddUint64(&DefaultSnmp.PassiveOpens, 1)
}
currestab := atomic.AddUint64(&DefaultSnmp.CurrEstab, 1)
maxconn := atomic.LoadUint64(&DefaultSnmp.MaxConn)
if currestab > maxconn {
atomic.CompareAndSwapUint64(&DefaultSnmp.MaxConn, maxconn, currestab)
}
return sess
}
// Read implements net.Conn
func (s *UDPSession) Read(b []byte) (n int, err error) {
for {
s.mu.Lock()
if len(s.bufptr) > 0 { // copy from buffer into b
n = copy(b, s.bufptr)
s.bufptr = s.bufptr[n:]
s.mu.Unlock()
return n, nil
}
if s.isClosed {
s.mu.Unlock()
return 0, errors.New(errBrokenPipe)
}
if size := s.kcp.PeekSize(); size > 0 { // peek data size from kcp
atomic.AddUint64(&DefaultSnmp.BytesReceived, uint64(size))
if len(b) >= size { // direct write to b
s.kcp.Recv(b)
s.mu.Unlock()
return size, nil
}
// resize kcp receive buffer
// to make sure recvbuf has enough capacity
if cap(s.recvbuf) < size {
s.recvbuf = make([]byte, size)
}
// resize recvbuf slice length
s.recvbuf = s.recvbuf[:size]
s.kcp.Recv(s.recvbuf)
n = copy(b, s.recvbuf) // copy to b
s.bufptr = s.recvbuf[n:] // update pointer
s.mu.Unlock()
return n, nil
}
// read deadline
var timeout *time.Timer
var c <-chan time.Time
if !s.rd.IsZero() {
if time.Now().After(s.rd) {
s.mu.Unlock()
return 0, errTimeout{}
}
delay := s.rd.Sub(time.Now())
timeout = time.NewTimer(delay)
c = timeout.C
}
s.mu.Unlock()
// wait for read event or timeout
select {
case <-s.chReadEvent:
case <-c:
case <-s.die:
case err = <-s.chErrorEvent:
if timeout != nil {
timeout.Stop()
}
return n, err
}
if timeout != nil {
timeout.Stop()
}
}
}
// Write implements net.Conn
func (s *UDPSession) Write(b []byte) (n int, err error) {
for {
s.mu.Lock()
if s.isClosed {
s.mu.Unlock()
return 0, errors.New(errBrokenPipe)
}
// api flow control
if s.kcp.WaitSnd() < int(s.kcp.snd_wnd) {
n = len(b)
for {
if len(b) <= int(s.kcp.mss) {
s.kcp.Send(b)
break
} else {
s.kcp.Send(b[:s.kcp.mss])
b = b[s.kcp.mss:]
}
}
if !s.writeDelay {
s.kcp.flush(false)
}
s.mu.Unlock()
atomic.AddUint64(&DefaultSnmp.BytesSent, uint64(n))
return n, nil
}
// write deadline
var timeout *time.Timer
var c <-chan time.Time
if !s.wd.IsZero() {
if time.Now().After(s.wd) {
s.mu.Unlock()
return 0, errTimeout{}
}
delay := s.wd.Sub(time.Now())
timeout = time.NewTimer(delay)
c = timeout.C
}
s.mu.Unlock()
// wait for write event or timeout
select {
case <-s.chWriteEvent:
case <-c:
case <-s.die:
}
if timeout != nil {
timeout.Stop()
}
}
}
// Close closes the connection.
func (s *UDPSession) Close() error {
// remove this session from updater & listener(if necessary)
updater.removeSession(s)
if s.l != nil { // notify listener
s.l.closeSession(sessionKey{
addr: s.remote.String(),
convID: s.kcp.conv,
})
}
s.mu.Lock()
defer s.mu.Unlock()
if s.isClosed {
return errors.New(errBrokenPipe)
}
close(s.die)
s.isClosed = true
atomic.AddUint64(&DefaultSnmp.CurrEstab, ^uint64(0))
if s.l == nil { // client socket close
return s.conn.Close()
}
return nil
}
// LocalAddr returns the local network address. The Addr returned is shared by all invocations of LocalAddr, so do not modify it.
func (s *UDPSession) LocalAddr() net.Addr { return s.conn.LocalAddr() }
// RemoteAddr returns the remote network address. The Addr returned is shared by all invocations of RemoteAddr, so do not modify it.
func (s *UDPSession) RemoteAddr() net.Addr { return s.remote }
// SetDeadline sets the read and write deadlines associated with the connection. A zero time value disables the deadlines.
func (s *UDPSession) SetDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.rd = t
s.wd = t
return nil
}
// SetReadDeadline implements the Conn SetReadDeadline method.
func (s *UDPSession) SetReadDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.rd = t
return nil
}
// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (s *UDPSession) SetWriteDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.wd = t
return nil
}
// SetWriteDelay delays write for bulk transfer until the next update interval
func (s *UDPSession) SetWriteDelay(delay bool) {
s.mu.Lock()
defer s.mu.Unlock()
s.writeDelay = delay
}
// SetWindowSize set maximum window size
func (s *UDPSession) SetWindowSize(sndwnd, rcvwnd int) {
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.WndSize(sndwnd, rcvwnd)
}
// SetMtu sets the maximum transmission unit(not including UDP header)
func (s *UDPSession) SetMtu(mtu int) bool {
if mtu > mtuLimit {
return false
}
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.SetMtu(mtu - s.headerSize)
return true
}
// SetStreamMode toggles the stream mode on/off
func (s *UDPSession) SetStreamMode(enable bool) {
s.mu.Lock()
defer s.mu.Unlock()
if enable {
s.kcp.stream = 1
} else {
s.kcp.stream = 0
}
}
// SetACKNoDelay changes the ACK flush option; set true to flush ACKs immediately
func (s *UDPSession) SetACKNoDelay(nodelay bool) {
s.mu.Lock()
defer s.mu.Unlock()
s.ackNoDelay = nodelay
}
// SetDUP duplicates udp packets for kcp output, for testing purpose only
func (s *UDPSession) SetDUP(dup int) {
s.mu.Lock()
defer s.mu.Unlock()
s.dup = dup
}
// SetNoDelay calls nodelay() of kcp
// https://github.com/skywind3000/kcp/blob/master/README.en.md#protocol-configuration
func (s *UDPSession) SetNoDelay(nodelay, interval, resend, nc int) {
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.NoDelay(nodelay, interval, resend, nc)
}
// SetDSCP sets the 6bit DSCP field of IP header, no effect if it's accepted from Listener
func (s *UDPSession) SetDSCP(dscp int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(*connectedUDPConn); ok {
return ipv4.NewConn(nc.UDPConn).SetTOS(dscp << 2)
} else if nc, ok := s.conn.(net.Conn); ok {
return ipv4.NewConn(nc).SetTOS(dscp << 2)
}
}
return errors.New(errInvalidOperation)
}
// SetReadBuffer sets the socket read buffer, no effect if it's accepted from Listener
func (s *UDPSession) SetReadBuffer(bytes int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(setReadBuffer); ok {
return nc.SetReadBuffer(bytes)
}
}
return errors.New(errInvalidOperation)
}
// SetWriteBuffer sets the socket write buffer, no effect if it's accepted from Listener
func (s *UDPSession) SetWriteBuffer(bytes int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(setWriteBuffer); ok {
return nc.SetWriteBuffer(bytes)
}
}
return errors.New(errInvalidOperation)
}
// output pipeline entry
// steps for output data processing:
// 0. Header extends
// 1. FEC
// 2. CRC32
// 3. Encryption
// 4. WriteTo kernel
func (s *UDPSession) output(buf []byte) {
var ecc [][]byte
// 0. extend buf's header space(if necessary)
ext := buf
if s.headerSize > 0 {
ext = s.ext[:s.headerSize+len(buf)]
copy(ext[s.headerSize:], buf)
}
// 1. FEC encoding
if s.fecEncoder != nil {
ecc = s.fecEncoder.encode(ext)
}
// 2&3. crc32 & encryption
if s.block != nil {
s.nonce.Fill(ext[:nonceSize])
checksum := crc32.ChecksumIEEE(ext[cryptHeaderSize:])
binary.LittleEndian.PutUint32(ext[nonceSize:], checksum)
s.block.Encrypt(ext, ext)
for k := range ecc {
s.nonce.Fill(ecc[k][:nonceSize])
checksum := crc32.ChecksumIEEE(ecc[k][cryptHeaderSize:])
binary.LittleEndian.PutUint32(ecc[k][nonceSize:], checksum)
s.block.Encrypt(ecc[k], ecc[k])
}
}
// 4. WriteTo kernel
nbytes := 0
npkts := 0
for i := 0; i < s.dup+1; i++ {
if n, err := s.conn.WriteTo(ext, s.remote); err == nil {
nbytes += n
npkts++
}
}
for k := range ecc {
if n, err := s.conn.WriteTo(ecc[k], s.remote); err == nil {
nbytes += n
npkts++
}
}
atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
}
// update flushes pending KCP data and returns the interval until the next call
func (s *UDPSession) update() (interval time.Duration) {
s.mu.Lock()
s.kcp.flush(false)
if s.kcp.WaitSnd() < int(s.kcp.snd_wnd) {
s.notifyWriteEvent()
}
interval = time.Duration(s.kcp.interval) * time.Millisecond
s.mu.Unlock()
return
}
// GetConv gets conversation id of a session
func (s *UDPSession) GetConv() uint32 { return s.kcp.conv }
func (s *UDPSession) notifyReadEvent() {
select {
case s.chReadEvent <- struct{}{}:
default:
}
}
func (s *UDPSession) notifyWriteEvent() {
select {
case s.chWriteEvent <- struct{}{}:
default:
}
}
func (s *UDPSession) kcpInput(data []byte) {
var kcpInErrors, fecErrs, fecRecovered, fecParityShards uint64
if s.fecDecoder != nil {
f := s.fecDecoder.decodeBytes(data)
s.mu.Lock()
if f.flag == typeData {
if ret := s.kcp.Input(data[fecHeaderSizePlus2:], true, s.ackNoDelay); ret != 0 {
kcpInErrors++
}
}
if f.flag == typeData || f.flag == typeFEC {
if f.flag == typeFEC {
fecParityShards++
}
recovers := s.fecDecoder.decode(f)
for _, r := range recovers {
if len(r) >= 2 { // must be larger than 2bytes
sz := binary.LittleEndian.Uint16(r)
if int(sz) <= len(r) && sz >= 2 {
if ret := s.kcp.Input(r[2:sz], false, s.ackNoDelay); ret == 0 {
fecRecovered++
} else {
kcpInErrors++
}
} else {
fecErrs++
}
} else {
fecErrs++
}
}
}
// notify reader
if n := s.kcp.PeekSize(); n > 0 {
s.notifyReadEvent()
}
s.mu.Unlock()
} else {
s.mu.Lock()
if ret := s.kcp.Input(data, true, s.ackNoDelay); ret != 0 {
kcpInErrors++
}
// notify reader
if n := s.kcp.PeekSize(); n > 0 {
s.notifyReadEvent()
}
s.mu.Unlock()
}
atomic.AddUint64(&DefaultSnmp.InPkts, 1)
atomic.AddUint64(&DefaultSnmp.InBytes, uint64(len(data)))
if fecParityShards > 0 {
atomic.AddUint64(&DefaultSnmp.FECParityShards, fecParityShards)
}
if kcpInErrors > 0 {
atomic.AddUint64(&DefaultSnmp.KCPInErrors, kcpInErrors)
}
if fecErrs > 0 {
atomic.AddUint64(&DefaultSnmp.FECErrs, fecErrs)
}
if fecRecovered > 0 {
atomic.AddUint64(&DefaultSnmp.FECRecovered, fecRecovered)
}
}
func (s *UDPSession) receiver(ch chan<- []byte) {
for {
data := xmitBuf.Get().([]byte)[:mtuLimit]
if n, _, err := s.conn.ReadFrom(data); err == nil && n >= s.headerSize+IKCP_OVERHEAD {
select {
case ch <- data[:n]:
case <-s.die:
return
}
} else if err != nil {
s.chErrorEvent <- err
return
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
}
}
// read loop for client session
func (s *UDPSession) readLoop() {
chPacket := make(chan []byte, qlen)
go s.receiver(chPacket)
for {
select {
case data := <-chPacket:
raw := data
dataValid := false
if s.block != nil {
s.block.Decrypt(data, data)
data = data[nonceSize:]
checksum := crc32.ChecksumIEEE(data[crcSize:])
if checksum == binary.LittleEndian.Uint32(data) {
data = data[crcSize:]
dataValid = true
} else {
atomic.AddUint64(&DefaultSnmp.InCsumErrors, 1)
}
} else if s.block == nil {
dataValid = true
}
if dataValid {
s.kcpInput(data)
}
xmitBuf.Put(raw)
case <-s.die:
return
}
}
}
type (
sessionKey struct {
addr string
convID uint32
}
// Listener defines a server listening for connections
Listener struct {
block BlockCrypt // block encryption
dataShards int // FEC data shard
parityShards int // FEC parity shard
fecDecoder *fecDecoder // FEC mock initialization
conn net.PacketConn // the underlying packet connection
sessions map[sessionKey]*UDPSession // all sessions accepted by this Listener
chAccepts chan *UDPSession // Listen() backlog
chSessionClosed chan sessionKey // session close queue
headerSize int // the overall header size added before KCP frame
die chan struct{} // notify the listener has closed
rd atomic.Value // read deadline for Accept()
wd atomic.Value
}
// incoming packet
inPacket struct {
from net.Addr
data []byte
}
)
// monitor incoming data for all connections of server
func (l *Listener) monitor() {
// cache last session
var lastKey sessionKey
var lastSession *UDPSession
chPacket := make(chan inPacket, qlen)
go l.receiver(chPacket)
for {
select {
case p := <-chPacket:
raw := p.data
data := p.data
from := p.from
dataValid := false
if l.block != nil {
l.block.Decrypt(data, data)
data = data[nonceSize:]
checksum := crc32.ChecksumIEEE(data[crcSize:])
if checksum == binary.LittleEndian.Uint32(data) {
data = data[crcSize:]
dataValid = true
} else {
atomic.AddUint64(&DefaultSnmp.InCsumErrors, 1)
}
} else if l.block == nil {
dataValid = true
}
if dataValid {
var conv uint32
convValid := false
if l.fecDecoder != nil {
isfec := binary.LittleEndian.Uint16(data[4:])
if isfec == typeData {
conv = binary.LittleEndian.Uint32(data[fecHeaderSizePlus2:])
convValid = true
}
} else {
conv = binary.LittleEndian.Uint32(data)
convValid = true
}
if convValid {
key := sessionKey{
addr: from.String(),
convID: conv,
}
var s *UDPSession
var ok bool
// packets received from an address always come in batch.
// cache the session for next packet, without querying map.
if key == lastKey {
s, ok = lastSession, true
} else if s, ok = l.sessions[key]; ok {
lastSession = s
lastKey = key
}
if !ok { // new session
if !blacklist.has(from.String(), conv) && len(l.chAccepts) < cap(l.chAccepts) && len(l.sessions) < 4096 { // do not let new session overwhelm accept queue and connection count
s := newUDPSession(conv, l.dataShards, l.parityShards, l, l.conn, from, l.block)
s.kcpInput(data)
l.sessions[key] = s
l.chAccepts <- s
}
} else {
s.kcpInput(data)
}
}
}
xmitBuf.Put(raw)
case key := <-l.chSessionClosed:
if key == lastKey {
lastKey = sessionKey{}
}
delete(l.sessions, key)
case <-l.die:
return
}
}
}
func (l *Listener) receiver(ch chan<- inPacket) {
for {
data := xmitBuf.Get().([]byte)[:mtuLimit]
if n, from, err := l.conn.ReadFrom(data); err == nil && n >= l.headerSize+IKCP_OVERHEAD {
select {
case ch <- inPacket{from, data[:n]}:
case <-l.die:
return
}
} else if err != nil {
return
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
}
}
// SetReadBuffer sets the socket read buffer for the Listener
func (l *Listener) SetReadBuffer(bytes int) error {
if nc, ok := l.conn.(setReadBuffer); ok {
return nc.SetReadBuffer(bytes)
}
return errors.New(errInvalidOperation)
}
// SetWriteBuffer sets the socket write buffer for the Listener
func (l *Listener) SetWriteBuffer(bytes int) error {
if nc, ok := l.conn.(setWriteBuffer); ok {
return nc.SetWriteBuffer(bytes)
}
return errors.New(errInvalidOperation)
}
// SetDSCP sets the 6bit DSCP field of IP header
func (l *Listener) SetDSCP(dscp int) error {
if nc, ok := l.conn.(net.Conn); ok {
return ipv4.NewConn(nc).SetTOS(dscp << 2)
}
return errors.New(errInvalidOperation)
}
// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
func (l *Listener) Accept() (net.Conn, error) {
return l.AcceptKCP()
}
// AcceptKCP accepts a KCP connection
func (l *Listener) AcceptKCP() (*UDPSession, error) {
var timeout <-chan time.Time
if tdeadline, ok := l.rd.Load().(time.Time); ok && !tdeadline.IsZero() {
timeout = time.After(tdeadline.Sub(time.Now()))
}
select {
case <-timeout:
return nil, &errTimeout{}
case c := <-l.chAccepts:
return c, nil
case <-l.die:
return nil, errors.New(errBrokenPipe)
}
}
// SetDeadline sets the deadline associated with the listener. A zero time value disables the deadline.
func (l *Listener) SetDeadline(t time.Time) error {
l.SetReadDeadline(t)
l.SetWriteDeadline(t)
return nil
}
// SetReadDeadline implements the Conn SetReadDeadline method.
func (l *Listener) SetReadDeadline(t time.Time) error {
l.rd.Store(t)
return nil
}
// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (l *Listener) SetWriteDeadline(t time.Time) error {
l.wd.Store(t)
return nil
}
// Close stops listening on the UDP address. Already Accepted connections are not closed.
func (l *Listener) Close() error {
close(l.die)
return l.conn.Close()
}
// closeSession notifies the listener that a session has closed
func (l *Listener) closeSession(key sessionKey) bool {
select {
case l.chSessionClosed <- key:
return true
case <-l.die:
return false
}
}
// Addr returns the listener's network address, The Addr returned is shared by all invocations of Addr, so do not modify it.
func (l *Listener) Addr() net.Addr { return l.conn.LocalAddr() }
// Listen listens for incoming KCP packets addressed to the local address laddr on the network "udp",
func Listen(laddr string) (net.Listener, error) { return ListenWithOptions(laddr, nil, 0, 0) }
// ListenWithOptions listens for incoming KCP packets addressed to the local address laddr on the network "udp" with packet encryption;
// dataShards and parityShards define the Reed-Solomon erasure coding parameters
func ListenWithOptions(laddr string, block BlockCrypt, dataShards, parityShards int) (*Listener, error) {
udpaddr, err := net.ResolveUDPAddr("udp", laddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
conn, err := net.ListenUDP("udp", udpaddr)
if err != nil {
return nil, errors.Wrap(err, "net.ListenUDP")
}
return ServeConn(block, dataShards, parityShards, conn)
}
// ServeConn serves KCP protocol for a single packet connection.
func ServeConn(block BlockCrypt, dataShards, parityShards int, conn net.PacketConn) (*Listener, error) {
l := new(Listener)
l.conn = conn
l.sessions = make(map[sessionKey]*UDPSession)
l.chAccepts = make(chan *UDPSession, acceptBacklog)
l.chSessionClosed = make(chan sessionKey)
l.die = make(chan struct{})
l.dataShards = dataShards
l.parityShards = parityShards
l.block = block
l.fecDecoder = newFECDecoder(rxFECMulti*(dataShards+parityShards), dataShards, parityShards)
// calculate header size
if l.block != nil {
l.headerSize += cryptHeaderSize
}
if l.fecDecoder != nil {
l.headerSize += fecHeaderSizePlus2
}
go l.monitor()
return l, nil
}
// Dial connects to the remote address "raddr" on the network "udp"
func Dial(raddr string) (net.Conn, error) { return DialWithOptions(raddr, nil, 0, 0) }
// DialWithOptions connects to the remote address "raddr" on the network "udp" with packet encryption
func DialWithOptions(raddr string, block BlockCrypt, dataShards, parityShards int) (*UDPSession, error) {
udpaddr, err := net.ResolveUDPAddr("udp", raddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
udpconn, err := net.DialUDP("udp", nil, udpaddr)
if err != nil {
return nil, errors.Wrap(err, "net.DialUDP")
}
return NewConn(raddr, block, dataShards, parityShards, &connectedUDPConn{udpconn})
}
// NewConn establishes a session and talks KCP protocol over a packet connection.
func NewConn(raddr string, block BlockCrypt, dataShards, parityShards int, conn net.PacketConn) (*UDPSession, error) {
udpaddr, err := net.ResolveUDPAddr("udp", raddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
var convid uint32
binary.Read(rand.Reader, binary.LittleEndian, &convid)
return newUDPSession(convid, dataShards, parityShards, nil, conn, udpaddr, block), nil
}
// returns current time in milliseconds
func currentMs() uint32 { return uint32(time.Now().UnixNano() / int64(time.Millisecond)) }
// connectedUDPConn is a wrapper for net.UDPConn which converts WriteTo syscalls
// to Write syscalls that are 4 times faster on some OS'es. This should only be
// used for connections that were produced by a net.Dial* call.
type connectedUDPConn struct{ *net.UDPConn }
// WriteTo redirects all writes to the Write syscall, which is 4 times faster.
func (c *connectedUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { return c.Write(b) }
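For reference, a minimal echo sketch (not taken from the Syncthing tree) using the Listener and UDPSession API removed above, with no encryption and no FEC; the loopback address and port are arbitrary choices and error handling is pared down.
package main

import (
	"fmt"

	kcp "github.com/xtaci/kcp-go"
)

func main() {
	// Server: accept KCP sessions on a local UDP port (block=nil, no FEC shards).
	ln, err := kcp.ListenWithOptions("127.0.0.1:12345", nil, 0, 0)
	if err != nil {
		panic(err)
	}
	go func() {
		conn, err := ln.AcceptKCP()
		if err != nil {
			return
		}
		buf := make([]byte, 1024)
		n, _ := conn.Read(buf)
		conn.Write(buf[:n]) // echo the payload back
	}()

	// Client: dial the listener and exchange one message.
	sess, err := kcp.DialWithOptions("127.0.0.1:12345", nil, 0, 0)
	if err != nil {
		panic(err)
	}
	sess.Write([]byte("ping"))
	reply := make([]byte, 1024)
	n, _ := sess.Read(reply)
	fmt.Println(string(reply[:n])) // "ping"
}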


@ -1,164 +0,0 @@
package kcp
import (
"fmt"
"sync/atomic"
)
// Snmp defines network statistics indicator
type Snmp struct {
BytesSent uint64 // bytes sent from upper level
BytesReceived uint64 // bytes received to upper level
MaxConn uint64 // max number of connections ever reached
ActiveOpens uint64 // accumulated active open connections
PassiveOpens uint64 // accumulated passive open connections
CurrEstab uint64 // current number of established connections
InErrs uint64 // UDP read errors reported from net.PacketConn
InCsumErrors uint64 // checksum errors from CRC32
KCPInErrors uint64 // packet input errors reported from KCP
InPkts uint64 // incoming packets count
OutPkts uint64 // outgoing packets count
InSegs uint64 // incoming KCP segments
OutSegs uint64 // outgoing KCP segments
InBytes uint64 // UDP bytes received
OutBytes uint64 // UDP bytes sent
RetransSegs uint64 // accumulated retransmitted segments
FastRetransSegs uint64 // accumulated fast retransmitted segments
EarlyRetransSegs uint64 // accumulated early retransmitted segments
LostSegs uint64 // number of segs inferred as lost
RepeatSegs uint64 // number of segs duplicated
FECRecovered uint64 // correct packets recovered from FEC
FECErrs uint64 // incorrect packets recovered from FEC
FECParityShards uint64 // FEC segments received
FECShortShards uint64 // number of data shards that's not enough for recovery
}
func newSnmp() *Snmp {
return new(Snmp)
}
// Header returns all field names
func (s *Snmp) Header() []string {
return []string{
"BytesSent",
"BytesReceived",
"MaxConn",
"ActiveOpens",
"PassiveOpens",
"CurrEstab",
"InErrs",
"InCsumErrors",
"KCPInErrors",
"InPkts",
"OutPkts",
"InSegs",
"OutSegs",
"InBytes",
"OutBytes",
"RetransSegs",
"FastRetransSegs",
"EarlyRetransSegs",
"LostSegs",
"RepeatSegs",
"FECParityShards",
"FECErrs",
"FECRecovered",
"FECShortShards",
}
}
// ToSlice returns current snmp info as slice
func (s *Snmp) ToSlice() []string {
snmp := s.Copy()
return []string{
fmt.Sprint(snmp.BytesSent),
fmt.Sprint(snmp.BytesReceived),
fmt.Sprint(snmp.MaxConn),
fmt.Sprint(snmp.ActiveOpens),
fmt.Sprint(snmp.PassiveOpens),
fmt.Sprint(snmp.CurrEstab),
fmt.Sprint(snmp.InErrs),
fmt.Sprint(snmp.InCsumErrors),
fmt.Sprint(snmp.KCPInErrors),
fmt.Sprint(snmp.InPkts),
fmt.Sprint(snmp.OutPkts),
fmt.Sprint(snmp.InSegs),
fmt.Sprint(snmp.OutSegs),
fmt.Sprint(snmp.InBytes),
fmt.Sprint(snmp.OutBytes),
fmt.Sprint(snmp.RetransSegs),
fmt.Sprint(snmp.FastRetransSegs),
fmt.Sprint(snmp.EarlyRetransSegs),
fmt.Sprint(snmp.LostSegs),
fmt.Sprint(snmp.RepeatSegs),
fmt.Sprint(snmp.FECParityShards),
fmt.Sprint(snmp.FECErrs),
fmt.Sprint(snmp.FECRecovered),
fmt.Sprint(snmp.FECShortShards),
}
}
// Copy makes a copy of the current snmp snapshot
func (s *Snmp) Copy() *Snmp {
d := newSnmp()
d.BytesSent = atomic.LoadUint64(&s.BytesSent)
d.BytesReceived = atomic.LoadUint64(&s.BytesReceived)
d.MaxConn = atomic.LoadUint64(&s.MaxConn)
d.ActiveOpens = atomic.LoadUint64(&s.ActiveOpens)
d.PassiveOpens = atomic.LoadUint64(&s.PassiveOpens)
d.CurrEstab = atomic.LoadUint64(&s.CurrEstab)
d.InErrs = atomic.LoadUint64(&s.InErrs)
d.InCsumErrors = atomic.LoadUint64(&s.InCsumErrors)
d.KCPInErrors = atomic.LoadUint64(&s.KCPInErrors)
d.InPkts = atomic.LoadUint64(&s.InPkts)
d.OutPkts = atomic.LoadUint64(&s.OutPkts)
d.InSegs = atomic.LoadUint64(&s.InSegs)
d.OutSegs = atomic.LoadUint64(&s.OutSegs)
d.InBytes = atomic.LoadUint64(&s.InBytes)
d.OutBytes = atomic.LoadUint64(&s.OutBytes)
d.RetransSegs = atomic.LoadUint64(&s.RetransSegs)
d.FastRetransSegs = atomic.LoadUint64(&s.FastRetransSegs)
d.EarlyRetransSegs = atomic.LoadUint64(&s.EarlyRetransSegs)
d.LostSegs = atomic.LoadUint64(&s.LostSegs)
d.RepeatSegs = atomic.LoadUint64(&s.RepeatSegs)
d.FECParityShards = atomic.LoadUint64(&s.FECParityShards)
d.FECErrs = atomic.LoadUint64(&s.FECErrs)
d.FECRecovered = atomic.LoadUint64(&s.FECRecovered)
d.FECShortShards = atomic.LoadUint64(&s.FECShortShards)
return d
}
// Reset values to zero
func (s *Snmp) Reset() {
atomic.StoreUint64(&s.BytesSent, 0)
atomic.StoreUint64(&s.BytesReceived, 0)
atomic.StoreUint64(&s.MaxConn, 0)
atomic.StoreUint64(&s.ActiveOpens, 0)
atomic.StoreUint64(&s.PassiveOpens, 0)
atomic.StoreUint64(&s.CurrEstab, 0)
atomic.StoreUint64(&s.InErrs, 0)
atomic.StoreUint64(&s.InCsumErrors, 0)
atomic.StoreUint64(&s.KCPInErrors, 0)
atomic.StoreUint64(&s.InPkts, 0)
atomic.StoreUint64(&s.OutPkts, 0)
atomic.StoreUint64(&s.InSegs, 0)
atomic.StoreUint64(&s.OutSegs, 0)
atomic.StoreUint64(&s.InBytes, 0)
atomic.StoreUint64(&s.OutBytes, 0)
atomic.StoreUint64(&s.RetransSegs, 0)
atomic.StoreUint64(&s.FastRetransSegs, 0)
atomic.StoreUint64(&s.EarlyRetransSegs, 0)
atomic.StoreUint64(&s.LostSegs, 0)
atomic.StoreUint64(&s.RepeatSegs, 0)
atomic.StoreUint64(&s.FECParityShards, 0)
atomic.StoreUint64(&s.FECErrs, 0)
atomic.StoreUint64(&s.FECRecovered, 0)
atomic.StoreUint64(&s.FECShortShards, 0)
}
// DefaultSnmp is the global KCP connection statistics collector
var DefaultSnmp *Snmp
func init() {
DefaultSnmp = newSnmp()
}
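A short sketch (not part of the original file) of how these counters could be inspected: DefaultSnmp is sampled atomically with Copy, and Header and ToSlice return parallel name/value slices.
package main

import (
	"fmt"

	kcp "github.com/xtaci/kcp-go"
)

func main() {
	// Take a consistent snapshot of the global KCP statistics and print it.
	snap := kcp.DefaultSnmp.Copy()
	values := snap.ToSlice()
	for i, name := range snap.Header() {
		fmt.Printf("%s: %s\n", name, values[i])
	}
}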


@ -1,105 +0,0 @@
package kcp
import (
"container/heap"
"sync"
"time"
)
var updater updateHeap
func init() {
updater.init()
go updater.updateTask()
}
// entry contains a session update info
type entry struct {
ts time.Time
s *UDPSession
}
// a global, heap-managed kcp.flush() caller
type updateHeap struct {
entries []entry
mu sync.Mutex
chWakeUp chan struct{}
}
func (h *updateHeap) Len() int { return len(h.entries) }
func (h *updateHeap) Less(i, j int) bool { return h.entries[i].ts.Before(h.entries[j].ts) }
func (h *updateHeap) Swap(i, j int) {
h.entries[i], h.entries[j] = h.entries[j], h.entries[i]
h.entries[i].s.updaterIdx = i
h.entries[j].s.updaterIdx = j
}
func (h *updateHeap) Push(x interface{}) {
h.entries = append(h.entries, x.(entry))
n := len(h.entries)
h.entries[n-1].s.updaterIdx = n - 1
}
func (h *updateHeap) Pop() interface{} {
n := len(h.entries)
x := h.entries[n-1]
h.entries[n-1].s.updaterIdx = -1
h.entries[n-1] = entry{} // manual set nil for GC
h.entries = h.entries[0 : n-1]
return x
}
func (h *updateHeap) init() {
h.chWakeUp = make(chan struct{}, 1)
}
func (h *updateHeap) addSession(s *UDPSession) {
h.mu.Lock()
heap.Push(h, entry{time.Now(), s})
h.mu.Unlock()
h.wakeup()
}
func (h *updateHeap) removeSession(s *UDPSession) {
h.mu.Lock()
if s.updaterIdx != -1 {
heap.Remove(h, s.updaterIdx)
}
h.mu.Unlock()
}
func (h *updateHeap) wakeup() {
select {
case h.chWakeUp <- struct{}{}:
default:
}
}
func (h *updateHeap) updateTask() {
var timer <-chan time.Time
for {
select {
case <-timer:
case <-h.chWakeUp:
}
h.mu.Lock()
hlen := h.Len()
now := time.Now()
for i := 0; i < hlen; i++ {
entry := heap.Pop(h).(entry)
if now.After(entry.ts) {
entry.ts = now.Add(entry.s.update())
heap.Push(h, entry)
} else {
heap.Push(h, entry)
break
}
}
if hlen > 0 {
timer = time.After(h.entries[0].ts.Sub(now))
}
h.mu.Unlock()
}
}

vendor/github.com/xtaci/kcp-go/xor.go generated vendored

@ -1,110 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kcp
import (
"runtime"
"unsafe"
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
wordBytes := w * wordSize
fastXORWords(dst[:wordBytes], a[:wordBytes], b[:wordBytes])
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
ex := n % 8
for i := 0; i < ex; i++ {
dst[i] = a[i] ^ b[i]
}
for i := ex; i < n; i += 8 {
_dst := dst[i : i+8]
_a := a[i : i+8]
_b := b[i : i+8]
_dst[0] = _a[0] ^ _b[0]
_dst[1] = _a[1] ^ _b[1]
_dst[2] = _a[2] ^ _b[2]
_dst[3] = _a[3] ^ _b[3]
_dst[4] = _a[4] ^ _b[4]
_dst[5] = _a[5] ^ _b[5]
_dst[6] = _a[6] ^ _b[6]
_dst[7] = _a[7] ^ _b[7]
}
return n
}
// xorBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func xorBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastXORBytes(dst, a, b)
}
// TODO(hanwen): if (dst, a, b) have common alignment
// we could still try fastXORBytes. It is not clear
// how often this happens, and it's only worth it if
// the block encryption itself is hardware
// accelerated.
return safeXORBytes(dst, a, b)
}
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
// The arguments are assumed to be of equal length.
func fastXORWords(dst, a, b []byte) {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
n := len(b) / wordSize
ex := n % 8
for i := 0; i < ex; i++ {
dw[i] = aw[i] ^ bw[i]
}
for i := ex; i < n; i += 8 {
_dw := dw[i : i+8]
_aw := aw[i : i+8]
_bw := bw[i : i+8]
_dw[0] = _aw[0] ^ _bw[0]
_dw[1] = _aw[1] ^ _bw[1]
_dw[2] = _aw[2] ^ _bw[2]
_dw[3] = _aw[3] ^ _bw[3]
_dw[4] = _aw[4] ^ _bw[4]
_dw[5] = _aw[5] ^ _bw[5]
_dw[6] = _aw[6] ^ _bw[6]
_dw[7] = _aw[7] ^ _bw[7]
}
}
func xorWords(dst, a, b []byte) {
if supportsUnaligned {
fastXORWords(dst, a, b)
} else {
safeXORBytes(dst, a, b)
}
}

vendor/github.com/xtaci/smux/LICENSE generated vendored

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2016-2017 Daniel Fu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,60 +0,0 @@
package smux
import (
"encoding/binary"
"fmt"
)
const (
version = 1
)
const ( // cmds
cmdSYN byte = iota // stream open
cmdFIN // stream close, a.k.a EOF mark
cmdPSH // data push
cmdNOP // no operation
)
const (
sizeOfVer = 1
sizeOfCmd = 1
sizeOfLength = 2
sizeOfSid = 4
headerSize = sizeOfVer + sizeOfCmd + sizeOfSid + sizeOfLength
)
// Frame defines a packet that is multiplexed into or out of a single connection
type Frame struct {
ver byte
cmd byte
sid uint32
data []byte
}
func newFrame(cmd byte, sid uint32) Frame {
return Frame{ver: version, cmd: cmd, sid: sid}
}
type rawHeader []byte
func (h rawHeader) Version() byte {
return h[0]
}
func (h rawHeader) Cmd() byte {
return h[1]
}
func (h rawHeader) Length() uint16 {
return binary.LittleEndian.Uint16(h[2:])
}
func (h rawHeader) StreamID() uint32 {
return binary.LittleEndian.Uint32(h[4:])
}
func (h rawHeader) String() string {
return fmt.Sprintf("Version:%d Cmd:%d StreamID:%d Length:%d",
h.Version(), h.Cmd(), h.StreamID(), h.Length())
}
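// Illustrative aside (not part of the original file): per the sizeOf* constants
// and rawHeader accessors above, each smux frame on the wire is an 8-byte
// little-endian header followed by the payload:
//
//	| ver (1B) | cmd (1B) | length (2B, LE) | sid (4B, LE) | length bytes of data |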

vendor/github.com/xtaci/smux/mux.go generated vendored

@ -1,80 +0,0 @@
package smux
import (
"fmt"
"io"
"time"
"github.com/pkg/errors"
)
// Config is used to tune the Smux session
type Config struct {
// KeepAliveInterval is how often to send a NOP command to the remote
KeepAliveInterval time.Duration
// KeepAliveTimeout is how long to wait with no data
// arriving before the session is closed
KeepAliveTimeout time.Duration
// MaxFrameSize is used to control the maximum
// frame size sent to the remote
MaxFrameSize int
// MaxReceiveBuffer is used to control the maximum
// amount of data buffered in the pool
MaxReceiveBuffer int
}
// DefaultConfig is used to return a default configuration
func DefaultConfig() *Config {
return &Config{
KeepAliveInterval: 10 * time.Second,
KeepAliveTimeout: 30 * time.Second,
MaxFrameSize: 4096,
MaxReceiveBuffer: 4194304,
}
}
// VerifyConfig is used to verify the sanity of configuration
func VerifyConfig(config *Config) error {
if config.KeepAliveInterval == 0 {
return errors.New("keep-alive interval must be positive")
}
if config.KeepAliveTimeout < config.KeepAliveInterval {
return fmt.Errorf("keep-alive timeout must be larger than keep-alive interval")
}
if config.MaxFrameSize <= 0 {
return errors.New("max frame size must be positive")
}
if config.MaxFrameSize > 65535 {
return errors.New("max frame size must not be larger than 65535")
}
if config.MaxReceiveBuffer <= 0 {
return errors.New("max receive buffer must be positive")
}
return nil
}
// Server is used to initialize a new server-side connection.
func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
if err := VerifyConfig(config); err != nil {
return nil, err
}
return newSession(config, conn, false), nil
}
// Client is used to initialize a new client-side connection.
func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
if err := VerifyConfig(config); err != nil {
return nil, err
}
return newSession(config, conn, true), nil
}
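To illustrate the removed multiplexer, here is a minimal sketch (not from the Syncthing tree) that runs smux over an in-memory net.Pipe; any reliable, ordered connection, such as a KCP session, could take its place. Error handling is omitted for brevity, and passing a nil config selects DefaultConfig.
package main

import (
	"fmt"
	"net"

	"github.com/xtaci/smux"
)

func main() {
	// An in-memory pipe stands in for the underlying connection.
	a, b := net.Pipe()

	go func() {
		srv, _ := smux.Server(a, nil)
		stream, _ := srv.AcceptStream()
		buf := make([]byte, 16)
		n, _ := stream.Read(buf)
		stream.Write(buf[:n]) // echo
	}()

	cli, _ := smux.Client(b, nil)
	stream, _ := cli.OpenStream()
	stream.Write([]byte("hello"))
	reply := make([]byte, 16)
	n, _ := stream.Read(reply)
	fmt.Println(string(reply[:n])) // "hello"
}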


@ -1,350 +0,0 @@
package smux
import (
"encoding/binary"
"io"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
)
const (
defaultAcceptBacklog = 1024
)
const (
errBrokenPipe = "broken pipe"
errInvalidProtocol = "invalid protocol version"
errGoAway = "stream id overflows, should start a new connection"
)
type writeRequest struct {
frame Frame
result chan writeResult
}
type writeResult struct {
n int
err error
}
// Session defines a multiplexed connection for streams
type Session struct {
conn io.ReadWriteCloser
config *Config
nextStreamID uint32 // next stream identifier
nextStreamIDLock sync.Mutex
bucket int32 // token bucket
bucketNotify chan struct{} // used for waiting for tokens
streams map[uint32]*Stream // all streams in this session
streamLock sync.Mutex // locks streams
die chan struct{} // flag session has died
dieLock sync.Mutex
chAccepts chan *Stream
dataReady int32 // flag data has arrived
goAway int32 // flag id exhausted
deadline atomic.Value
writes chan writeRequest
}
func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
s := new(Session)
s.die = make(chan struct{})
s.conn = conn
s.config = config
s.streams = make(map[uint32]*Stream)
s.chAccepts = make(chan *Stream, defaultAcceptBacklog)
s.bucket = int32(config.MaxReceiveBuffer)
s.bucketNotify = make(chan struct{}, 1)
s.writes = make(chan writeRequest)
if client {
s.nextStreamID = 1
} else {
s.nextStreamID = 0
}
go s.recvLoop()
go s.sendLoop()
go s.keepalive()
return s
}
// OpenStream is used to create a new stream
func (s *Session) OpenStream() (*Stream, error) {
if s.IsClosed() {
return nil, errors.New(errBrokenPipe)
}
// generate stream id
s.nextStreamIDLock.Lock()
if s.goAway > 0 {
s.nextStreamIDLock.Unlock()
return nil, errors.New(errGoAway)
}
s.nextStreamID += 2
sid := s.nextStreamID
if sid == sid%2 { // stream-id overflows
s.goAway = 1
s.nextStreamIDLock.Unlock()
return nil, errors.New(errGoAway)
}
s.nextStreamIDLock.Unlock()
stream := newStream(sid, s.config.MaxFrameSize, s)
if _, err := s.writeFrame(newFrame(cmdSYN, sid)); err != nil {
return nil, errors.Wrap(err, "writeFrame")
}
s.streamLock.Lock()
s.streams[sid] = stream
s.streamLock.Unlock()
return stream, nil
}
// AcceptStream is used to block until the next available stream
// is ready to be accepted.
func (s *Session) AcceptStream() (*Stream, error) {
var deadline <-chan time.Time
if d, ok := s.deadline.Load().(time.Time); ok && !d.IsZero() {
timer := time.NewTimer(time.Until(d))
defer timer.Stop()
deadline = timer.C
}
select {
case stream := <-s.chAccepts:
return stream, nil
case <-deadline:
return nil, errTimeout
case <-s.die:
return nil, errors.New(errBrokenPipe)
}
}
// Close is used to close the session and all streams.
func (s *Session) Close() (err error) {
s.dieLock.Lock()
select {
case <-s.die:
s.dieLock.Unlock()
return errors.New(errBrokenPipe)
default:
close(s.die)
s.dieLock.Unlock()
s.streamLock.Lock()
for k := range s.streams {
s.streams[k].sessionClose()
}
s.streamLock.Unlock()
s.notifyBucket()
return s.conn.Close()
}
}
// notifyBucket notifies recvLoop that bucket is available
func (s *Session) notifyBucket() {
select {
case s.bucketNotify <- struct{}{}:
default:
}
}
// IsClosed does a safe check to see if we have shutdown
func (s *Session) IsClosed() bool {
select {
case <-s.die:
return true
default:
return false
}
}
// NumStreams returns the number of currently open streams
func (s *Session) NumStreams() int {
if s.IsClosed() {
return 0
}
s.streamLock.Lock()
defer s.streamLock.Unlock()
return len(s.streams)
}
// SetDeadline sets a deadline used by Accept* calls.
// A zero time value disables the deadline.
func (s *Session) SetDeadline(t time.Time) error {
s.deadline.Store(t)
return nil
}
// notify the session that a stream has closed
func (s *Session) streamClosed(sid uint32) {
s.streamLock.Lock()
if n := s.streams[sid].recycleTokens(); n > 0 { // return remaining tokens to the bucket
if atomic.AddInt32(&s.bucket, int32(n)) > 0 {
s.notifyBucket()
}
}
delete(s.streams, sid)
s.streamLock.Unlock()
}
// returnTokens is called by stream to return token after read
func (s *Session) returnTokens(n int) {
if atomic.AddInt32(&s.bucket, int32(n)) > 0 {
s.notifyBucket()
}
}
// readFrame reads a frame from the underlying connection;
// its data points into the provided input buffer
func (s *Session) readFrame(buffer []byte) (f Frame, err error) {
if _, err := io.ReadFull(s.conn, buffer[:headerSize]); err != nil {
return f, errors.Wrap(err, "readFrame")
}
dec := rawHeader(buffer)
if dec.Version() != version {
return f, errors.New(errInvalidProtocol)
}
f.ver = dec.Version()
f.cmd = dec.Cmd()
f.sid = dec.StreamID()
if length := dec.Length(); length > 0 {
if _, err := io.ReadFull(s.conn, buffer[headerSize:headerSize+length]); err != nil {
return f, errors.Wrap(err, "readFrame")
}
f.data = buffer[headerSize : headerSize+length]
}
return f, nil
}
// recvLoop keeps reading frames from the underlying connection as long as receive tokens are available
func (s *Session) recvLoop() {
buffer := make([]byte, (1<<16)+headerSize)
for {
for atomic.LoadInt32(&s.bucket) <= 0 && !s.IsClosed() {
<-s.bucketNotify
}
if f, err := s.readFrame(buffer); err == nil {
atomic.StoreInt32(&s.dataReady, 1)
switch f.cmd {
case cmdNOP:
case cmdSYN:
s.streamLock.Lock()
if _, ok := s.streams[f.sid]; !ok {
stream := newStream(f.sid, s.config.MaxFrameSize, s)
s.streams[f.sid] = stream
select {
case s.chAccepts <- stream:
case <-s.die:
}
}
s.streamLock.Unlock()
case cmdFIN:
s.streamLock.Lock()
if stream, ok := s.streams[f.sid]; ok {
stream.markRST()
stream.notifyReadEvent()
}
s.streamLock.Unlock()
case cmdPSH:
s.streamLock.Lock()
if stream, ok := s.streams[f.sid]; ok {
atomic.AddInt32(&s.bucket, -int32(len(f.data)))
stream.pushBytes(f.data)
stream.notifyReadEvent()
}
s.streamLock.Unlock()
default:
s.Close()
return
}
} else {
s.Close()
return
}
}
}
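// keepalive periodically sends NOP frames and closes the session if no frame
// at all has arrived within KeepAliveTimeout (recvLoop never set dataReady).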
func (s *Session) keepalive() {
tickerPing := time.NewTicker(s.config.KeepAliveInterval)
tickerTimeout := time.NewTicker(s.config.KeepAliveTimeout)
defer tickerPing.Stop()
defer tickerTimeout.Stop()
for {
select {
case <-tickerPing.C:
s.writeFrame(newFrame(cmdNOP, 0))
s.notifyBucket() // force a signal to the recvLoop
case <-tickerTimeout.C:
if !atomic.CompareAndSwapInt32(&s.dataReady, 1, 0) {
s.Close()
return
}
case <-s.die:
return
}
}
}
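// sendLoop serializes queued write requests onto the underlying connection and
// reports each request's outcome back on its result channel.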
func (s *Session) sendLoop() {
buf := make([]byte, (1<<16)+headerSize)
for {
select {
case <-s.die:
return
case request := <-s.writes:
buf[0] = request.frame.ver
buf[1] = request.frame.cmd
binary.LittleEndian.PutUint16(buf[2:], uint16(len(request.frame.data)))
binary.LittleEndian.PutUint32(buf[4:], request.frame.sid)
copy(buf[headerSize:], request.frame.data)
n, err := s.conn.Write(buf[:headerSize+len(request.frame.data)])
n -= headerSize
if n < 0 {
n = 0
}
result := writeResult{
n: n,
err: err,
}
request.result <- result
close(request.result)
}
}
}
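// For reference, the wire layout produced by sendLoop above (and parsed by
// rawHeader in readFrame) works out to an 8-byte header followed by the data:
//
//	offset 0: version (1 byte)
//	offset 1: cmd     (1 byte: SYN, FIN, PSH or NOP)
//	offset 2: length  (uint16, little endian, len(frame.data))
//	offset 4: sid     (uint32, little endian)
//	offset 8: data    (length bytes, so at most 65535 per frame)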
// writeFrame writes the frame to the underlying connection
// and returns the number of bytes written if successful
func (s *Session) writeFrame(f Frame) (n int, err error) {
req := writeRequest{
frame: f,
result: make(chan writeResult, 1),
}
select {
case <-s.die:
return 0, errors.New(errBrokenPipe)
case s.writes <- req:
}
result := <-req.result
return result.n, result.err
}

View File

@ -1,253 +0,0 @@
package smux
import (
"bytes"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
)
// Stream implements net.Conn
type Stream struct {
id uint32
rstflag int32
sess *Session
buffer bytes.Buffer
bufferLock sync.Mutex
frameSize int
chReadEvent chan struct{} // notify a read event
die chan struct{} // flag the stream has closed
dieLock sync.Mutex
readDeadline atomic.Value
writeDeadline atomic.Value
}
// newStream initializes a new Stream
func newStream(id uint32, frameSize int, sess *Session) *Stream {
s := new(Stream)
s.id = id
s.chReadEvent = make(chan struct{}, 1)
s.frameSize = frameSize
s.sess = sess
s.die = make(chan struct{})
return s
}
// ID returns the unique stream ID.
func (s *Stream) ID() uint32 {
return s.id
}
// Read implements net.Conn
func (s *Stream) Read(b []byte) (n int, err error) {
var deadline <-chan time.Time
if d, ok := s.readDeadline.Load().(time.Time); ok && !d.IsZero() {
timer := time.NewTimer(time.Until(d))
defer timer.Stop()
deadline = timer.C
}
READ:
s.bufferLock.Lock()
n, err = s.buffer.Read(b)
s.bufferLock.Unlock()
if n > 0 {
s.sess.returnTokens(n)
return n, nil
} else if atomic.LoadInt32(&s.rstflag) == 1 {
_ = s.Close()
return 0, io.EOF
}
select {
case <-s.chReadEvent:
goto READ
case <-deadline:
return n, errTimeout
case <-s.die:
return 0, errors.New(errBrokenPipe)
}
}
// Write implements net.Conn
func (s *Stream) Write(b []byte) (n int, err error) {
var deadline <-chan time.Time
if d, ok := s.writeDeadline.Load().(time.Time); ok && !d.IsZero() {
timer := time.NewTimer(time.Until(d))
defer timer.Stop()
deadline = timer.C
}
select {
case <-s.die:
return 0, errors.New(errBrokenPipe)
default:
}
frames := s.split(b, cmdPSH, s.id)
sent := 0
for k := range frames {
req := writeRequest{
frame: frames[k],
result: make(chan writeResult, 1),
}
select {
case s.sess.writes <- req:
case <-s.die:
return sent, errors.New(errBrokenPipe)
case <-deadline:
return sent, errTimeout
}
select {
case result := <-req.result:
sent += result.n
if result.err != nil {
return sent, result.err
}
case <-s.die:
return sent, errors.New(errBrokenPipe)
case <-deadline:
return sent, errTimeout
}
}
return sent, nil
}
// Close implements net.Conn
func (s *Stream) Close() error {
s.dieLock.Lock()
select {
case <-s.die:
s.dieLock.Unlock()
return errors.New(errBrokenPipe)
default:
close(s.die)
s.dieLock.Unlock()
s.sess.streamClosed(s.id)
_, err := s.sess.writeFrame(newFrame(cmdFIN, s.id))
return err
}
}
// SetReadDeadline sets the read deadline as defined by
// net.Conn.SetReadDeadline.
// A zero time value disables the deadline.
func (s *Stream) SetReadDeadline(t time.Time) error {
s.readDeadline.Store(t)
return nil
}
// SetWriteDeadline sets the write deadline as defined by
// net.Conn.SetWriteDeadline.
// A zero time value disables the deadline.
func (s *Stream) SetWriteDeadline(t time.Time) error {
s.writeDeadline.Store(t)
return nil
}
// SetDeadline sets both read and write deadlines as defined by
// net.Conn.SetDeadline.
// A zero time value disables the deadlines.
func (s *Stream) SetDeadline(t time.Time) error {
if err := s.SetReadDeadline(t); err != nil {
return err
}
if err := s.SetWriteDeadline(t); err != nil {
return err
}
return nil
}
// sessionClose closes the stream from the session side, without sending FIN or notifying the session
func (s *Stream) sessionClose() {
s.dieLock.Lock()
defer s.dieLock.Unlock()
select {
case <-s.die:
default:
close(s.die)
}
}
// LocalAddr satisfies net.Conn interface
func (s *Stream) LocalAddr() net.Addr {
if ts, ok := s.sess.conn.(interface {
LocalAddr() net.Addr
}); ok {
return ts.LocalAddr()
}
return nil
}
// RemoteAddr satisfies net.Conn interface
func (s *Stream) RemoteAddr() net.Addr {
if ts, ok := s.sess.conn.(interface {
RemoteAddr() net.Addr
}); ok {
return ts.RemoteAddr()
}
return nil
}
// pushBytes appends a slice to the stream's receive buffer
func (s *Stream) pushBytes(p []byte) {
s.bufferLock.Lock()
s.buffer.Write(p)
s.bufferLock.Unlock()
}
// recycleTokens converts any remaining buffered bytes back into tokens (truncating the buffer)
func (s *Stream) recycleTokens() (n int) {
s.bufferLock.Lock()
n = s.buffer.Len()
s.buffer.Reset()
s.bufferLock.Unlock()
return
}
// split breaks a large byte slice into frames of at most frameSize bytes; the frames reference the original slice rather than copying it
func (s *Stream) split(bts []byte, cmd byte, sid uint32) []Frame {
frames := make([]Frame, 0, len(bts)/s.frameSize+1)
for len(bts) > s.frameSize {
frame := newFrame(cmd, sid)
frame.data = bts[:s.frameSize]
bts = bts[s.frameSize:]
frames = append(frames, frame)
}
if len(bts) > 0 {
frame := newFrame(cmd, sid)
frame.data = bts
frames = append(frames, frame)
}
return frames
}
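// Illustrative example, not in the original source: with a hypothetical
// frameSize of 4, a 10-byte write is chopped into three PSH frames whose data
// fields alias the caller's slice rather than copying it.
//
//	st := newStream(3, 4, sess)
//	frames := st.split([]byte("0123456789"), cmdPSH, st.id)
//	// len(frames) == 3; data fields are "0123", "4567" and "89"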
// notifyReadEvent wakes up a blocked Read, if any
func (s *Stream) notifyReadEvent() {
select {
case s.chReadEvent <- struct{}{}:
default:
}
}
// markRST marks this stream as having been reset by the remote side
func (s *Stream) markRST() {
atomic.StoreInt32(&s.rstflag, 1)
}
var errTimeout error = &timeoutError{}
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
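// Because timeoutError implements Timeout and Temporary, callers can detect an
// expired deadline through the standard net.Error interface, for example:
//
//	if ne, ok := err.(net.Error); ok && ne.Timeout() {
//		// deadline expired; retry or report a timeout
//	}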

vendor/manifest vendored
View File

@ -82,14 +82,6 @@
"branch": "master",
"notests": true
},
{
"importpath": "github.com/ccding/go-stun",
"repository": "https://github.com/ccding/go-stun",
"vcs": "git",
"revision": "d9bbe8f8fa7bf7ed03e6cfc6a2796bb36139e1f4",
"branch": "master",
"notests": true
},
{
"importpath": "github.com/cheggaaa/pb",
"repository": "https://github.com/cheggaaa/pb",
@ -524,22 +516,6 @@
"path": "/qr",
"notests": true
},
{
"importpath": "github.com/xtaci/kcp-go",
"repository": "https://github.com/xtaci/kcp-go",
"vcs": "git",
"revision": "86eebd5cadb519b7c9306082c7eb3bcee2c49a7b",
"branch": "master",
"notests": true
},
{
"importpath": "github.com/xtaci/smux",
"repository": "https://github.com/xtaci/smux",
"vcs": "git",
"revision": "c3e18246ff2252a6e9d6b529fcbf22ae5c74c007",
"branch": "master",
"notests": true
},
{
"importpath": "github.com/zillode/notify",
"repository": "https://github.com/zillode/notify",