diff --git a/.gitignore b/.gitignore index 64cac3290..5b1c7081c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,7 @@ syncthing syncthing.exe +stcli +stcli.exe *.tar.gz *.zip *.asc diff --git a/auto/gui.files.go b/auto/gui.files.go index 83ba928b1..88199350d 100644 --- a/auto/gui.files.go +++ b/auto/gui.files.go @@ -18,7 +18,7 @@ func init() { bs, _ = ioutil.ReadAll(gr) Assets["angular.min.js"] = bs - bs, _ = hex.DecodeString("1f8b080000096e8800ffd41b6b6fdb38f27b7e05b7db83e4c6b5ddc5e170489a026dfa40ae8f2c9ade7e09f281b6689b8d4cea483a8911f8bfdf90a26452a26429b96e6f0d6cd62687f3e2705e64c7cfbecb943285a682df4a228e90126b324433ce14656b52fcced2b5d4ffe5bfd1b3f1c1f8d922e5539ca2a747688e530940982dd62916f6b7063a88d6f0452a41672a3a3e38b8c102c90d9ba925650b7452ac18ad78b24e491c9573d1105d5e0d60453932d22c099ea644c4d145317aaa440ab0f335fca69ca1f8a99cf10c7879ba542a1ba0fb03041f4d3613e4e62d5604a84e8666547f8030493f1075fe11c6b56840528fe768344d460c6609f3f7dbe3cae49c2eeae3abcdd95b188d226f9401258de4f22a80e48c69894a169c7922041781759210f64ecf594a666e3c4617a06bb690684ae65c1034e53c9528e5fc1a469422c2c7a1940106eca546ee697284a24f542ac200192817f89ec1a61aa5a3df05577cc6539443a0d7492288944402a0da6460309122770a7ec1b0c242e516b31d56087cf8f7995dea5080c10ade9e583fe3bb0bc2928fd3cc457bbe560bae0deeabdeff4f7445158a3fbec9e460879ead575322f612f80a28313b63a0c91b9c5e3844f219544ca1f841d8adc18509d8c9c7d1f81d0b0c8728fd4afeb30640574fa03d04ba82d52c31eada81f4a301884e9770b64965233481f73425289fcdf7a3f34ed494859373966e3c15e104d9318b4d1f80bdfcbe4e537efb96a44411079b1945e5701f84efb95e7bb159816fbd7635904f2067a60fda0fc6e3be66ec1dc3d39424eed1c99d31cc71f0857df9fdc4670d78cdcc43d15ee8b9377964711d891e46bb7117a5c571653d5ae9d80b477db19ecd0849481217ce5d7fe81cc5bfec7cb93b65fc5d1cfdca88bae5e2daf8cc68a0230e4ee3684913120d8e3de8404c28a6b607f9df306fef31d8769db107f0753fc5b3eb44f00cd4020a5674066aba269b29c722b1f175dbc6b681d8c7b70e49a77c956141623c44d32ae378f4052074303b3971835b550c41d45a30f4fc4595608168da0f51239e92a197a840d98b173b59a27955a2397615647287d182a8381a6b031fdf10214161b03d529b9f94f12ee148b0c22e13563cbb04b6420358f476c76af8e50662deaa0ffa7c8587dd99b519889e33df8f77beb3463b4f403ad1762894b98f4fbf06313acf4cf6342a130a74d20a61e3fee83b04ec188c5e1f4e0fbdcee58a5cca47f495645c52c50525f27272657656fabc998523c9858a1de31f04052888301f8d7bec1a9439d649ebc3345a268266efdcb13a07db819fac0a32072696faf497043dd37984d9ed37bd56bd18dfd49790591434b140307019c993e638ac05777de1b0bbef6e510d74962537d75b1043171f6054b7f160e841e88f4a0020d670cfcb426580c6e8c56432a943d3a472269cda067004ad993248c574ed139ce56b554e7bf35044a0982688321492ce70a363af31d72596e7b70c2a848c08b581658310bcfe14e5e5716d765bd78dd83460d1442f6972550af719abe56885efe2c910fd133dcb77c4409cb1371b45e437ae209b791e28ef6a505afd2a19d439f428978a6b260d197517da1e5823f12d9a61355ba29834a9b6a6944957110280f5ddf0cce9f0a442ae8ec1373077413ee4afd8367846a708efe17518f8865e07950e11c3ab8a55e647c0e80751c83c8ca9a7842dd412060e0f431ba1b1a042568844f07324b3946ad6aae9e66e33ae46174b084d5ff2c51ac7a5fe638981e9bcb86a539761ccc4b69da4d5b4aef8e4d9d48e9c4ea8ca5f4da615c8adc29c8428bcea41a113010b5bb1da6a565c047330857ec693375efac6acb25d13a6b4f523b64e2da018526be9056d939acc172e096d9dfa1c78494fe93fec029bcb3a46a237410356b92dc6473a0382fad664aa909b43bc096d8d557574c6904e45a28aca11d1edbee6657a8dee27c4113a4455b28728fadba08ad1c9ddabc97bf4964a2b3b9cee46b59ecd4cf2fd17502abfeeab4f9be8f551da0aa2ad6cd6d6698ae55fc406ed71ecabb34cd015169b3e3aa36cce9b55a60b953f59639631a3345b27b554b951bc66d7900e42bd9bc30e5af6dfd9829f2652c529446db2358bf207d9bb2d9a199fd38e8d09bfb60ff1f723d59352c2d41fcdb46bfb6ef96cd9779b6a74d75
620765bb22e480b77be0a20b84ea5123a69fe4738444a7c432e76f7140d956db084aef4c03a741df6f4262e9428b2b8a16ed5e1cc490aee80a142c8bb91027703756520c5c8b8acf65e86e85f17e75f46d25cddd0f926f698180cd1fd92e00436f308dd47a7503a811d3cffb6c9487484229c014750140013e3ef92b368ebd18ca35f8b6b9e6fbab70b7ccf789ae24c12bffbb9ad36134c63774f33c115c6ae70d3db7d775c159a24a14adbc53e8b2cd0ae85004dd81516ae465c2305e9dfddc116e6978e7e4bb78e6c54de69e5edaac264cb61af39e56aba10e0417ddc8a32c069d774b15709f7f9b1026a40c7150346920d1413c0c0b6838aaa47e77f2c5b62ee545ac5337d1372fba5bc351d22eab631431cd5baf9a635111632ecc58e43f940b50074fb836d85a0a6eec29a4a300f37bfecc28d6b78e196b6fe14aa18656bb9ac621d74486aaa4d4d8bafc93d067ba9deba9ecef74ff67d8128b2dfdcf2733e440967a4626e9d64ec6493960a0a19400d6ae773427ee881f1a824a2e5ac8bf103cc5d67577ee00fd9786571c8a75738f73d79f1990a82af3b1c89bc7109981a6a79b63b7045100afa070fbce56ea1db0973b1d5ee6d7eda01e26a4944c162fb112addf510b1e3c77b5156d10958c671cdee58c0b186f27883d0d9d616cf597c73f25619548d5a52b9d7b7fc1465ec296a1ce92e596367711bccd4749b4b27c5ad42d7de2e95a21b95d4c4cfe1dbe43758773ac81704954046dfa8e93a56df4b85b460091b9ba8de06b6d844be2c6814b39460f1ae90bf3583735f72f97285d462bac046b620dd39b81396a49bfa510507e1126fd986bd56688a5c3784856d519aec19fe423191a57846e259e9ff87d52a54cf0d062d8523e06928544c33acb8c48362a7781e15fb1043739b36013813044bcd246446571002411b299cd2f5ca7d3598d00555523f5d9915de585b977e7ca54f97d7a9f21bd23640e4088a0ba279cab988cdd7942ff22f786a480ff49d4f39f36252284353aedc2f018f600639660b6569e76c6edd6793739a2afd64926180c0fe63c9827bbb7a374159b65643d0404a938088667aa4f87b7a4792b854a0b76a1747b6fe3bce82a12965ba21d79d9f6a53c20c9a6d58b384cc292349436f229aa060376987e51518c76f7f47cfdcffd5121c03393e09801e87a8b66be937d8f04314a10fb40f6b5d787a04339fbb31d3cac523c87f0c93b738cc11101c36bbb007bd26da67672ba29f1fff1fd9d9646236aafc5f8b36aba08fb1b33e9c7561e93166d6899756261e41fdfa071899d4379b0fb4318ff96043b481284e6ff1467e29decbfe78fb9eb428ce6069e238a142f7c06f481c71d3527d97e8d2a78d69573ff93f20807ae5341a3ac326a9b0ff42a11c560233394bd7496dc664034715c96c7314709fd8af0e09e741ab222b20a7f43bd597b9c2f4abd59327fa59fa13c416cfcd9ba293277ee3f8d2e21cd1e4eac9ab9763b3f295a560d5f45f000000ffff010000ffff7432b7e783310000") + bs, _ = 
hex.DecodeString("1f8b080000096e8800ffd45bdd6fe336127fcf5fc16e7b90bcf1dadee2703824cd026df603b9fd48b1deeb4b9007daa26d6e64524752498cc0fffb0d294a26254a9693dbeed540b336399c8f1f8733c3913a7efe55a694293413fc4e12718294c8c910cd395394e5a4fc9da5b9d4ff15bfd1f3f1d1f8f932e5339ca29f4ed002a71288305be62916f6b7263a8a72f82295a073159d1e1ddd6281e486cdd58ab2253a2b578cd63cc9531247d55c344457d70358518d8cb44a82a7291171342d47cf9548817691c36fca198a7f92739e812e3fad94ca06e8e108c1478bcd04b97d8d1501a993a119d51f104cd277445dbe87716d1a88d4e3051b2d9311c359c2fcc3f6b436b9a0cbe6f87a73f11a46a3c81b65204933b9ba0e30b960daa24a05679e08c145609d2484bdd1735692991b8fd114b0664b896664c1054133ce538952ce6f604429227c1e4a1962e05e21f2409313147da0521106cc005cd07b0e9b6a4047bf0baef89ca7a8a040bf268920521209846a9381c3448adc2bf805c30a0b5578cc765813f0eedf1776a92301066b7c0fe4fa11df4f094bdecf3297ed65ae965c3bdc67bdff1fe89a2a14bfff2d93831d7b96af6744ec15f019586276c100c95b9c4e1d21c50c2aa750fc28eed6e1c202ece4d364fc8e058643947e26ffc981d0c509d0438015ac6689816b4772980c6074be82b34d6a1ba105bca52941c56cb11fbd77a201164e2e59baf120c209b263969b3e007bf57d0b6185df4d376b088537aec2c50472660e61fbce04c85f197bc3f02c2589ebe945ec84390ea18b1cc8f8039fb7f035338f653bd573bf1589c03df77a18edc65d9696c7b50d40551c2ee3ea349fcf0949481297b1587fe802c53fec42af3b65c2531cfdc888bae3e2c684b868a013044ee3684513120d4e3dea40082fa7b647c5dfb06e6f31b86253b147e8f530c3f39b44f00c600180159d034c376433e35824361d6ebbd43614fbf4d619e49caf332c488c876856571c8f3e0185ce3d67676e2eaa9b2188ca05432f5ed605968c6687316ae55329f40b2a591ea48b9dacd8bcaad89cba0099543f5a12154763ede0e35b22240006db23b5fb4919efea83042bec2a61cdb34b602b3481656f77acc15f6e2045ad0f615facf0b83bb3b660d073e6fbe92ed4356417f5422fd98e84aa54f1e5372846979929764655fe47679d14364d8fbe427e8dc1e9f5e1f4d8ebd2ab2c7d7c469f49c625555c5022af26d76667a5af9b5938925ca8d871fe41d0805208f3d9b8c7ae05ccb1ae311f876855b799bd73c79a1a6c077e6d29c8029458e9d35f09f45ce7096eb7dff53a7131b1e950416651d0c502c9c055a4a871e3300aeefa3260f7dfddb278ef6d4be1ae776086be2b8053ddc583a147a13f2a018258d3bda8ee150334462f279349939a26b533e15c458047d09b2983ca495f5582b33c57d5b4370f353f8a6982284321eb8c363af71a775d617979c7a0a0cf88501b583608d1eb4f791b3c6dcc6e9bd8884d0b172df48a26d795711fb15a8dd6f83e9e0cd13fd1f362470cc505fb6da388fcc21554332f02b7b10695865f2583a6869ee40ab876d15000f791ed91b50adfa23956f3158a491bb40d50267d4d08103677c373a7e3b39ab82607dfc1dc05c590bf62db12199d3bf3015187416c38e8a0d22162785df3cae208187c1085cac3b87a4ad852ad60e0f838b4119a0b2a6d854c043f47324ba956ad5e6eee36e37a345d416afa542cd63caef41f2b0c5ce7e575175c463193db7696d6cbbaf25354533b71baa0aa7eb5b956a0b60a6b1292f0ea0009bd0458da9ad7d6abe23299832b1ce63c459fe4d09c557557c292b67ec6d6a5055c86542ebda46d4a93c5d215a1bd539f03afe8a9e2875d606b59c749f42668c2bab6e5f8485740293162756d0ef926b43516eae882215d8a4435c811d1ddb9f6657a8dbefec7113a4675b1c728fadba0ced1a9ddebc57bf49a4a6b3b9cee56582fe6a6f8fe0b80ca6f0ec5d3167a8780b6866c2bdbd13a4fb1fc8bf8a03d8e87629609bac662730866942d783b64faa2f22723661533a0d97b52c72d378a737603e520dc770bda41c7fe3b5bf0dd4caa0585a8cbb67653fe207bb7452be36bdab331e1dfed43fa7d4b78524a98faa35d7663dfad9e1dfb6e4b8dfe680572b715eb927468e74300c9752695d045f33fc22952e25b32dd3d5668b9d906afd0b51e588faec39edec45489b28a1bea561dce9ca2e01e142a8dbc1f29083770af0c94181997f5decb10fd6b7af96924cd9316bad8c49e1283217a58119cc0669ea087e81cae4ee0072fbe6c32129da00867a0115c0a4089f157c959b4f564c6d18fe553992fbab70b7acf799ae24c12bffbb9ad37134c63774f33c135c6ae70cbdb7d8fa46a32494295f68b7d1e59b2cd850024ec0a4bd710ae9982f56fee610b8b67847e4bb7c96c543d822ada55a5cb56c35e73ca45ba34e0517ddc1a181
0b41b58ec05e1a13856200de4b866c048b281cb0428b0ed0151fde8fc8f6d4b08c479d2699ee99b90bb4fd543ce21a26e1b33a451a39b6f5a136123c351ec34540fd42f806e7fb0eb22a8a5bbb4e62658a49b1f76e9c675bc704b5b7f4a2846592e5775ae831e454dbda969f9b585c7602fd55b7760f0fd93635f208bec77b7e29c0f51c219a9b95b2f1b7bf9a49582420ed0a0dac59c501c7a643eaa84683b9b667c0377d7d5959ff8433e5e5b1c8ae935cdfd485e7e6682e09b1e47a2685c02a796bb3cdb1db8320905e38347def16ca1df0973b9359edb7cb703c4d58a8852c5ee235485eb2162a74f8fa2ac860978c669c3ef5820b086ea78c3d0d9d68ec8597e73ea56198446ada8dc1b5bbe0b187b2e358e7557acb5b3b80d566abacda58be24ea31baf1a55a61b481ae617f45df61bae3b0c8a054110c8e80b355dc7faeb4d2114ac60e313f5a7811d3e512c0b3ac53c2558bc29edeface0dc17af7cbb42b0982eb0b12d287701e18425e9a679542140b8c23bb661af179a4bae9bc2c2be284df50c7fe13291a5784ee27915ff87f55ba89e1b0c3a2e8ec0a7e5a2629a61e5433cb8ec946f33c53ec5d03c4d9b009d498215320999d335a440402385539aafdd97fc12baa44aea5757e66534d6dea5df95d2a7cbeb54f90d699b200a06e503a245cab988cdd7942f8b2f7866440ff4339f6ae6e5a404434bae3d5f021dc10d0ace96caca2ed4dcba6f392e68aaf41b8e0c0305f6df6d2cb5b7ab77139465b91a0202294d02269ae991e26fe93d49e20a406fd52e8f6cfdd72e4b856694e9865c7f7dea4d093368b62167095950469296de443441c16ed28ecb2b708e9fff8e9ebbff340a1c43393e0b909e86a476a3f4336cf8318ad03b7a886a7d747a82321ffb29d3a9c513c4bf0f8bb73ccc11101c36bbf407bd26dae7676ba2df16fe3ff2b3c9c46c54f54f079a75d2a7f8d9219af551e9296ed64b974e259e20fde61b3899d44f361fe9639ef2c186688b509cdee18dfc54bedefaedfd7bd2019ce1d2a6714285ee81df9238e2a6a5fa26d1579f2ea55d7c8af7fde1be721e0d9d615354d8ffa1a01a560233394ff3a43163aa81939a65b6390abccfec574784f342ab226b10a7f47baabf1480e9b756cf9ee9b7c89f21b67c61de293a7be6378eaf2ccf114dae9fbdfa656c56beb2122c4cff050000ffff010000ffffe4f29e3132310000") gr, _ = gzip.NewReader(bytes.NewBuffer(bs)) bs, _ = ioutil.ReadAll(gr) Assets["app.js"] = bs @@ -63,7 +63,7 @@ func init() { bs, _ = ioutil.ReadAll(gr) Assets["favicon.png"] = bs - bs, _ = 
hex.DecodeString("1f8b080000096e8800ffec1b6b93dbb6f1bb7f05cc4e62bb35a5b39d57654933ce9d33bd36b13d3ebb6da6930f100991c88100038077a75ef4dfbb0b901429917ae5ce6d66ea199f0802d805f6bdc072fcf0ecede9871fdfbd26a9cdc4f4c1187f88a03299044c06442621cdf349601632b22997498063188de12763969228a5da303b090a3b0fbf09aad7a9b579c87e29f8d524f867f8f15578aab29c5a3e132c20919296499873fe7ac2e284d5b324cdd824b8e2ec3a57da36065ef3d8a693985df18885aef19470c92da7223411156cf26c70b206266626d23cb75cc906a4b541b4b0a9d26bfd82cb4ba299806d43a78d0a4b78845052cde693604eafb039c81d351e8c2db7824d6b0a8d87fe4509c7cf9929658dd5341f46c60cebd620e372006f82129d5d086652c62c2ec3b5885de4b04ecb6e2c4e458433152fc8ed0302ff721ac78032b42a1f91af4ff29b97add73365adcaaa9ee58307038413664a2a93d3889550e6b0f9704e332e16235277961328b02c84edc64c1a164fad63bed55364c1662faeadbf770efbeeefad21c75b2177f6d690e3724f33a563a63d61a4927e370e05b19ad8b41ce6e841054fe4884420004cfb6d0b95a872484675c2a1fba445dbbaed30845fd6a45786a3c88d90a320f0571ef378e8b8095c1d7aed7930766c0426a1e8692504d393e0a212a253ab05ca40ccaf4824a8319340d2ab19d5c4ff84737ec362dc5ef52266735a08141c5c46732222a05c325df6b97eda061bce349571301df32ca97a900601313a42c10cb1153e7bfe8d137be2153278f11c9482f124b5ee794a2e565a401bd87035b0550e9af0109633e7c9b9c4a18d15b97173a5b3b585b957e5b3464c6b73dcbc5901822e4b5df18da08233b392c0ff30d71c58b970362d123cba9c049a194bb57dfc2498bef78fe48dba1e0f3d84b5b50d71256beff2b5c5a238ad2df643ca88df72a1294a0649a92133c624da4e99b098003a90514b6804f2422d8b072b3a92ac3096940b0561ab07810c3322d9751bf8603ccc1b741f02e14b81f08fe5cf836df2d1ecd3ea3a58e3633d4b84590c12b1cec306af99d64a7fcf0d507820984c40eda6e4a4e60cd86dd893fb1b5e532dbd7bd9606e8ee034cb19b50e24587ed2800c426b322ac4f4f616de0e3ef08c915f490c241a05693acab291310303a673b91c811abaa1e4f676ae3993b158bc51e0281ee3ccd708f3c972d922e1de62961742789e934ae2aa3d35242e128c6a87c8e0cadffead47d89a84c629a0eeb0cf153bd738dc3b39a79209e2fe865cce55177dd7878768a11c2fc6e98b768ff36cc1f4548050320db6ec45d7a21c5467a2abd9beb166b53bd6e2673a53dfddf7300c49b6304ccc4918f6cdd7355ae7ed0ad0a8a0294312587e3a4f508e40c70c4a0032a3139a87181367becb4064f4ecf9675b86bb29e042656d48e98c6150057fd7ccf4de2012b1c8538c3a48fd14aa4b14091c35251f601f04f7b57d557e74ff4687363e840c270791a11d7b04a0afb8e03710873d2e39e294ef4e97f81c97787b7bc5b401eb88e00f9bfe254c7f6c2be23e3970fa9f3f0b5a9beff35fcd7fb7b75cce7203260cc254cda3e5125a871219459e74c94c94b22b0dbfb1ba96b5f4dc15b18fdbae2aecfdefb7c8ef7cb75f1fb1db6e0fb2720f2ce6d699a34a2136a298d27eb8e71b83be6f8795c8998cb858598ad780a2dbe9ec470be8d15b8c337857a22034d14e61cc562bdd6d92dd6cef957718e5234db0b73ba7d8d1303c7761913de473786c01ae48efbb2f2cb54513f3fd9aecff39035d82fd3bd32da8fbcd7a15c7074d3b5c43e96c06f921c63993e0f6165829598491b5f957897680b2797ef6d3e05c7ebbb0cc7c50960a305f332e21bd582ebf45c26d9bd761df215b02b407b3ea9399f97ba4e2dbc21e47c62ebf715f74bc1bf7f17f6fd122c8163ea1c2ef3609bf9b1177c4f9edcc6d08068de32aa9d9cd71c8e556fc06fb7a4fec1ef6a475d0816961675adbd16c26bc071d547cd5754e71af59f27be60e03955e6c4d943700239dfa72e3bccabd49796a6320aac880d762900835a3e23b2e181a452aaee9c2bc29b219d3cb2599e3eba76b639ddd6d9adc6e9cddf612f2a23fb5c19d31c1d0906e208f5d07d12c523a364f4a5973c72c7dbbfc5e45e00d744d41776856a113d8b9df46ddd03bdba78376cc363770f59ee4f4358f10e8d222b817b78fbc6c99228a98318ff09c1f7724198b3d7926930939794aca71e56169c7b88738ae2b5a3e4243f0943305ffcaffed0e2fef4e4d9a83b54a347337275d43fb868733aa03a2150630d53bf78a6a4ec32b2a0a26d5f524f8eaa4f92ae37212b4dfd09b49f0ece424d86aef5b8c6aac601f7e3587a3f1eae7d9d625b4725bd01f5833f9630989bbc37a0f6b48ba4c88744a307abe5c7ef67267c67f18e8a696ad309d00a62d0eaa53867675b9836693225bd72988c7d6d
337d0ac2d0bf6ed678356501a26e8b79b85df91271cd3f21a34c073791a5a9524286cb0204173c3aad754277895fc07b3001797a17dc05fbc4ada691a788c97d46e5a7b09150e5223dbc366ec32305e5c2041f12807f0d3642e79ffea073c090177019e0225a11ce7deb54672490ac37aaf1cd6114579f18e69bca9ecd00c6a0b4dc5e819a806397df7b18137511ad2252ebba475d5d77ff1d143f84fe1c5fefbc2c82c5027311f304c05992c9bfb4b656bfea7124e776deadd173eeeca371af0717898804ce4ad4b93721b28b0d58ef639326bdc0696d30698d480fb228f30c87a447efd956cf47823ff680f040e893fdd8385e36143058cc778f6b66abb7a103c3272a3f784cc658ed51ff106e016b5ca2bfca0ccd75643b18d838102ce17b8cbd6394f066f737fa8b182f91326610edd1e54ed77621bc4afd42d65d1e54cdd04bddc9829250ea3f87e63f1df0617b651d613b1b5e0bb249e5bffbe42b007a5770ce9aa1bd863eaa6c663894bab866363ca3e8706266b9e1d340f0d0cbd629571c393830b68ef7116e02fd47b93a75357e36088a5978cb0f99c45965ca74c56250d6852ead2a9419d3379a8f7e68cb65443b4cb6caa339656d58d2fabdab7d2a6ab4664ed4eb259b25188c6f80a6fe3b1ebf8682cf874a3a207abcfaa92352cc733a3e130e1362d6683486543c864b37458937ea8217ba5868151ff1e821663c97bffc2fb394070e718aff9250fa6672a2a328867ca0cf0be9071630adcdcb745e5baef010b288d2a203a23a760af36b18c878568b6f27e6144bb2899bd56fad2558bd47a0c969082aba331eb903fd719c69c0a95f4e4007e4859eeb825f6f2e3309882d8b259a813a33e7799a171fa457b72196fed3e6ce93a966437d09939a1080d4f76dc629cd6f704c491abc33aa45f6c350eddfbef89b2c61d71f2aa62cb3096192cd59a318217304f89c28a43a619e1865002193b448219b90639220b1018728e8587c06eb2baef186c2078cfac5e00fccf532604cf5f76ec31ff2df60f45aeba51385cdc886f88bb94bb432bfd22a1eaf03de626e335c0f24426e571cc2438285dc07e3eb73c63e665bf8feb9368bc14216f9c8adfa954ed17b3f7c4ea5b5c74233c76f7d367c114574fcecf76c6416598960b1ab15489188b547ffcf8dd5fdf7e3c7bf7f6fde90faf0683810b2280de98df94320462fafa861bebaadf9c2df378b745cdce2f3683bd426b90155c6979e7b74790b7f2b6a02579388374fbd2d7604abf631281dd01c59cab42c6be028c11905f834a5a368357714cbc1a94c2ad7cc7aab0617068aefcdb394733103dbc8c3f8a697f511923174c43d4517204e11dcd0fb79823b971918251045a1b8bd5d86a4e4a61acc81f95371cc6154a7c7a4ad338c603558c165e558f47d13c5e009179b4a61f9b728de957b640f540c6acd01fcb9d7ad517561fcda5d7d2df326519058f96538d4759dbeb45783ec2ef36aabbc47a23a85a5b279684aa2682ebcc9976d69016106943141081269a4881f42e5062504c4af007cb475f42561d55f803e087eb66ac1bc73f3095f15f048067c7726cb40e4fc9256339ca73c6c1cad8945a3266d9b42e241c0fa1e54bbb676e2740da968d311c54c22a352068bb2a05713b2f41402c31ea39c6d60cd20b1428975b68764cb25453a4e5b97a73d0632bf031e9acaeaab7279c875c8977c700d3538c0f7663a88ad7d7fdd876cc2e2c26ae0a5cb0792bb3f61783d536fdfd61df4706fb466cfedb26ff7506a02e04d5ee63a29ffd65beeb9db687fdfc4b01ea133e1f9c0c5eec1abbfa5ee9e7f5cf95b6cca279bede3df4b7fe1029b9efcbfe030000ffff010000ffff297e4ed370360000") + bs, _ = 
hex.DecodeString("1f8b080000096e8800ffec3b6d73db36d2dffd2b103e933a791a4a4ed2b75325cda4763ae7bb36c9c4c9dd756efa012221113508b000685ba7eabfdf2e4052a444eaad76ee3a7399894510c02eb0ef0b2c878f2ede9e7ff8e9dd6b92d8548c4f86f8430495b351c06440e42ca459360acc5c4636e17216e0184663f84999a5244aa836cc8e82dc4ec36f82f275626d16b25f737e330afe117e7c159eab34a3964f040b48a4a46512e65cbe1eb178c6aa5992a66c14dc70769b296d6b036f796c9351cc6e78c442d77846b8e49653119a880a367ade3b5b03133313699e59ae640dd2da209adb44e9b57ec1e535d14cc0b6a1d346b9253c42288966d35130a537d8ec658e1a2743cbad60e38a42c3be7f51c0f173264a596335cdfa9131fdaad54bb9ecc19ba04067e7829984318bcb702d62e719acd3b23b8b5311e144c573b23821f02fa3710c2843abb201f9fa2cbbfbb6f17aa2ac5569d9b33c39e9219c305552998c46ac803285cd87539a72311f90aab398408165216c3766d2b0786c1df3ad1e230b367b716dddbd53d877776f0539de0ab9b5b7821c177b9a281d33ed092395f4bb712888d5c426c530470f2af84c0e480402c0b4dfb65033550c49a99e71e83e6bd0b66a3b0ce19715e995e1287203e42808fc8dc73cec3b6e0257fb5e7b4e868e8dc024143dad84607a145c9542746eb5401988f90d8904356614487a33a19af89f70caef588cdb2b5fc46c4a73818283cba84f4404944ba68b3ed74f9b60c389a6320ec6439ececa1ea441408c8e5030436c85cf5f7ce3c49e78850c5ebe00a5607c9658f73c26572b2da0356cb81ad82a074d7804cb99f2d9a5c4a1b515b97153a5d3b585b957c5b3464c6b73dcbc490e822e0b5df18da08433b192c0ff30d31c583977362d123cba1e059a194bb57df23418bff78fe48dba1df63d84b5b5f571256befb2b5c5a238ad2df643c288df72ae294a0649a82113c624da4e396331017420a396d008e4855a16f7567424696e2c29160ac2560d02196644b2db26f0deb09fd5e8de07c21702e11f8b9f936df251efd3ea3658e363354b84690c12b1cec31aaf99d64affc00d50b827989c81da8dc959c519b0dbb027f737bca55a7af7b2c1dc0cc16996316a1d48b0fca4061984d6a45488f162016f7b1f78cac86f2406120d822419a4e9c0989e01d3b95c0e400ddd50b2584c35673216f3370a1cc5139cf91a613e5d2e1b24dc5bccb25c08cf73524a5cb9a79ac4458251ed10195cf9dbbf76085b9dd03805d41df6b962e71a873b2767543241dcdf90cba96aa3effaf0102d94e3c53079d9ec719e2d189f0b104aa6c196bd6c5b9483ea4c7439db37d6ac76cb5afc4c67eadbfb1e852149e786892909c3aef9ba42ebbc5d0e1a15d4654802cbcfa7339423d031831280cc6885e621c6c499ef2210193c7ff178cb7037055ca8ac0c299d300caae0ef9a99de1bc44cccb304a30e523d85ea1a4502478dc907d807c17d6d5f951fddbdd1be8d0f21c3d9416468c61e01e82b2ef80dc4614f0a8e38e5bbd725bec0252e16374c1bb08e08feb0e95fc2f427b624eed303a7ffe971d0d87c97ffaaff5b2cb89c64064c1884a99a47cb25b40e25328a3c6993992861371a7e63752b2be9b92f621fb75d95db87df6f9eddfb6ebf3e62b7ed1e64e51e58ccad3347a5426c443185fd70cf77067ddf0e2b91311971b1b214af0145bbd3d98f16d0a3b71867f0ae444168a29dc298ad56badd24bbd9de2bef30ca479a606f77ceb1a36678eec3227bc897f0d8005c92de775f596af33ae68735d9ff7506ba00fb37a61b50f79bf52a8e0f9a76b886d2c904f2438c7346c16201ac942cc2c8dafcb340db43d9bcbcf8b97729bf9b5b663e284b0598af0997905e2c97df21e1b6cd6bb1ef902d01da8359f5c9ccfc0352f16d6e8f23639bdf78283ade8ffbf89fb76810640b9f50e1779b843fcc887be2fc76e6d60483c67199d4ece638e4722b7e837d7d2076f73bd23ae8c0b4b035ad6d69d613de830e2abe6a3ba778d02cf93d7387814acfb726ca1b80914e5db97156e6dea438b5311055a4c06bd19b0935a1e27b2e181a452a6ee9dcbcc9d309d3cb2599e2eb676b639dddad9bdc769cedf612f2a2cf9be02e986068483790c7ae836816291d9ba785acb96396ae5dfea022f006baa2a03b342bd109ecdc6fa36ee8bdedd3413b669b1bb83a4f72ba9a4708746111dc8bc5a9972d93471133e614cff9714792b1d89367341a91b367a418571c96b68c7b84e3daa2e52334044f3913f0affc5feef0f2fed4a43e58ab9966eee6a46d68d7f070427540b4c200a67ce75e51cd69784345cea4ba1d055f9dd55fa55c8e82e61b7a370a9e9f9d055bed7d8351b515ecc3affa70345edd3cdbb58422bd5df8fc963c816593ff2f8071775eefc1f5c98615794a3e27a78f4f7765518bc50130d7340c9ad23d0cce96cbc75b9c53abfcecea7287cc264
196ae530f8facc76fa0595915ecdbcffeaca0d4cccfef37097f202f38a4c515688067f234b46a3643298305099a1956bea67a86d7c8ff67e6e0de52b40df88bd7483bcd028ff182da4d6b2ea1c4412a647bd88b5dc6c58b0b24271e650f7eeacc25ef5ffd88a720e02ac04ba02414e3dcbbc6482e496e58e775c33aa228cbdf318db7942d9a416daea9183c07d520e7ef3ed6f0ce94865489cb36695df5755f7a7410fe5378b0ffbc30320bd499990f18a2824c16cdfda5b231ff5309a7bb32f5ae0b1f77e51a35f8383c9c814c648d0b93621b28b0e58ef6392eabdd0416d37a98d080eb22a718609d92df7e231b3ddec89fee81c021f1277bb0703c682881f118cfdd566d570b82c7456ef49e90b9ccb0f223de00dca056717d1f14b9da6a28b6713050c0f90277d13ae5b3dedbcc1f68ac60fe8c099843b70755bb9dd806f14b754b58743d5177412737264a89c328bedf58fcb7c1856d94f5446c2cf83e89e7d6bfaf10ec41e91d43da6a06f698baa9f158ded2a8dfd898b2cf818149ebe706f50303436f5869dcf0d4e00ada7b9c03f8cbf4cec4e9dcd5371862e935236c3a659125b709936539039a94aa6caa57e54b1eea8339a32d9510cd129bf27ca55171e34baaf6adb269ab0f59bb8fac976be4a236bec45b7b6c3b3a1a0a3edea8e6c1cab3b25c0d4bf1cca0df9f719be4935ea4d23e64b169d2af48dfd790b952c3c0a8ff00418bb1e4bd7fe1fd1c20b8778cb7fc9a07e30b15e529c43345f6f750c8b831396eeebbbc74dd0f80059446e5109d9173b0579b5886fd5cd45b59b730a25d94ccde2a7ded2a452a3d064b48c1d5d198b5c89feb0c634e859a75e4007e4851eab825f6f2e3309882d8b25ea413a33eb799a161f2457372116fed3e68693b926477d0993aa1080d9fedb8c138afee088823578b7548bed86a1cdaf7df11650d5be2e455b596612c3558a63561042f5f9e1185d5864c33c20da104b275880453720b7244e62030e4128b0e81dd6475d7d1db40f09e593d07f89f254c089e7ddbb2c7ecf7d83f14b9f236e1707123be21ee53ee0eadf28b84aac2f7989b9457008bd39884c73193e0a0740efbf9ccf294996fbb7d5c9744e3850879e354fc5ea56abf98bd2356dfe2a26be1b1bb9bbe08c6b87a7279b1330e2ac2b44cd088254ac458a0fad3c7effff2f6e3c5bbb7efcf7f7cd5ebf55c1001f4c6fca6902110d3d777dc5857f9e66c99c7bb2d6a767eb11eece55a83ace04a8bfbbe3d82bc95b7052dc9c209a4dbd7befe52fa1d9308ec0e28e654e532f6d55f8c80fc1a54d2a219bc8a63e2d5a0106ee53b56450dbd4373e5dfcf399a82e8e145fc514cfbb34a19b9621aa28e822308ef687eb8c51cc98dab048c22d0da58acc4565352086349fea8b8dd30ae48e2d3539ac6311ea662b4f0aa7c3c8ae6f11c88cca335fdd8946b4cbfd239aa07326685fe58ee54abbeb2fa682ebd96fe86294d2978b48c6a3ccada5e2bc2b3017eb351de23561b41d5da3ab1205439115c67c6b4b3863487481ba2800834d1440aa4778e12836252803f583eba12b2f2a8c21f003f5a3763ed38fe8ea98cff1a003c3b9662a3757846ae19cb509e530e56c626d492214bc75511e1b00f2d5fd63d713b01d2366c8ce1a01256a91e41db552a88db790102628941c731b666905ea040b9dc42b36392a58a220dcfd599831e5b7d8f4967794dbd3de13ce43abc3d06189f637cb01b4359b8beeec7b6637661317115e0824d1b99b5bf142cb7e9ef0ebb3e30d83762f3df35f92f3300752ea8761f12fde22ff25defb839ec975f73509ff045efacf772d7d8d5b74abfac7faab46516cdb2f5eebebff18748c97d5bf66f000000ffff010000ffff12f6eb7e6c360000") gr, _ = gzip.NewReader(bytes.NewBuffer(bs)) bs, _ = ioutil.ReadAll(gr) Assets["index.html"] = bs diff --git a/build.sh b/build.sh index 09ef2aa45..cac937b1c 100755 --- a/build.sh +++ b/build.sh @@ -3,7 +3,7 @@ export COPYFILE_DISABLE=true distFiles=(README.md LICENSE) # apart from the binary itself -version=$(git describe --always) +version=$(git describe --always --dirty) build() { if command -v godep >/dev/null ; then @@ -15,6 +15,7 @@ build() { godep= fi ${godep} go build -ldflags "-w -X main.Version $version" ./cmd/syncthing + ${godep} go build -ldflags "-w -X main.Version $version" ./cmd/stcli } prepare() { @@ -26,9 +27,12 @@ test() { } sign() { - id=BCE524C7 - if gpg --list-keys "$id" >/dev/null 2>&1 ; then - gpg -ab -u "$id" "$1" + if git describe --exact-match 2>/dev/null >/dev/null ; then + # HEAD is a tag + id=BCE524C7 + if gpg --list-keys 
"$id" >/dev/null 2>&1 ; then + gpg -ab -u "$id" "$1" + fi fi } @@ -79,7 +83,7 @@ case "$1" in test || exit 1 export GOARM=7 - for os in darwin-amd64 linux-amd64 linux-arm freebsd-amd64 ; do + for os in darwin-amd64 linux-amd64 linux-arm freebsd-amd64 windows-amd64 ; do export GOOS=${os%-*} export GOARCH=${os#*-} diff --git a/cid/cid.go b/cid/cid.go index 3a3727dc5..8295a81ed 100644 --- a/cid/cid.go +++ b/cid/cid.go @@ -1,18 +1,30 @@ // Package cid provides a manager for mappings between node ID:s and connection ID:s. package cid +import "sync" + type Map struct { - toCid map[string]int + sync.Mutex + toCid map[string]uint toName []string } +var ( + LocalName = "" + LocalID uint = 0 +) + func NewMap() *Map { return &Map{ - toCid: make(map[string]int), + toCid: map[string]uint{"": 0}, + toName: []string{""}, } } -func (m *Map) Get(name string) int { +func (m *Map) Get(name string) uint { + m.Lock() + defer m.Unlock() + cid, ok := m.toCid[name] if ok { return cid @@ -22,22 +34,45 @@ func (m *Map) Get(name string) int { for i, n := range m.toName { if n == "" { m.toName[i] = name - m.toCid[name] = i - return i + m.toCid[name] = uint(i) + return uint(i) } } // Add it to the end since we didn't find a free slot m.toName = append(m.toName, name) - cid = len(m.toName) - 1 + cid = uint(len(m.toName) - 1) m.toCid[name] = cid return cid } +func (m *Map) Name(cid uint) string { + m.Lock() + defer m.Unlock() + + return m.toName[cid] +} + +func (m *Map) Names() []string { + m.Lock() + + var names []string + for _, name := range m.toName { + if name != "" { + names = append(names, name) + } + } + + m.Unlock() + return names +} + func (m *Map) Clear(name string) { + m.Lock() cid, ok := m.toCid[name] if ok { m.toName[cid] = "" delete(m.toCid, name) } + m.Unlock() } diff --git a/cid/cid_test.go b/cid/cid_test.go new file mode 100644 index 000000000..37f64716d --- /dev/null +++ b/cid/cid_test.go @@ -0,0 +1,27 @@ +package cid + +import "testing" + +func TestGet(t *testing.T) { + m := NewMap() + + if i := m.Get("foo"); i != 1 { + t.Errorf("Unexpected id %d != 1", i) + } + if i := m.Get("bar"); i != 2 { + t.Errorf("Unexpected id %d != 2", i) + } + if i := m.Get("foo"); i != 1 { + t.Errorf("Unexpected id %d != 1", i) + } + if i := m.Get("bar"); i != 2 { + t.Errorf("Unexpected id %d != 2", i) + } + + if LocalID != 0 { + t.Error("LocalID should be 0") + } + if i := m.Get(LocalName); i != LocalID { + t.Errorf("Unexpected id %d != %c", i, LocalID) + } +} diff --git a/cmd/.gitignore b/cmd/.gitignore index 3e1db14fd..f80161a20 100644 --- a/cmd/.gitignore +++ b/cmd/.gitignore @@ -1 +1,2 @@ !syncthing +!stcli diff --git a/cmd/stcli/logger.go b/cmd/stcli/logger.go new file mode 100644 index 000000000..a7883e866 --- /dev/null +++ b/cmd/stcli/logger.go @@ -0,0 +1,72 @@ +package main + +import ( + "fmt" + "log" + "os" +) + +var logger *log.Logger + +func init() { + log.SetOutput(os.Stderr) + logger = log.New(os.Stderr, "", log.Flags()) +} + +func debugln(vals ...interface{}) { + s := fmt.Sprintln(vals...) + logger.Output(2, "DEBUG: "+s) +} + +func debugf(format string, vals ...interface{}) { + s := fmt.Sprintf(format, vals...) + logger.Output(2, "DEBUG: "+s) +} + +func infoln(vals ...interface{}) { + s := fmt.Sprintln(vals...) + logger.Output(2, "INFO: "+s) +} + +func infof(format string, vals ...interface{}) { + s := fmt.Sprintf(format, vals...) + logger.Output(2, "INFO: "+s) +} + +func okln(vals ...interface{}) { + s := fmt.Sprintln(vals...) 
+ logger.Output(2, "OK: "+s) +} + +func okf(format string, vals ...interface{}) { + s := fmt.Sprintf(format, vals...) + logger.Output(2, "OK: "+s) +} + +func warnln(vals ...interface{}) { + s := fmt.Sprintln(vals...) + logger.Output(2, "WARNING: "+s) +} + +func warnf(format string, vals ...interface{}) { + s := fmt.Sprintf(format, vals...) + logger.Output(2, "WARNING: "+s) +} + +func fatalln(vals ...interface{}) { + s := fmt.Sprintln(vals...) + logger.Output(2, "FATAL: "+s) + os.Exit(3) +} + +func fatalf(format string, vals ...interface{}) { + s := fmt.Sprintf(format, vals...) + logger.Output(2, "FATAL: "+s) + os.Exit(3) +} + +func fatalErr(err error) { + if err != nil { + fatalf(err.Error()) + } +} diff --git a/cmd/stcli/main.go b/cmd/stcli/main.go new file mode 100644 index 000000000..e52b37f05 --- /dev/null +++ b/cmd/stcli/main.go @@ -0,0 +1,137 @@ +package main + +import ( + "crypto/tls" + "flag" + "io" + "log" + "os" + "path/filepath" + + "github.com/calmh/syncthing/protocol" +) + +var ( + exit bool + cmd string + confDir string + target string + get string + pc protocol.Connection +) + +func main() { + log.SetFlags(0) + log.SetOutput(os.Stdout) + + flag.StringVar(&cmd, "cmd", "idx", "Command") + flag.StringVar(&confDir, "home", ".", "Certificates directory") + flag.StringVar(&target, "target", "127.0.0.1:22000", "Target node") + flag.StringVar(&get, "get", "", "Get file") + flag.BoolVar(&exit, "exit", false, "Exit after command") + flag.Parse() + + connect(target) + + select {} +} + +func connect(target string) { + cert, err := loadCert(confDir) + if err != nil { + log.Fatal(err) + } + + myID := string(certID(cert.Certificate[0])) + + tlsCfg := &tls.Config{ + Certificates: []tls.Certificate{cert}, + NextProtos: []string{"bep/1.0"}, + ServerName: myID, + ClientAuth: tls.RequestClientCert, + SessionTicketsDisabled: true, + InsecureSkipVerify: true, + MinVersion: tls.VersionTLS12, + } + + conn, err := tls.Dial("tcp", target, tlsCfg) + if err != nil { + log.Fatal(err) + } + + remoteID := certID(conn.ConnectionState().PeerCertificates[0].Raw) + + pc = protocol.NewConnection(remoteID, conn, conn, Model{}, nil) + + select {} +} + +type Model struct { +} + +func prtIndex(files []protocol.FileInfo) { + for _, f := range files { + log.Printf("%q (v:%d mod:%d flags:0%o nblocks:%d)", f.Name, f.Version, f.Modified, f.Flags, len(f.Blocks)) + for _, b := range f.Blocks { + log.Printf(" %6d %x", b.Size, b.Hash) + } + } +} + +func (m Model) Index(nodeID string, files []protocol.FileInfo) { + log.Printf("Received index") + if cmd == "idx" { + prtIndex(files) + if get != "" { + for _, f := range files { + if f.Name == get { + go getFile(f) + break + } + } + } else if exit { + os.Exit(0) + } + } +} + +func getFile(f protocol.FileInfo) { + fn := filepath.Base(f.Name) + fd, err := os.Create(fn) + if err != nil { + log.Fatal(err) + } + + var offset int64 + for _, b := range f.Blocks { + log.Printf("Request %q %d - %d", f.Name, offset, offset+int64(b.Size)) + bs, err := pc.Request("default", f.Name, offset, int(b.Size)) + log.Printf(" - got %d bytes", len(bs)) + if err != nil { + log.Fatal(err) + } + offset += int64(b.Size) + fd.Write(bs) + } + + fd.Close() +} + +func (m Model) IndexUpdate(nodeID string, files []protocol.FileInfo) { + log.Println("Received index update") + if cmd == "idx" { + prtIndex(files) + if exit { + os.Exit(0) + } + } +} + +func (m Model) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) { + log.Println("Received request") + return nil, io.EOF +} + 
+func (m Model) Close(nodeID string, err error) { + log.Println("Received close") +} diff --git a/cmd/stcli/tls.go b/cmd/stcli/tls.go new file mode 100644 index 000000000..cfc1ce5bc --- /dev/null +++ b/cmd/stcli/tls.go @@ -0,0 +1,71 @@ +package main + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base32" + "encoding/pem" + "math/big" + "os" + "path/filepath" + "strings" + "time" +) + +const ( + tlsRSABits = 3072 + tlsName = "syncthing" +) + +func loadCert(dir string) (tls.Certificate, error) { + return tls.LoadX509KeyPair(filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")) +} + +func certID(bs []byte) string { + hf := sha256.New() + hf.Write(bs) + id := hf.Sum(nil) + return strings.Trim(base32.StdEncoding.EncodeToString(id), "=") +} + +func newCertificate(dir string) { + infoln("Generating RSA certificate and key...") + + priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits) + fatalErr(err) + + notBefore := time.Now() + notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) + + template := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{ + CommonName: tlsName, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + fatalErr(err) + + certOut, err := os.Create(filepath.Join(dir, "cert.pem")) + fatalErr(err) + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + okln("Created RSA certificate file") + + keyOut, err := os.OpenFile(filepath.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + fatalErr(err) + pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) + keyOut.Close() + okln("Created RSA key file") +} diff --git a/cmd/syncthing/blockqueue.go b/cmd/syncthing/blockqueue.go new file mode 100644 index 000000000..674946880 --- /dev/null +++ b/cmd/syncthing/blockqueue.go @@ -0,0 +1,94 @@ +package main + +import "github.com/calmh/syncthing/scanner" + +type bqAdd struct { + file scanner.File + have []scanner.Block + need []scanner.Block +} + +type bqBlock struct { + file scanner.File + block scanner.Block // get this block from the network + copy []scanner.Block // copy these blocks from the old version of the file + last bool +} + +type blockQueue struct { + inbox chan bqAdd + outbox chan bqBlock + + queued []bqBlock +} + +func newBlockQueue() *blockQueue { + q := &blockQueue{ + inbox: make(chan bqAdd), + outbox: make(chan bqBlock), + } + go q.run() + return q +} + +func (q *blockQueue) addBlock(a bqAdd) { + // If we already have it queued, return + for _, b := range q.queued { + if b.file.Name == a.file.Name { + return + } + } + if len(a.have) > 0 { + // First queue a copy operation + q.queued = append(q.queued, bqBlock{ + file: a.file, + copy: a.have, + }) + } + // Queue the needed blocks individually + l := len(a.need) + for i, b := range a.need { + q.queued = append(q.queued, bqBlock{ + file: a.file, + block: b, + last: i == l-1, + }) + } + + if l == 0 { + // If we didn't have anything to fetch, queue an empty block with the "last" flag set to close the file. 
+ q.queued = append(q.queued, bqBlock{ + file: a.file, + last: true, + }) + } +} + +func (q *blockQueue) run() { + for { + if len(q.queued) == 0 { + q.addBlock(<-q.inbox) + } else { + next := q.queued[0] + select { + case a := <-q.inbox: + q.addBlock(a) + case q.outbox <- next: + q.queued = q.queued[1:] + } + } + } +} + +func (q *blockQueue) put(a bqAdd) { + q.inbox <- a +} + +func (q *blockQueue) get() bqBlock { + return <-q.outbox +} + +func (q *blockQueue) empty() bool { + // There is a race condition here. We're only mostly sure the queue is empty if the expression below is true. + return len(q.queued) == 0 && len(q.inbox) == 0 && len(q.outbox) == 0 +} diff --git a/cmd/syncthing/config.go b/cmd/syncthing/config.go index 66e922aed..b8bdf4094 100644 --- a/cmd/syncthing/config.go +++ b/cmd/syncthing/config.go @@ -32,7 +32,6 @@ type NodeConfiguration struct { type OptionsConfiguration struct { ListenAddress []string `xml:"listenAddress" default:":22000" ini:"listen-address"` ReadOnly bool `xml:"readOnly" ini:"read-only"` - AllowDelete bool `xml:"allowDelete" default:"true" ini:"allow-delete"` FollowSymlinks bool `xml:"followSymlinks" default:"true" ini:"follow-symlinks"` GUIEnabled bool `xml:"guiEnabled" default:"true" ini:"gui-enabled"` GUIAddress string `xml:"guiAddress" default:"127.0.0.1:8080" ini:"gui-address"` diff --git a/cmd/syncthing/config_test.go b/cmd/syncthing/config_test.go index bd1ea1d48..da493ae39 100644 --- a/cmd/syncthing/config_test.go +++ b/cmd/syncthing/config_test.go @@ -11,7 +11,6 @@ func TestDefaultValues(t *testing.T) { expected := OptionsConfiguration{ ListenAddress: []string{":22000"}, ReadOnly: false, - AllowDelete: true, FollowSymlinks: true, GUIEnabled: true, GUIAddress: "127.0.0.1:8080", @@ -90,7 +89,6 @@ func TestOverriddenValues(t *testing.T) { expected := OptionsConfiguration{ ListenAddress: []string{":23000"}, ReadOnly: true, - AllowDelete: false, FollowSymlinks: false, GUIEnabled: false, GUIAddress: "125.2.2.2:8080", diff --git a/cmd/syncthing/filemonitor.go b/cmd/syncthing/filemonitor.go deleted file mode 100644 index a86506c3b..000000000 --- a/cmd/syncthing/filemonitor.go +++ /dev/null @@ -1,173 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "fmt" - "os" - "path" - "sync" - "time" - - "github.com/calmh/syncthing/buffers" - "github.com/calmh/syncthing/scanner" -) - -type fileMonitor struct { - name string // in-repo name - path string // full path - writeDone sync.WaitGroup - model *Model - global scanner.File - localBlocks []scanner.Block - copyError error - writeError error -} - -func (m *fileMonitor) FileBegins(cc <-chan content) error { - if debugPull { - dlog.Println("file begins:", m.name) - } - - tmp := defTempNamer.TempName(m.path) - - dir := path.Dir(tmp) - _, err := os.Stat(dir) - if err != nil && os.IsNotExist(err) { - err = os.MkdirAll(dir, 0777) - if err != nil { - return err - } - } - - outFile, err := os.Create(tmp) - if err != nil { - return err - } - - m.writeDone.Add(1) - - var writeWg sync.WaitGroup - if len(m.localBlocks) > 0 { - writeWg.Add(1) - inFile, err := os.Open(m.path) - if err != nil { - return err - } - - // Copy local blocks, close infile when done - go m.copyLocalBlocks(inFile, outFile, &writeWg) - } - - // Write remote blocks, - writeWg.Add(1) - go m.copyRemoteBlocks(cc, outFile, &writeWg) - - // Wait for both writing routines, then close the outfile - go func() { - writeWg.Wait() - outFile.Close() - m.writeDone.Done() - }() - - return nil -} - -func (m *fileMonitor) copyLocalBlocks(inFile, outFile *os.File, 
writeWg *sync.WaitGroup) { - defer inFile.Close() - defer writeWg.Done() - - var buf = buffers.Get(BlockSize) - defer buffers.Put(buf) - - for _, lb := range m.localBlocks { - buf = buf[:lb.Size] - _, err := inFile.ReadAt(buf, lb.Offset) - if err != nil { - m.copyError = err - return - } - _, err = outFile.WriteAt(buf, lb.Offset) - if err != nil { - m.copyError = err - return - } - } -} - -func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writeWg *sync.WaitGroup) { - defer writeWg.Done() - - for content := range cc { - _, err := outFile.WriteAt(content.data, content.offset) - buffers.Put(content.data) - if err != nil { - m.writeError = err - return - } - } -} - -func (m *fileMonitor) FileDone() error { - if debugPull { - dlog.Println("file done:", m.name) - } - - m.writeDone.Wait() - - tmp := defTempNamer.TempName(m.path) - defer os.Remove(tmp) - - if m.copyError != nil { - return m.copyError - } - if m.writeError != nil { - return m.writeError - } - - err := hashCheck(tmp, m.global.Blocks) - if err != nil { - return err - } - - err = os.Chtimes(tmp, time.Unix(m.global.Modified, 0), time.Unix(m.global.Modified, 0)) - if err != nil { - return err - } - - err = os.Chmod(tmp, os.FileMode(m.global.Flags&0777)) - if err != nil { - return err - } - - err = os.Rename(tmp, m.path) - if err != nil { - return err - } - - m.model.updateLocal(m.global) - return nil -} - -func hashCheck(name string, correct []scanner.Block) error { - rf, err := os.Open(name) - if err != nil { - return err - } - defer rf.Close() - - current, err := scanner.Blocks(rf, BlockSize) - if err != nil { - return err - } - if len(current) != len(correct) { - return errors.New("incorrect number of blocks") - } - for i := range current { - if bytes.Compare(current[i].Hash, correct[i].Hash) != 0 { - return fmt.Errorf("hash mismatch: %x != %x", current[i], correct[i]) - } - } - - return nil -} diff --git a/cmd/syncthing/filequeue.go b/cmd/syncthing/filequeue.go deleted file mode 100644 index 2d1f2b3b9..000000000 --- a/cmd/syncthing/filequeue.go +++ /dev/null @@ -1,241 +0,0 @@ -package main - -import ( - "log" - "sort" - "sync" - "time" - - "github.com/calmh/syncthing/scanner" -) - -type Monitor interface { - FileBegins(<-chan content) error - FileDone() error -} - -type FileQueue struct { - files queuedFileList - sorted bool - fmut sync.Mutex // protects files and sorted - availability map[string][]string - amut sync.Mutex // protects availability - queued map[string]bool -} - -type queuedFile struct { - name string - blocks []scanner.Block - activeBlocks []bool - given int - remaining int - channel chan content - nodes []string - nodesChecked time.Time - monitor Monitor -} - -type content struct { - offset int64 - data []byte -} - -type queuedFileList []queuedFile - -func (l queuedFileList) Len() int { return len(l) } - -func (l queuedFileList) Swap(a, b int) { l[a], l[b] = l[b], l[a] } - -func (l queuedFileList) Less(a, b int) bool { - // Sort by most blocks already given out, then alphabetically - if l[a].given != l[b].given { - return l[a].given > l[b].given - } - return l[a].name < l[b].name -} - -type queuedBlock struct { - name string - block scanner.Block - index int -} - -func NewFileQueue() *FileQueue { - return &FileQueue{ - availability: make(map[string][]string), - queued: make(map[string]bool), - } -} - -func (q *FileQueue) Add(name string, blocks []scanner.Block, monitor Monitor) { - q.fmut.Lock() - defer q.fmut.Unlock() - - if q.queued[name] { - return - } - - q.files = append(q.files, 
queuedFile{ - name: name, - blocks: blocks, - activeBlocks: make([]bool, len(blocks)), - remaining: len(blocks), - channel: make(chan content), - monitor: monitor, - }) - q.queued[name] = true - q.sorted = false -} - -func (q *FileQueue) Len() int { - q.fmut.Lock() - defer q.fmut.Unlock() - - return len(q.files) -} - -func (q *FileQueue) Get(nodeID string) (queuedBlock, bool) { - q.fmut.Lock() - defer q.fmut.Unlock() - - if !q.sorted { - sort.Sort(q.files) - q.sorted = true - } - - for i := range q.files { - qf := &q.files[i] - - q.amut.Lock() - av := q.availability[qf.name] - q.amut.Unlock() - - if len(av) == 0 { - // Noone has the file we want; abort. - if qf.remaining != len(qf.blocks) { - // We have already started on this file; close it down - close(qf.channel) - if mon := qf.monitor; mon != nil { - mon.FileDone() - } - } - delete(q.queued, qf.name) - q.deleteAt(i) - return queuedBlock{}, false - } - - for _, ni := range av { - // Find and return the next block in the queue - if ni == nodeID { - for j, b := range qf.blocks { - if !qf.activeBlocks[j] { - qf.activeBlocks[j] = true - qf.given++ - return queuedBlock{ - name: qf.name, - block: b, - index: j, - }, true - } - } - break - } - } - } - - // We found nothing to do - return queuedBlock{}, false -} - -func (q *FileQueue) Done(file string, offset int64, data []byte) { - q.fmut.Lock() - defer q.fmut.Unlock() - - c := content{ - offset: offset, - data: data, - } - for i := range q.files { - qf := &q.files[i] - - if qf.name == file { - if qf.monitor != nil && qf.remaining == len(qf.blocks) { - err := qf.monitor.FileBegins(qf.channel) - if err != nil { - log.Printf("WARNING: %s: %v (not synced)", qf.name, err) - delete(q.queued, qf.name) - q.deleteAt(i) - return - } - } - - qf.channel <- c - qf.remaining-- - - if qf.remaining == 0 { - close(qf.channel) - if qf.monitor != nil { - err := qf.monitor.FileDone() - if err != nil { - log.Printf("WARNING: %s: %v", qf.name, err) - } - } - delete(q.queued, qf.name) - q.deleteAt(i) - } - return - } - } - - // We found nothing, might have errored out already -} - -func (q *FileQueue) QueuedFiles() (files []string) { - q.fmut.Lock() - defer q.fmut.Unlock() - - for _, qf := range q.files { - files = append(files, qf.name) - } - return -} - -func (q *FileQueue) deleteAt(i int) { - q.files = append(q.files[:i], q.files[i+1:]...) 
-} - -func (q *FileQueue) deleteFile(n string) { - for i, file := range q.files { - if n == file.name { - q.deleteAt(i) - delete(q.queued, file.name) - return - } - } -} - -func (q *FileQueue) SetAvailable(file string, nodes []string) { - q.amut.Lock() - defer q.amut.Unlock() - - q.availability[file] = nodes -} - -func (q *FileQueue) RemoveAvailable(toRemove string) { - q.fmut.Lock() - q.amut.Lock() - defer q.amut.Unlock() - defer q.fmut.Unlock() - - for file, nodes := range q.availability { - for i, node := range nodes { - if node == toRemove { - q.availability[file] = nodes[:i+copy(nodes[i:], nodes[i+1:])] - if len(q.availability[file]) == 0 { - q.deleteFile(file) - } - } - break - } - } -} diff --git a/cmd/syncthing/filequeue_test.go b/cmd/syncthing/filequeue_test.go deleted file mode 100644 index 4f81f4214..000000000 --- a/cmd/syncthing/filequeue_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package main - -import ( - "reflect" - "sync" - "sync/atomic" - "testing" - - "github.com/calmh/syncthing/scanner" -) - -func TestFileQueueAdd(t *testing.T) { - q := NewFileQueue() - q.Add("foo", nil, nil) -} - -func TestFileQueueAddSorting(t *testing.T) { - q := NewFileQueue() - q.SetAvailable("zzz", []string{"nodeID"}) - q.SetAvailable("aaa", []string{"nodeID"}) - - q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil) - q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil) - b, _ := q.Get("nodeID") - if b.name != "aaa" { - t.Errorf("Incorrectly sorted get: %+v", b) - } - - q = NewFileQueue() - q.SetAvailable("zzz", []string{"nodeID"}) - q.SetAvailable("aaa", []string{"nodeID"}) - - q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil) - b, _ = q.Get("nodeID") // Start on zzzz - if b.name != "zzz" { - t.Errorf("Incorrectly sorted get: %+v", b) - } - q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil) - b, _ = q.Get("nodeID") - if b.name != "zzz" { - // Continue rather than starting a new file - t.Errorf("Incorrectly sorted get: %+v", b) - } -} - -func TestFileQueueLen(t *testing.T) { - q := NewFileQueue() - q.Add("foo", nil, nil) - q.Add("bar", nil, nil) - - if l := q.Len(); l != 2 { - t.Errorf("Incorrect len %d != 2 after adds", l) - } -} - -func TestFileQueueGet(t *testing.T) { - q := NewFileQueue() - q.SetAvailable("foo", []string{"nodeID"}) - q.SetAvailable("bar", []string{"nodeID"}) - - q.Add("foo", []scanner.Block{ - {Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")}, - {Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")}, - {Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")}, - }, nil) - q.Add("bar", []scanner.Block{ - {Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")}, - {Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")}, - }, nil) - - // First get should return the first block of the first file - - expected := queuedBlock{ - name: "bar", - block: scanner.Block{ - Offset: 0, - Size: 128, - Hash: []byte("some bar hash bytes"), - }, - } - actual, ok := q.Get("nodeID") - - if !ok { - t.Error("Unexpected non-OK Get()") - } - if !reflect.DeepEqual(expected, actual) { - t.Errorf("Incorrect block returned (first)\n E: %+v\n A: %+v", expected, actual) - } - - // Second get should return the next block of the first file - - expected = queuedBlock{ - name: "bar", - block: scanner.Block{ - Offset: 128, - Size: 128, - Hash: []byte("some other bar hash bytes"), - }, - index: 1, - } - actual, ok = q.Get("nodeID") - - if !ok 
{ - t.Error("Unexpected non-OK Get()") - } - if !reflect.DeepEqual(expected, actual) { - t.Errorf("Incorrect block returned (second)\n E: %+v\n A: %+v", expected, actual) - } - - // Third get should return the first block of the second file - - expected = queuedBlock{ - name: "foo", - block: scanner.Block{ - Offset: 0, - Size: 128, - Hash: []byte("some foo hash bytes"), - }, - } - actual, ok = q.Get("nodeID") - - if !ok { - t.Error("Unexpected non-OK Get()") - } - if !reflect.DeepEqual(expected, actual) { - t.Errorf("Incorrect block returned (third)\n E: %+v\n A: %+v", expected, actual) - } -} - -/* -func TestFileQueueDone(t *testing.T) { - ch := make(chan content) - var recv sync.WaitGroup - recv.Add(1) - go func() { - content := <-ch - if bytes.Compare(content.data, []byte("first block bytes")) != 0 { - t.Error("Incorrect data in first content block") - } - - content = <-ch - if bytes.Compare(content.data, []byte("second block bytes")) != 0 { - t.Error("Incorrect data in second content block") - } - - _, ok := <-ch - if ok { - t.Error("Content channel not closed") - } - - recv.Done() - }() - - q := FileQueue{resolver: fakeResolver{}} - q.Add("foo", []scanner.Block{ - {Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")}, - {Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")}, - }, ch) - - b0, _ := q.Get("nodeID") - b1, _ := q.Get("nodeID") - - q.Done(b0.name, b0.block.Offset, []byte("first block bytes")) - q.Done(b1.name, b1.block.Offset, []byte("second block bytes")) - - recv.Wait() - - // Queue should now have one file less - - if l := q.Len(); l != 0 { - t.Error("Queue not empty") - } - - _, ok := q.Get("nodeID") - if ok { - t.Error("Unexpected OK Get()") - } -} -*/ - -func TestFileQueueGetNodeIDs(t *testing.T) { - q := NewFileQueue() - q.SetAvailable("a-foo", []string{"nodeID", "a"}) - q.SetAvailable("b-bar", []string{"nodeID", "b"}) - - q.Add("a-foo", []scanner.Block{ - {Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")}, - {Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")}, - {Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")}, - }, nil) - q.Add("b-bar", []scanner.Block{ - {Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")}, - {Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")}, - }, nil) - - expected := queuedBlock{ - name: "b-bar", - block: scanner.Block{ - Offset: 0, - Size: 128, - Hash: []byte("some bar hash bytes"), - }, - } - actual, ok := q.Get("b") - if !ok { - t.Error("Unexpected non-OK Get()") - } - if !reflect.DeepEqual(expected, actual) { - t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual) - } - - expected = queuedBlock{ - name: "a-foo", - block: scanner.Block{ - Offset: 0, - Size: 128, - Hash: []byte("some foo hash bytes"), - }, - } - actual, ok = q.Get("a") - if !ok { - t.Error("Unexpected non-OK Get()") - } - if !reflect.DeepEqual(expected, actual) { - t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual) - } - - expected = queuedBlock{ - name: "a-foo", - block: scanner.Block{ - Offset: 128, - Size: 128, - Hash: []byte("some other foo hash bytes"), - }, - index: 1, - } - actual, ok = q.Get("nodeID") - if !ok { - t.Error("Unexpected non-OK Get()") - } - if !reflect.DeepEqual(expected, actual) { - t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual) - } -} - -func TestFileQueueThreadHandling(t *testing.T) { - // This should pass with go test -race - - const n = 100 - var total int - var blocks []scanner.Block - for i := 1; i 
<= n; i++ { - blocks = append(blocks, scanner.Block{Offset: int64(i), Size: 1}) - total += i - } - - q := NewFileQueue() - q.Add("foo", blocks, nil) - q.SetAvailable("foo", []string{"nodeID"}) - - var start = make(chan bool) - var gotTot uint32 - var wg sync.WaitGroup - wg.Add(n) - for i := 1; i <= n; i++ { - go func() { - <-start - b, _ := q.Get("nodeID") - atomic.AddUint32(&gotTot, uint32(b.block.Offset)) - wg.Done() - }() - } - - close(start) - wg.Wait() - if int(gotTot) != total { - t.Errorf("Total mismatch; %d != %d", gotTot, total) - } -} - -func TestDeleteAt(t *testing.T) { - q := FileQueue{} - - for i := 0; i < 4; i++ { - q.files = queuedFileList{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}} - q.deleteAt(i) - if l := len(q.files); l != 3 { - t.Fatalf("deleteAt(%d) failed; %d != 3", i, l) - } - } - - q.files = queuedFileList{{name: "a"}} - q.deleteAt(0) - if l := len(q.files); l != 0 { - t.Fatalf("deleteAt(only) failed; %d != 0", l) - } -} diff --git a/cmd/syncthing/main.go b/cmd/syncthing/main.go index 699c61821..6acf7eb76 100644 --- a/cmd/syncthing/main.go +++ b/cmd/syncthing/main.go @@ -11,7 +11,7 @@ import ( _ "net/http/pprof" "os" "os/exec" - "path" + "path/filepath" "runtime" "runtime/debug" "strings" @@ -51,12 +51,15 @@ const ( STTRACE A comma separated string of facilities to trace. The valid facility strings: - - "scanner" (the file change scanner) - "discover" (the node discovery package) - - "net" (connecting and disconnecting, network messages) + - "files" (file set store) - "idx" (index sending and receiving) + - "mc" (multicast beacon) - "need" (file need calculations) - - "pull" (file pull activity)` + - "net" (connecting and disconnecting, network messages) + - "pull" (file pull activity) + - "scanner" (the file change scanner) + ` ) func main() { @@ -105,7 +108,7 @@ func main() { // Prepare to be able to save configuration - cfgFile := path.Join(confDir, "config.xml") + cfgFile := filepath.Join(confDir, "config.xml") go saveConfigLoop(cfgFile) // Load the configuration file, if it exists. @@ -121,13 +124,13 @@ func main() { cf.Close() } else { // No config.xml, let's try the old syncthing.ini - iniFile := path.Join(confDir, "syncthing.ini") + iniFile := filepath.Join(confDir, "syncthing.ini") cf, err := os.Open(iniFile) if err == nil { infoln("Migrating syncthing.ini to config.xml") iniCfg := ini.Parse(cf) cf.Close() - os.Rename(iniFile, path.Join(confDir, "migrated_syncthing.ini")) + Rename(iniFile, filepath.Join(confDir, "migrated_syncthing.ini")) cfg, _ = readConfigXML(nil) cfg.Repositories = []RepositoryConfiguration{ @@ -152,7 +155,7 @@ func main() { cfg, err = readConfigXML(nil) cfg.Repositories = []RepositoryConfiguration{ { - Directory: path.Join(getHomeDir(), "Sync"), + Directory: filepath.Join(getHomeDir(), "Sync"), Nodes: []NodeConfiguration{ {NodeID: myID, Addresses: []string{"dynamic"}}, }, @@ -259,35 +262,16 @@ func main() { // Routine to pull blocks from other nodes to synchronize the local // repository. Does not run when we are in read only (publish only) mode. 
- if !cfg.Options.ReadOnly { + if cfg.Options.ReadOnly { + if verbose { + okln("Ready to synchronize (read only; no external updates accepted)") + } + m.StartRO() + } else { if verbose { - if cfg.Options.AllowDelete { - infoln("Deletes from peer nodes are allowed") - } else { - infoln("Deletes from peer nodes will be ignored") - } okln("Ready to synchronize (read-write)") } - m.StartRW(cfg.Options.AllowDelete, cfg.Options.ParallelRequests) - } else if verbose { - okln("Ready to synchronize (read only; no external updates accepted)") - } - - // Periodically scan the repository and update the local - // XXX: Should use some fsnotify mechanism. - go func() { - td := time.Duration(cfg.Options.RescanIntervalS) * time.Second - for { - time.Sleep(td) - if m.LocalAge() > (td / 2).Seconds() { - updateLocalModel(m, w) - } - } - }() - - if verbose { - // Periodically print statistics - go printStatsLoop(m) + m.StartRW(cfg.Options.ParallelRequests) } select {} @@ -344,14 +328,7 @@ func saveConfigLoop(cfgFile string) { continue } - if runtime.GOOS == "windows" { - err := os.Remove(cfgFile) - if err != nil && !os.IsNotExist(err) { - warnln(err) - } - } - - err = os.Rename(cfgFile+".tmp", cfgFile) + err = Rename(cfgFile+".tmp", cfgFile) if err != nil { warnln(err) } @@ -362,37 +339,6 @@ func saveConfig() { saveConfigCh <- struct{}{} } -func printStatsLoop(m *Model) { - var lastUpdated int64 - var lastStats = make(map[string]ConnectionInfo) - - for { - time.Sleep(60 * time.Second) - - for node, stats := range m.ConnectionStats() { - secs := time.Since(lastStats[node].At).Seconds() - inbps := 8 * int(float64(stats.InBytesTotal-lastStats[node].InBytesTotal)/secs) - outbps := 8 * int(float64(stats.OutBytesTotal-lastStats[node].OutBytesTotal)/secs) - - if inbps+outbps > 0 { - infof("%s: %sb/s in, %sb/s out", node[0:5], MetricPrefix(int64(inbps)), MetricPrefix(int64(outbps))) - } - - lastStats[node] = stats - } - - if lu := m.Generation(); lu > lastUpdated { - lastUpdated = lu - files, _, bytes := m.GlobalSize() - infof("%6d files, %9sB in cluster", files, BinaryPrefix(bytes)) - files, _, bytes = m.LocalSize() - infof("%6d files, %9sB in local repo", files, BinaryPrefix(bytes)) - needFiles, bytes := m.NeedFiles() - infof("%6d files, %9sB to synchronize", len(needFiles), BinaryPrefix(bytes)) - } - } -} - func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config, connOpts map[string]string) { var conns = make(chan *tls.Conn) @@ -529,7 +475,7 @@ func updateLocalModel(m *Model, w *scanner.Walker) { func saveIndex(m *Model) { name := m.RepoID() + ".idx.gz" - fullName := path.Join(confDir, name) + fullName := filepath.Join(confDir, name) idxf, err := os.Create(fullName + ".tmp") if err != nil { return @@ -543,12 +489,13 @@ func saveIndex(m *Model) { }.EncodeXDR(gzw) gzw.Close() idxf.Close() - os.Rename(fullName+".tmp", fullName) + + Rename(fullName+".tmp", fullName) } func loadIndex(m *Model) { name := m.RepoID() + ".idx.gz" - idxf, err := os.Open(path.Join(confDir, name)) + idxf, err := os.Open(filepath.Join(confDir, name)) if err != nil { return } @@ -611,7 +558,7 @@ func getHomeDir() string { func getDefaultConfDir() string { if runtime.GOOS == "windows" { - return path.Join(os.Getenv("AppData"), "syncthing") + return filepath.Join(os.Getenv("AppData"), "syncthing") } return expandTilde("~/.syncthing") } diff --git a/cmd/syncthing/model.go b/cmd/syncthing/model.go index 93ecbf724..7c53cca67 100644 --- a/cmd/syncthing/model.go +++ b/cmd/syncthing/model.go @@ -7,59 +7,36 @@ import ( 
"io" "net" "os" - "path" + "path/filepath" "sync" "time" "github.com/calmh/syncthing/buffers" + "github.com/calmh/syncthing/cid" + "github.com/calmh/syncthing/files" + "github.com/calmh/syncthing/lamport" "github.com/calmh/syncthing/protocol" "github.com/calmh/syncthing/scanner" ) type Model struct { dir string + cm *cid.Map + fs *files.Set - global map[string]scanner.File // the latest version of each file as it exists in the cluster - gmut sync.RWMutex // protects global - local map[string]scanner.File // the files we currently have locally on disk - lmut sync.RWMutex // protects local - remote map[string]map[string]scanner.File - rmut sync.RWMutex // protects remote - protoConn map[string]Connection + protoConn map[string]protocol.Connection rawConn map[string]io.Closer pmut sync.RWMutex // protects protoConn and rawConn - // Queue for files to fetch. fq can call back into the model, so we must ensure - // to hold no locks when calling methods on fq. - fq *FileQueue - dq chan scanner.File // queue for files to delete - - updatedLocal int64 // timestamp of last update to local - updateGlobal int64 // timestamp of last update to remote - lastIdxBcast time.Time - lastIdxBcastRequest time.Time - umut sync.RWMutex // provides updated* and lastIdx* - - rwRunning bool - delete bool - initmut sync.Mutex // protects rwRunning and delete + initOnce sync.Once sup suppressor - parallelRequests int limitRequestRate chan struct{} imut sync.Mutex // protects Index } -type Connection interface { - ID() string - Index(string, []protocol.FileInfo) - Request(repo, name string, offset int64, size int) ([]byte, error) - Statistics() protocol.Statistics - Option(key string) string -} - const ( idxBcastHoldtime = 15 * time.Second // Wait at least this long after the last index modification idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long @@ -75,16 +52,12 @@ var ( // for file data without altering the local repository in any way. func NewModel(dir string, maxChangeBw int) *Model { m := &Model{ - dir: dir, - global: make(map[string]scanner.File), - local: make(map[string]scanner.File), - remote: make(map[string]map[string]scanner.File), - protoConn: make(map[string]Connection), - rawConn: make(map[string]io.Closer), - lastIdxBcast: time.Now(), - sup: suppressor{threshold: int64(maxChangeBw)}, - fq: NewFileQueue(), - dq: make(chan scanner.File), + dir: dir, + cm: cid.NewMap(), + fs: files.NewSet(), + protoConn: make(map[string]protocol.Connection), + rawConn: make(map[string]io.Closer), + sup: suppressor{threshold: int64(maxChangeBw)}, } go m.broadcastIndexLoop() @@ -109,37 +82,26 @@ func (m *Model) LimitRate(kbps int) { // StartRW starts read/write processing on the current model. When in // read/write mode the model will attempt to keep in sync with the cluster by // pulling needed files from peer nodes. -func (m *Model) StartRW(del bool, threads int) { - m.initmut.Lock() - defer m.initmut.Unlock() +func (m *Model) StartRW(threads int) { + m.initOnce.Do(func() { + newPuller("default", m.dir, m, threads) + }) +} - if m.rwRunning { - panic("starting started model") - } - - m.rwRunning = true - m.delete = del - m.parallelRequests = threads - - if del { - go m.deleteLoop() - } +// StartRO starts read only processing on the current model. When in +// read only mode the model will announce files to the cluster but not +// pull in any external changes. 
+func (m *Model) StartRO() { + m.initOnce.Do(func() { + newPuller("default", m.dir, m, 0) // zero threads => read only + }) } // Generation returns an opaque integer that is guaranteed to increment on // every change to the local repository or global model. -func (m *Model) Generation() int64 { - m.umut.RLock() - defer m.umut.RUnlock() - - return m.updatedLocal + m.updateGlobal -} - -func (m *Model) LocalAge() float64 { - m.umut.RLock() - defer m.umut.RUnlock() - - return time.Since(time.Unix(m.updatedLocal, 0)).Seconds() +func (m *Model) Generation() uint64 { + c := m.fs.Changes(cid.LocalID) + return c } type ConnectionInfo struct { @@ -156,12 +118,10 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo { RemoteAddr() net.Addr } - m.gmut.RLock() m.pmut.RLock() - m.rmut.RLock() var tot int64 - for _, f := range m.global { + for _, f := range m.fs.Global() { if f.Flags&protocol.FlagDeleted == 0 { tot += f.Size } @@ -178,10 +138,10 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo { ci.Address = nc.RemoteAddr().String() } - var have int64 - for _, f := range m.remote[node] { - if f.Equals(m.global[f.Name]) && f.Flags&protocol.FlagDeleted == 0 { - have += f.Size + var have = tot + for _, f := range m.fs.Need(m.cm.Get(node)) { + if f.Flags&protocol.FlagDeleted == 0 { + have -= f.Size } } @@ -193,82 +153,59 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo { res[node] = ci } - m.rmut.RUnlock() m.pmut.RUnlock() - m.gmut.RUnlock() + return res } +func sizeOf(fs []scanner.File) (files, deleted int, bytes int64) { + for _, f := range fs { + if f.Flags&protocol.FlagDeleted == 0 { + files++ + bytes += f.Size + } else { + deleted++ + } + } + return +} + // GlobalSize returns the number of files, deleted files and total bytes for all // files in the global model. func (m *Model) GlobalSize() (files, deleted int, bytes int64) { - m.gmut.RLock() - - for _, f := range m.global { - if f.Flags&protocol.FlagDeleted == 0 { - files++ - bytes += f.Size - } else { - deleted++ - } - } - - m.gmut.RUnlock() - return + fs := m.fs.Global() + return sizeOf(fs) } // LocalSize returns the number of files, deleted files and total bytes for all // files in the local repository. func (m *Model) LocalSize() (files, deleted int, bytes int64) { - m.lmut.RLock() - - for _, f := range m.local { - if f.Flags&protocol.FlagDeleted == 0 { - files++ - bytes += f.Size - } else { - deleted++ - } - } - - m.lmut.RUnlock() - return + fs := m.fs.Have(cid.LocalID) + return sizeOf(fs) } // InSyncSize returns the number and total byte size of the local files that // are in sync with the global model. -func (m *Model) InSyncSize() (files, bytes int64) { - m.gmut.RLock() - m.lmut.RLock() +func (m *Model) InSyncSize() (files int, bytes int64) { + gf := m.fs.Global() + hf := m.fs.Need(cid.LocalID) - for n, f := range m.local { - if gf, ok := m.global[n]; ok && f.Equals(gf) { - if f.Flags&protocol.FlagDeleted == 0 { - files++ - bytes += f.Size - } - } - } + gn, _, gb := sizeOf(gf) + hn, _, hb := sizeOf(hf) - m.lmut.RUnlock() - m.gmut.RUnlock() - return + return gn - hn, gb - hb } // NeedFiles returns the list of currently needed files and the total size. 
-func (m *Model) NeedFiles() (files []scanner.File, bytes int64) { - qf := m.fq.QueuedFiles() +func (m *Model) NeedFiles() ([]scanner.File, int64) { + nf := m.fs.Need(cid.LocalID) - m.gmut.RLock() - - for _, n := range qf { - f := m.global[n] - files = append(files, f) + var bytes int64 + for _, f := range nf { bytes += f.Size } - m.gmut.RUnlock() - return + return nf, bytes } // Index is called when a new node is connected and we receive their full index. @@ -276,27 +213,16 @@ func (m *Model) NeedFiles() (files []scanner.File, bytes int64) { func (m *Model) Index(nodeID string, fs []protocol.FileInfo) { var files = make([]scanner.File, len(fs)) for i := range fs { + lamport.Default.Tick(fs[i].Version) files[i] = fileFromFileInfo(fs[i]) } - m.imut.Lock() - defer m.imut.Unlock() + cid := m.cm.Get(nodeID) + m.fs.Replace(cid, files) if debugNet { dlog.Printf("IDX(in): %s: %d files", nodeID, len(fs)) } - - repo := make(map[string]scanner.File) - for _, f := range files { - m.indexUpdate(repo, f) - } - - m.rmut.Lock() - m.remote[nodeID] = repo - m.rmut.Unlock() - - m.recomputeGlobal() - m.recomputeNeedForFiles(files) } // IndexUpdate is called for incremental updates to connected nodes' indexes. @@ -304,48 +230,16 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) { func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) { var files = make([]scanner.File, len(fs)) for i := range fs { + lamport.Default.Tick(fs[i].Version) files[i] = fileFromFileInfo(fs[i]) } - m.imut.Lock() - defer m.imut.Unlock() + id := m.cm.Get(nodeID) + m.fs.Update(id, files) if debugNet { dlog.Printf("IDXUP(in): %s: %d files", nodeID, len(files)) } - - m.rmut.Lock() - repo, ok := m.remote[nodeID] - if !ok { - warnf("Index update from node %s that does not have an index", nodeID) - m.rmut.Unlock() - return - } - - for _, f := range files { - m.indexUpdate(repo, f) - } - m.rmut.Unlock() - - m.recomputeGlobal() - m.recomputeNeedForFiles(files) -} - -func (m *Model) indexUpdate(repo map[string]scanner.File, f scanner.File) { - if debugIdx { - var flagComment string - if f.Flags&protocol.FlagDeleted != 0 { - flagComment = " (deleted)" - } - dlog.Printf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks)) - } - - if extraFlags := f.Flags &^ (protocol.FlagInvalid | protocol.FlagDeleted | 0xfff); extraFlags != 0 { - warnf("IDX(in): Unknown flags 0x%x in index record %+v", extraFlags, f) - return - } - - repo[f.Name] = f } // Close removes the peer from the model and closes the underlying connection if possible. @@ -360,40 +254,26 @@ func (m *Model) Close(node string, err error) { warnf("Connection to %s closed: %v", node, err) } - m.fq.RemoveAvailable(node) + cid := m.cm.Get(node) + m.fs.Replace(cid, nil) + m.cm.Clear(node) m.pmut.Lock() - m.rmut.Lock() - conn, ok := m.rawConn[node] if ok { conn.Close() } - - delete(m.remote, node) delete(m.protoConn, node) delete(m.rawConn, node) - - m.rmut.Unlock() m.pmut.Unlock() - - m.recomputeGlobal() - m.recomputeNeedForGlobal() } // Request returns the specified data segment by reading it from local disk. // Implements the protocol.Model interface. func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]byte, error) { - // Verify that the requested file exists in the local and global model. 
- m.lmut.RLock() - lf, localOk := m.local[name] - m.lmut.RUnlock() - - m.gmut.RLock() - _, globalOk := m.global[name] - m.gmut.RUnlock() - - if !localOk || !globalOk { + // Verify that the requested file exists in the local model. + lf := m.fs.Get(cid.LocalID, name) + if offset > lf.Size { warnf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size) return nil, ErrNoSuchFile } @@ -404,7 +284,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by if debugNet && nodeID != "" { dlog.Printf("REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size) } - fn := path.Join(m.dir, name) + fn := filepath.Join(m.dir, name) fd, err := os.Open(fn) // XXX: Inefficient, should cache fd? if err != nil { return nil, err @@ -428,63 +308,23 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by // ReplaceLocal replaces the local repository index with the given list of files. func (m *Model) ReplaceLocal(fs []scanner.File) { - var updated bool - var newLocal = make(map[string]scanner.File) - - m.lmut.RLock() - for _, f := range fs { - newLocal[f.Name] = f - if ef := m.local[f.Name]; !ef.Equals(f) { - updated = true - } - } - m.lmut.RUnlock() - - if m.markDeletedLocals(newLocal) { - updated = true - } - - m.lmut.RLock() - if len(newLocal) != len(m.local) { - updated = true - } - m.lmut.RUnlock() - - if updated { - m.lmut.Lock() - m.local = newLocal - m.lmut.Unlock() - - m.recomputeGlobal() - m.recomputeNeedForGlobal() - - m.umut.Lock() - m.updatedLocal = time.Now().Unix() - m.lastIdxBcastRequest = time.Now() - m.umut.Unlock() - } + m.fs.ReplaceWithDelete(cid.LocalID, fs) } -// SeedLocal replaces the local repository index with the given list of files, -// in protocol data types. Does not track deletes, should only be used to seed -// the local index from a cache file at startup. +// ReplaceLocal replaces the local repository index with the given list of files. func (m *Model) SeedLocal(fs []protocol.FileInfo) { - m.lmut.Lock() - m.local = make(map[string]scanner.File) - for _, f := range fs { - m.local[f.Name] = fileFromFileInfo(f) + var sfs = make([]scanner.File, len(fs)) + for i := 0; i < len(fs); i++ { + lamport.Default.Tick(fs[i].Version) + sfs[i] = fileFromFileInfo(fs[i]) } - m.lmut.Unlock() - m.recomputeGlobal() - m.recomputeNeedForGlobal() + m.fs.Replace(cid.LocalID, sfs) } // Implements scanner.CurrentFiler func (m *Model) CurrentFile(file string) scanner.File { - m.lmut.RLock() - f := m.local[file] - m.lmut.RUnlock() + f := m.fs.Get(cid.LocalID, file) return f } @@ -504,7 +344,7 @@ func (m *Model) RepoID() string { // AddConnection adds a new peer connection to the model. An initial index will // be sent to the connected peer, thereafter index updates whenever the local // repository changes. 
-func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) { +func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) { nodeID := protoConn.ID() m.pmut.Lock() if _, ok := m.protoConn[nodeID]; ok { @@ -524,44 +364,6 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) { } protoConn.Index("default", idx) }() - - m.initmut.Lock() - rw := m.rwRunning - m.initmut.Unlock() - if !rw { - return - } - - for i := 0; i < m.parallelRequests; i++ { - i := i - go func() { - if debugPull { - dlog.Println("starting puller:", nodeID, i) - } - for { - m.pmut.RLock() - if _, ok := m.protoConn[nodeID]; !ok { - if debugPull { - dlog.Println("stopping puller:", nodeID, i) - } - m.pmut.RUnlock() - return - } - m.pmut.RUnlock() - - qb, ok := m.fq.Get(nodeID) - if ok { - if debugPull { - dlog.Println("request: out", nodeID, i, qb.name, qb.block.Offset) - } - data, _ := protoConn.Request("default", qb.name, qb.block.Offset, int(qb.block.Size)) - m.fq.Done(qb.name, qb.block.Offset, data) - } else { - time.Sleep(1 * time.Second) - } - } - }() - } } // ProtocolIndex returns the current local index in protocol data types. @@ -569,9 +371,9 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) { func (m *Model) ProtocolIndex() []protocol.FileInfo { var index []protocol.FileInfo - m.lmut.RLock() + fs := m.fs.Have(cid.LocalID) - for _, f := range m.local { + for _, f := range fs { mf := fileInfoFromFile(f) if debugIdx { var flagComment string @@ -583,10 +385,13 @@ func (m *Model) ProtocolIndex() []protocol.FileInfo { index = append(index, mf) } - m.lmut.RUnlock() return index } +func (m *Model) updateLocal(f scanner.File) { + m.fs.Update(cid.LocalID, []scanner.File{f}) +} + func (m *Model) requestGlobal(nodeID, name string, offset int64, size int, hash []byte) ([]byte, error) { m.pmut.RLock() nc, ok := m.protoConn[nodeID] @@ -604,290 +409,42 @@ func (m *Model) requestGlobal(nodeID, name string, offset int64, size int, hash } func (m *Model) broadcastIndexLoop() { + var lastChange uint64 for { - m.umut.RLock() - bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast) - holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime - m.umut.RUnlock() + time.Sleep(5 * time.Second) - maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay - if bcastRequested && (holdtimeExceeded || maxDelayExceeded) { - idx := m.ProtocolIndex() + c := m.fs.Changes(cid.LocalID) + if c == lastChange { + continue + } + lastChange = c - var indexWg sync.WaitGroup - indexWg.Add(len(m.protoConn)) + saveIndex(m) // This should be cleaned up we don't do a lot of processing twice - m.umut.Lock() - m.lastIdxBcast = time.Now() - m.umut.Unlock() + fs := m.fs.Have(cid.LocalID) - m.pmut.RLock() - for _, node := range m.protoConn { - node := node - if debugNet { - dlog.Printf("IDX(out/loop): %s: %d files", node.ID(), len(idx)) - } - go func() { - node.Index("default", idx) - indexWg.Done() - }() + var indexWg sync.WaitGroup + indexWg.Add(len(m.protoConn)) + + var idx = make([]protocol.FileInfo, len(fs)) + for i, f := range fs { + idx[i] = fileInfoFromFile(f) + } + + m.pmut.RLock() + for _, node := range m.protoConn { + node := node + if debugNet { + dlog.Printf("IDX(out/loop): %s: %d files", node.ID(), len(idx)) } - m.pmut.RUnlock() - - indexWg.Wait() + go func() { + node.Index("default", idx) + indexWg.Done() + }() } - time.Sleep(idxBcastHoldtime) - } -} + m.pmut.RUnlock() -// markDeletedLocals sets the deleted flag on files that have gone missing 
locally. -func (m *Model) markDeletedLocals(newLocal map[string]scanner.File) bool { - // For every file in the existing local table, check if they are also - // present in the new local table. If they are not, check that we already - // had the newest version available according to the global table and if so - // note the file as having been deleted. - var updated bool - - m.gmut.RLock() - m.lmut.RLock() - - for n, f := range m.local { - if _, ok := newLocal[n]; !ok { - if gf := m.global[n]; !gf.NewerThan(f) { - if f.Flags&protocol.FlagDeleted == 0 { - f.Flags = protocol.FlagDeleted - f.Version++ - f.Blocks = nil - updated = true - } - newLocal[n] = f - } - } - } - - m.lmut.RUnlock() - m.gmut.RUnlock() - - return updated -} - -func (m *Model) updateLocal(f scanner.File) { - var updated bool - - m.lmut.Lock() - if ef, ok := m.local[f.Name]; !ok || !ef.Equals(f) { - m.local[f.Name] = f - updated = true - } - m.lmut.Unlock() - - if updated { - m.recomputeGlobal() - // We don't recomputeNeed here for two reasons: - // - a need shouldn't have arisen due to having a newer local file - // - recomputeNeed might call into fq.Add but we might have been called by - // fq which would be a deadlock on fq - - m.umut.Lock() - m.updatedLocal = time.Now().Unix() - m.lastIdxBcastRequest = time.Now() - m.umut.Unlock() - } -} - -/* -XXX: Not done, needs elegant handling of availability - -func (m *Model) recomputeGlobalFor(files []scanner.File) bool { - m.gmut.Lock() - defer m.gmut.Unlock() - - var updated bool - for _, f := range files { - if gf, ok := m.global[f.Name]; !ok || f.NewerThan(gf) { - m.global[f.Name] = f - updated = true - // Fix availability - } - } - return updated -} -*/ - -func (m *Model) recomputeGlobal() { - var newGlobal = make(map[string]scanner.File) - - m.lmut.RLock() - for n, f := range m.local { - newGlobal[n] = f - } - m.lmut.RUnlock() - - var available = make(map[string][]string) - - m.rmut.RLock() - var highestMod int64 - for nodeID, fs := range m.remote { - for n, nf := range fs { - if lf, ok := newGlobal[n]; !ok || nf.NewerThan(lf) { - newGlobal[n] = nf - available[n] = []string{nodeID} - if nf.Modified > highestMod { - highestMod = nf.Modified - } - } else if lf.Equals(nf) { - available[n] = append(available[n], nodeID) - } - } - } - m.rmut.RUnlock() - - for f, ns := range available { - m.fq.SetAvailable(f, ns) - } - - // Figure out if anything actually changed - - m.gmut.RLock() - var updated bool - if highestMod > m.updateGlobal || len(newGlobal) != len(m.global) { - updated = true - } else { - for n, f0 := range newGlobal { - if f1, ok := m.global[n]; !ok || !f0.Equals(f1) { - updated = true - break - } - } - } - m.gmut.RUnlock() - - if updated { - m.gmut.Lock() - m.umut.Lock() - m.global = newGlobal - m.updateGlobal = time.Now().Unix() - m.umut.Unlock() - m.gmut.Unlock() - } -} - -type addOrder struct { - n string - remote []scanner.Block - fm *fileMonitor -} - -func (m *Model) recomputeNeedForGlobal() { - var toDelete []scanner.File - var toAdd []addOrder - - m.gmut.RLock() - - for _, gf := range m.global { - toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete) - } - - m.gmut.RUnlock() - - for _, ao := range toAdd { - m.fq.Add(ao.n, ao.remote, ao.fm) - } - for _, gf := range toDelete { - m.dq <- gf - } -} - -func (m *Model) recomputeNeedForFiles(files []scanner.File) { - var toDelete []scanner.File - var toAdd []addOrder - - m.gmut.RLock() - - for _, gf := range files { - toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete) - } - - m.gmut.RUnlock() - - 
for _, ao := range toAdd { - m.fq.Add(ao.n, ao.remote, ao.fm) - } - for _, gf := range toDelete { - m.dq <- gf - } -} - -func (m *Model) recomputeNeedForFile(gf scanner.File, toAdd []addOrder, toDelete []scanner.File) ([]addOrder, []scanner.File) { - m.lmut.RLock() - lf, ok := m.local[gf.Name] - m.lmut.RUnlock() - - if !ok || gf.NewerThan(lf) { - if gf.Suppressed { - // Never attempt to sync invalid files - return toAdd, toDelete - } - if gf.Flags&protocol.FlagDeleted != 0 && !m.delete { - // Don't want to delete files, so forget this need - return toAdd, toDelete - } - if gf.Flags&protocol.FlagDeleted != 0 && !ok { - // Don't have the file, so don't need to delete it - return toAdd, toDelete - } - if debugNeed { - dlog.Printf("need: lf:%v gf:%v", lf, gf) - } - - if gf.Flags&protocol.FlagDeleted != 0 { - toDelete = append(toDelete, gf) - } else { - local, remote := scanner.BlockDiff(lf.Blocks, gf.Blocks) - fm := fileMonitor{ - name: FSNormalize(gf.Name), - path: FSNormalize(path.Clean(path.Join(m.dir, gf.Name))), - global: gf, - model: m, - localBlocks: local, - } - toAdd = append(toAdd, addOrder{gf.Name, remote, &fm}) - } - } - - return toAdd, toDelete -} - -func (m *Model) WhoHas(name string) []string { - var remote []string - - m.gmut.RLock() - m.rmut.RLock() - - gf := m.global[name] - for node, files := range m.remote { - if file, ok := files[name]; ok && file.Equals(gf) { - remote = append(remote, node) - } - } - - m.rmut.RUnlock() - m.gmut.RUnlock() - return remote -} - -func (m *Model) deleteLoop() { - for file := range m.dq { - if debugPull { - dlog.Println("delete", file.Name) - } - path := FSNormalize(path.Clean(path.Join(m.dir, file.Name))) - err := os.Remove(path) - if err != nil { - warnf("%s: %v", file.Name, err) - } - - m.updateLocal(file) + indexWg.Wait() } } @@ -903,7 +460,8 @@ func fileFromFileInfo(f protocol.FileInfo) scanner.File { offset += int64(b.Size) } return scanner.File{ - Name: f.Name, + // Name is with native separator and normalization + Name: filepath.FromSlash(f.Name), Size: offset, Flags: f.Flags &^ protocol.FlagInvalid, Modified: f.Modified, @@ -922,7 +480,7 @@ func fileInfoFromFile(f scanner.File) protocol.FileInfo { } } pf := protocol.FileInfo{ - Name: f.Name, + Name: filepath.ToSlash(f.Name), Flags: f.Flags, Modified: f.Modified, Version: f.Version, diff --git a/cmd/syncthing/model_test.go b/cmd/syncthing/model_test.go index 0ae702818..186ff01bb 100644 --- a/cmd/syncthing/model_test.go +++ b/cmd/syncthing/model_test.go @@ -4,30 +4,14 @@ import ( "bytes" "fmt" "os" - "reflect" "testing" "time" + "github.com/calmh/syncthing/cid" "github.com/calmh/syncthing/protocol" "github.com/calmh/syncthing/scanner" ) -func TestNewModel(t *testing.T) { - m := NewModel("foo", 1e6) - - if m == nil { - t.Fatalf("NewModel returned nil") - } - - if fs, _ := m.NeedFiles(); len(fs) > 0 { - t.Errorf("New model should have no Need") - } - - if len(m.local) > 0 { - t.Errorf("New model should have no Have") - } -} - var testDataExpected = map[string]scanner.File{ "foo": scanner.File{ Name: "foo", @@ -62,295 +46,6 @@ func init() { } } -func TestUpdateLocal(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - if fs, _ := m.NeedFiles(); len(fs) > 0 { - t.Fatalf("Model with only local data should have no need") - } - - if l1, l2 := len(m.local), len(testDataExpected); l1 != l2 { - t.Fatalf("Model len(local) incorrect, %d != %d", l1, l2) - } - if l1, l2 := 
len(m.global), len(testDataExpected); l1 != l2 { - t.Fatalf("Model len(global) incorrect, %d != %d", l1, l2) - } - for name, file := range testDataExpected { - if f, ok := m.local[name]; ok { - if !reflect.DeepEqual(f, file) { - t.Errorf("Incorrect local\n%v !=\n%v\nfor file %q", f, file, name) - } - } else { - t.Errorf("Missing file %q in local table", name) - } - if f, ok := m.global[name]; ok { - if !reflect.DeepEqual(f, file) { - t.Errorf("Incorrect global\n%v !=\n%v\nfor file %q", f, file, name) - } - } else { - t.Errorf("Missing file %q in global table", name) - } - } - - for _, f := range fs { - if hf, ok := m.local[f.Name]; !ok || hf.Modified != f.Modified { - t.Fatalf("Incorrect local for %q", f.Name) - } - if cf, ok := m.global[f.Name]; !ok || cf.Modified != f.Modified { - t.Fatalf("Incorrect global for %q", f.Name) - } - } -} - -func TestRemoteUpdateExisting(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - newFile := protocol.FileInfo{ - Name: "foo", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - } - m.Index("42", []protocol.FileInfo{newFile}) - - if fs, _ := m.NeedFiles(); len(fs) != 1 { - t.Errorf("Model missing Need for one file (%d != 1)", len(fs)) - } -} - -func TestRemoteAddNew(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - newFile := protocol.FileInfo{ - Name: "a new file", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - } - m.Index("42", []protocol.FileInfo{newFile}) - - if fs, _ := m.NeedFiles(); len(fs) != 1 { - t.Errorf("Model len(m.need) incorrect (%d != 1)", len(fs)) - } -} - -func TestRemoteUpdateOld(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - oldTimeStamp := int64(1234) - newFile := protocol.FileInfo{ - Name: "foo", - Modified: oldTimeStamp, - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - } - m.Index("42", []protocol.FileInfo{newFile}) - - if fs, _ := m.NeedFiles(); len(fs) != 0 { - t.Errorf("Model len(need) incorrect (%d != 0)", len(fs)) - } -} - -func TestRemoteIndexUpdate(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - foo := protocol.FileInfo{ - Name: "foo", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - } - - bar := protocol.FileInfo{ - Name: "bar", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - } - - m.Index("42", []protocol.FileInfo{foo}) - - if fs, _ := m.NeedFiles(); fs[0].Name != "foo" { - t.Error("Model doesn't need 'foo'") - } - - m.IndexUpdate("42", []protocol.FileInfo{bar}) - - if fs, _ := m.NeedFiles(); fs[0].Name != "foo" { - t.Error("Model doesn't need 'foo'") - } - if fs, _ := m.NeedFiles(); fs[1].Name != "bar" { - t.Error("Model doesn't need 'bar'") - } -} - -func TestDelete(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - if l1, l2 := len(m.local), len(fs); l1 != l2 
{ - t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2) - } - if l1, l2 := len(m.global), len(fs); l1 != l2 { - t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2) - } - - ot := time.Now().Unix() - newFile := scanner.File{ - Name: "a new file", - Modified: ot, - Blocks: []scanner.Block{{0, 100, []byte("some hash bytes")}}, - } - m.updateLocal(newFile) - - if l1, l2 := len(m.local), len(fs)+1; l1 != l2 { - t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2) - } - if l1, l2 := len(m.global), len(fs)+1; l1 != l2 { - t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2) - } - - // The deleted file is kept in the local and global tables and marked as deleted. - - m.ReplaceLocal(fs) - - if l1, l2 := len(m.local), len(fs)+1; l1 != l2 { - t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2) - } - if l1, l2 := len(m.global), len(fs)+1; l1 != l2 { - t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2) - } - - if m.local["a new file"].Flags&(1<<12) == 0 { - t.Error("Unexpected deleted flag = 0 in local table") - } - if len(m.local["a new file"].Blocks) != 0 { - t.Error("Unexpected non-zero blocks for deleted file in local") - } - if ft := m.local["a new file"].Modified; ft != ot { - t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1) - } - if fv := m.local["a new file"].Version; fv != 1 { - t.Errorf("Unexpected version %d != 1 for deleted file in local", fv) - } - - if m.global["a new file"].Flags&(1<<12) == 0 { - t.Error("Unexpected deleted flag = 0 in global table") - } - if len(m.global["a new file"].Blocks) != 0 { - t.Error("Unexpected non-zero blocks for deleted file in global") - } - if ft := m.global["a new file"].Modified; ft != ot { - t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot+1) - } - if fv := m.local["a new file"].Version; fv != 1 { - t.Errorf("Unexpected version %d != 1 for deleted file in global", fv) - } - - // Another update should change nothing - - m.ReplaceLocal(fs) - - if l1, l2 := len(m.local), len(fs)+1; l1 != l2 { - t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2) - } - if l1, l2 := len(m.global), len(fs)+1; l1 != l2 { - t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2) - } - - if m.local["a new file"].Flags&(1<<12) == 0 { - t.Error("Unexpected deleted flag = 0 in local table") - } - if len(m.local["a new file"].Blocks) != 0 { - t.Error("Unexpected non-zero blocks for deleted file in local") - } - if ft := m.local["a new file"].Modified; ft != ot { - t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot) - } - if fv := m.local["a new file"].Version; fv != 1 { - t.Errorf("Unexpected version %d != 1 for deleted file in local", fv) - } - - if m.global["a new file"].Flags&(1<<12) == 0 { - t.Error("Unexpected deleted flag = 0 in global table") - } - if len(m.global["a new file"].Blocks) != 0 { - t.Error("Unexpected non-zero blocks for deleted file in global") - } - if ft := m.global["a new file"].Modified; ft != ot { - t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot) - } - if fv := m.local["a new file"].Version; fv != 1 { - t.Errorf("Unexpected version %d != 1 for deleted file in global", fv) - } -} - -func TestForgetNode(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - if l1, l2 := len(m.local), len(fs); l1 != l2 { - t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2) - } - if l1, l2 := len(m.global), 
len(fs); l1 != l2 { - t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2) - } - if fs, _ := m.NeedFiles(); len(fs) != 0 { - t.Errorf("Model len(need) incorrect (%d != 0)", len(fs)) - } - - newFile := protocol.FileInfo{ - Name: "new file", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - } - m.Index("42", []protocol.FileInfo{newFile}) - - newFile = protocol.FileInfo{ - Name: "new file 2", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - } - m.Index("43", []protocol.FileInfo{newFile}) - - if l1, l2 := len(m.local), len(fs); l1 != l2 { - t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2) - } - if l1, l2 := len(m.global), len(fs)+2; l1 != l2 { - t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2) - } - if fs, _ := m.NeedFiles(); len(fs) != 2 { - t.Errorf("Model len(need) incorrect (%d != 2)", len(fs)) - } - - m.Close("42", nil) - - if l1, l2 := len(m.local), len(fs); l1 != l2 { - t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2) - } - if l1, l2 := len(m.global), len(fs)+1; l1 != l2 { - t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2) - } - - if fs, _ := m.NeedFiles(); len(fs) != 1 { - t.Errorf("Model len(need) incorrect (%d != 1)", len(fs)) - } -} - func TestRequest(t *testing.T) { m := NewModel("testdata", 1e6) w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} @@ -374,36 +69,6 @@ func TestRequest(t *testing.T) { } } -func TestIgnoreWithUnknownFlags(t *testing.T) { - m := NewModel("testdata", 1e6) - w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024} - fs, _ := w.Walk() - m.ReplaceLocal(fs) - - valid := protocol.FileInfo{ - Name: "valid", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - Flags: protocol.FlagDeleted | 0755, - } - - invalid := protocol.FileInfo{ - Name: "invalid", - Modified: time.Now().Unix(), - Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}}, - Flags: 1<<27 | protocol.FlagDeleted | 0755, - } - - m.Index("42", []protocol.FileInfo{valid, invalid}) - - if _, ok := m.global[valid.Name]; !ok { - t.Error("Model should include", valid) - } - if _, ok := m.global[invalid.Name]; ok { - t.Error("Model not should include", invalid) - } -} - func genFiles(n int) []protocol.FileInfo { files := make([]protocol.FileInfo, n) t := time.Now().Unix() @@ -554,3 +219,29 @@ func BenchmarkRequest(b *testing.B) { } } } + +func TestActivityMap(t *testing.T) { + cm := cid.NewMap() + fooID := cm.Get("foo") + if fooID == 0 { + t.Fatal("ID cannot be zero") + } + barID := cm.Get("bar") + if barID == 0 { + t.Fatal("ID cannot be zero") + } + + m := make(activityMap) + if node := m.leastBusyNode(1< 0 { + // Read/write + for i := 0; i < slots; i++ { + p.requestSlots <- true + } + if debugPull { + dlog.Printf("starting puller; repo %q dir %q slots %d", repo, dir, slots) + } + go p.run() + } else { + // Read only + if debugPull { + dlog.Printf("starting puller; repo %q dir %q (read only)", repo, dir) + } + go p.runRO() + } + return p +} + +func (p *puller) run() { + go func() { + // fill blocks queue when there are free slots + for { + <-p.requestSlots + b := p.bq.get() + if debugPull { + dlog.Printf("filler: queueing %q offset %d copy %d", b.file.Name, b.block.Offset, len(b.copy)) + } + p.blocks <- b + } + }() + + walkTicker := time.Tick(time.Duration(cfg.Options.RescanIntervalS) * time.Second) + timeout := time.Tick(5 * time.Second) + + sup := 
&suppressor{threshold: int64(cfg.Options.MaxChangeKbps)} + w := &scanner.Walker{ + Dir: p.dir, + IgnoreFile: ".stignore", + FollowSymlinks: cfg.Options.FollowSymlinks, + BlockSize: BlockSize, + TempNamer: defTempNamer, + Suppressor: sup, + CurrentFiler: p.model, + } + + for { + // Run the pulling loop as long as there are blocks to fetch + pull: + for { + select { + case res := <-p.requestResults: + p.requestSlots <- true + p.handleRequestResult(res) + + case b := <-p.blocks: + p.handleBlock(b) + + case <-timeout: + if debugPull { + dlog.Println("timeout") + } + if len(p.openFiles) == 0 && p.bq.empty() { + // Nothing more to do for the moment + break pull + } + if debugPull { + dlog.Printf("idle but have %d open files", len(p.openFiles)) + i := 5 + for _, f := range p.openFiles { + dlog.Printf(" %v", f) + i-- + if i == 0 { + break + } + } + } + } + } + + // Do a rescan if it's time for it + select { + case <-walkTicker: + if debugPull { + dlog.Println("time for rescan") + } + files, _ := w.Walk() + p.model.fs.ReplaceWithDelete(cid.LocalID, files) + + default: + } + + // Queue more blocks to fetch, if any + p.queueNeededBlocks() + } +} + +func (p *puller) runRO() { + walkTicker := time.Tick(time.Duration(cfg.Options.RescanIntervalS) * time.Second) + + sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)} + w := &scanner.Walker{ + Dir: p.dir, + IgnoreFile: ".stignore", + FollowSymlinks: cfg.Options.FollowSymlinks, + BlockSize: BlockSize, + TempNamer: defTempNamer, + Suppressor: sup, + CurrentFiler: p.model, + } + + for _ = range walkTicker { + if debugPull { + dlog.Println("time for rescan") + } + files, _ := w.Walk() + p.model.fs.ReplaceWithDelete(cid.LocalID, files) + } +} + +func (p *puller) handleRequestResult(res requestResult) { + p.oustandingPerNode.decrease(res.node) + f := res.file + + of, ok := p.openFiles[f.Name] + if !ok || of.err != nil { + // no entry in openFiles means there was an error and we've cancelled the operation + return + } + + _, of.err = of.file.WriteAt(res.data, res.offset) + buffers.Put(res.data) + + of.outstanding-- + p.openFiles[f.Name] = of + + if debugPull { + dlog.Printf("pull: wrote %q offset %d outstanding %d done %v", f.Name, res.offset, of.outstanding, of.done) + } + + if of.done && of.outstanding == 0 { + if debugPull { + dlog.Printf("pull: closing %q", f.Name) + } + of.file.Close() + defer os.Remove(of.temp) + + delete(p.openFiles, f.Name) + + fd, err := os.Open(of.temp) + if err != nil { + if debugPull { + dlog.Printf("pull: error: %q: %v", f.Name, err) + } + return + } + hb, _ := scanner.Blocks(fd, BlockSize) + fd.Close() + + if l0, l1 := len(hb), len(f.Blocks); l0 != l1 { + if debugPull { + dlog.Printf("pull: %q: nblocks %d != %d", f.Name, l0, l1) + } + return + } + + for i := range hb { + if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 { + dlog.Printf("pull: %q: block %d hash mismatch", f.Name, i) + return + } + } + + t := time.Unix(f.Modified, 0) + os.Chtimes(of.temp, t, t) + os.Chmod(of.temp, os.FileMode(f.Flags&0777)) + if debugPull { + dlog.Printf("pull: rename %q: %q", f.Name, of.filepath) + } + if err := Rename(of.temp, of.filepath); err == nil { + p.model.fs.Update(cid.LocalID, []scanner.File{f}) + } else { + dlog.Printf("pull: error: %q: %v", f.Name, err) + } + } +} + +func (p *puller) handleBlock(b bqBlock) { + f := b.file + + of, ok := p.openFiles[f.Name] + of.done = b.last + + if !ok { + if debugPull { + dlog.Printf("pull: opening file %q", f.Name) + } + + of.availability = uint64(p.model.fs.Availability(f.Name)) + 
of.filepath = filepath.Join(p.dir, f.Name) + of.temp = filepath.Join(p.dir, defTempNamer.TempName(f.Name)) + + dirName := filepath.Dir(of.filepath) + _, err := os.Stat(dirName) + if err != nil { + err = os.MkdirAll(dirName, 0777) + } + if err != nil { + dlog.Printf("pull: error: %q: %v", f.Name, err) + } + + of.file, of.err = os.Create(of.temp) + if of.err != nil { + if debugPull { + dlog.Printf("pull: error: %q: %v", f.Name, of.err) + } + if !b.last { + p.openFiles[f.Name] = of + } + p.requestSlots <- true + return + } + } + + if of.err != nil { + // We have already failed this file. + if debugPull { + dlog.Printf("pull: error: %q has already failed: %v", f.Name, of.err) + } + if b.last { + dlog.Printf("pull: removing failed file %q", f.Name) + delete(p.openFiles, f.Name) + } + + p.requestSlots <- true + return + } + + p.openFiles[f.Name] = of + + switch { + case len(b.copy) > 0: + p.handleCopyBlock(b) + p.requestSlots <- true + + case b.block.Size > 0: + p.handleRequestBlock(b) + // Request slot gets freed in <-p.blocks case + + default: + p.handleEmptyBlock(b) + p.requestSlots <- true + } +} + +func (p *puller) handleCopyBlock(b bqBlock) { + // We have blocks to copy from the existing file + f := b.file + of := p.openFiles[f.Name] + + if debugPull { + dlog.Printf("pull: copying %d blocks for %q", len(b.copy), f.Name) + } + + var exfd *os.File + exfd, of.err = os.Open(of.filepath) + if of.err != nil { + if debugPull { + dlog.Printf("pull: error: %q: %v", f.Name, of.err) + } + of.file.Close() + of.file = nil + + p.openFiles[f.Name] = of + return + } + defer exfd.Close() + + for _, b := range b.copy { + bs := buffers.Get(int(b.Size)) + _, of.err = exfd.ReadAt(bs, b.Offset) + if of.err == nil { + _, of.err = of.file.WriteAt(bs, b.Offset) + } + buffers.Put(bs) + if of.err != nil { + if debugPull { + dlog.Printf("pull: error: %q: %v", f.Name, of.err) + } + exfd.Close() + of.file.Close() + of.file = nil + + p.openFiles[f.Name] = of + return + } + } +} + +func (p *puller) handleRequestBlock(b bqBlock) { + // We have a block to get from the network + + f := b.file + of := p.openFiles[f.Name] + + node := p.oustandingPerNode.leastBusyNode(of.availability, p.model.cm) + if len(node) == 0 { + of.err = errNoNode + if of.file != nil { + of.file.Close() + of.file = nil + os.Remove(of.temp) + } + if b.last { + delete(p.openFiles, f.Name) + } else { + p.openFiles[f.Name] = of + } + p.requestSlots <- true + return + } + + of.outstanding++ + p.openFiles[f.Name] = of + + go func(node string, b bqBlock) { + if debugPull { + dlog.Printf("pull: requesting %q offset %d size %d from %q outstanding %d", f.Name, b.block.Offset, b.block.Size, node, of.outstanding) + } + + bs, err := p.model.requestGlobal(node, f.Name, b.block.Offset, int(b.block.Size), nil) + p.requestResults <- requestResult{ + node: node, + file: f, + filepath: of.filepath, + offset: b.block.Offset, + data: bs, + err: err, + } + }(node, b) +} + +func (p *puller) handleEmptyBlock(b bqBlock) { + f := b.file + of := p.openFiles[f.Name] + + if b.last { + if of.err == nil { + of.file.Close() + } + } + + if f.Flags&protocol.FlagDeleted != 0 { + if debugPull { + dlog.Printf("pull: delete %q", f.Name) + } + os.Remove(of.temp) + os.Remove(of.filepath) + } else { + if debugPull { + dlog.Printf("pull: no blocks to fetch and nothing to copy for %q", f.Name) + } + t := time.Unix(f.Modified, 0) + os.Chtimes(of.temp, t, t) + os.Chmod(of.temp, os.FileMode(f.Flags&0777)) + Rename(of.temp, of.filepath) + } + delete(p.openFiles, f.Name) + 
p.model.fs.Update(cid.LocalID, []scanner.File{f}) +} + +func (p *puller) queueNeededBlocks() { + queued := 0 + for _, f := range p.model.fs.Need(cid.LocalID) { + lf := p.model.fs.Get(cid.LocalID, f.Name) + have, need := scanner.BlockDiff(lf.Blocks, f.Blocks) + if debugNeed { + dlog.Printf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need) + } + queued++ + p.bq.put(bqAdd{ + file: f, + have: have, + need: need, + }) + } + if debugPull && queued > 0 { + dlog.Printf("queued %d blocks", queued) + } +} diff --git a/cmd/syncthing/syncthing b/cmd/syncthing/syncthing new file mode 100755 index 000000000..7ae562d1f Binary files /dev/null and b/cmd/syncthing/syncthing differ diff --git a/cmd/syncthing/tempname.go b/cmd/syncthing/tempname.go index a44d0e14d..a444ea2b1 100644 --- a/cmd/syncthing/tempname.go +++ b/cmd/syncthing/tempname.go @@ -2,9 +2,7 @@ package main import ( "fmt" - "path" "path/filepath" - "runtime" "strings" ) @@ -15,14 +13,11 @@ type tempNamer struct { var defTempNamer = tempNamer{".syncthing"} func (t tempNamer) IsTemporary(name string) bool { - if runtime.GOOS == "windows" { - name = filepath.ToSlash(name) - } - return strings.HasPrefix(path.Base(name), t.prefix) + return strings.HasPrefix(filepath.Base(name), t.prefix) } func (t tempNamer) TempName(name string) string { - tdir := path.Dir(name) - tname := fmt.Sprintf("%s.%s", t.prefix, path.Base(name)) - return path.Join(tdir, tname) + tdir := filepath.Dir(name) + tname := fmt.Sprintf("%s.%s", t.prefix, filepath.Base(name)) + return filepath.Join(tdir, tname) } diff --git a/cmd/syncthing/tls.go b/cmd/syncthing/tls.go index 7a848f063..cfc1ce5bc 100644 --- a/cmd/syncthing/tls.go +++ b/cmd/syncthing/tls.go @@ -11,7 +11,7 @@ import ( "encoding/pem" "math/big" "os" - "path" + "path/filepath" "strings" "time" ) @@ -22,7 +22,7 @@ const ( ) func loadCert(dir string) (tls.Certificate, error) { - return tls.LoadX509KeyPair(path.Join(dir, "cert.pem"), path.Join(dir, "key.pem")) + return tls.LoadX509KeyPair(filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")) } func certID(bs []byte) string { @@ -57,13 +57,13 @@ func newCertificate(dir string) { derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) fatalErr(err) - certOut, err := os.Create(path.Join(dir, "cert.pem")) + certOut, err := os.Create(filepath.Join(dir, "cert.pem")) fatalErr(err) pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) certOut.Close() okln("Created RSA certificate file") - keyOut, err := os.OpenFile(path.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + keyOut, err := os.OpenFile(filepath.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) fatalErr(err) pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) keyOut.Close() diff --git a/cmd/syncthing/util.go b/cmd/syncthing/util.go index 4e14adc6b..6d284ec54 100644 --- a/cmd/syncthing/util.go +++ b/cmd/syncthing/util.go @@ -1,6 +1,10 @@ package main -import "fmt" +import ( + "fmt" + "os" + "runtime" +) func MetricPrefix(n int64) string { if n > 1e9 { @@ -27,3 +31,13 @@ func BinaryPrefix(n int64) string { } return fmt.Sprintf("%d ", n) } + +func Rename(from, to string) error { + if runtime.GOOS == "windows" { + err := os.Remove(to) + if err != nil && !os.IsNotExist(err) { + warnln(err) + } + } + return os.Rename(from, to) +} diff --git a/files/debug.go b/files/debug.go new file mode 100644 index 000000000..c2078fc70 --- /dev/null +++ 
b/files/debug.go @@ -0,0 +1,12 @@ +package files + +import ( + "log" + "os" + "strings" +) + +var ( + dlog = log.New(os.Stderr, "files: ", log.Lmicroseconds|log.Lshortfile) + debug = strings.Contains(os.Getenv("STTRACE"), "files") +) diff --git a/files/set.go b/files/set.go new file mode 100644 index 000000000..7de636b4b --- /dev/null +++ b/files/set.go @@ -0,0 +1,324 @@ +// Package files provides a set type to track local/remote files with newness checks. +package files + +import ( + "crypto/md5" + "sync" + + "github.com/calmh/syncthing/cid" + "github.com/calmh/syncthing/lamport" + "github.com/calmh/syncthing/protocol" + "github.com/calmh/syncthing/scanner" +) + +type key struct { + Name string + Version uint64 + Modified int64 + Hash [md5.Size]byte +} + +type fileRecord struct { + Usage int + File scanner.File +} + +type bitset uint64 + +func keyFor(f scanner.File) key { + var h []byte + for _, b := range f.Blocks { + h = append(h, b.Hash...) + } + return key{ + Name: f.Name, + Version: f.Version, + Modified: f.Modified, + Hash: md5.Sum(h), + } +} + +func (a key) newerThan(b key) bool { + if a.Version != b.Version { + return a.Version > b.Version + } + if a.Modified != b.Modified { + return a.Modified > b.Modified + } + for i := 0; i < md5.Size; i++ { + if a.Hash[i] != b.Hash[i] { + return a.Hash[i] > b.Hash[i] + } + } + return false +} + +type Set struct { + sync.Mutex + files map[key]fileRecord + remoteKey [64]map[string]key + changes [64]uint64 + globalAvailability map[string]bitset + globalKey map[string]key +} + +func NewSet() *Set { + var m = Set{ + files: make(map[key]fileRecord), + globalAvailability: make(map[string]bitset), + globalKey: make(map[string]key), + } + return &m +} + +func (m *Set) Replace(id uint, fs []scanner.File) { + if debug { + dlog.Printf("Replace(%d, [%d])", id, len(fs)) + } + if id > 63 { + panic("Connection ID must be in the range 0 - 63 inclusive") + } + + m.Lock() + if len(fs) == 0 || !m.equals(id, fs) { + m.changes[id]++ + m.replace(id, fs) + } + m.Unlock() +} + +func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) { + if debug { + dlog.Printf("ReplaceWithDelete(%d, [%d])", id, len(fs)) + } + if id > 63 { + panic("Connection ID must be in the range 0 - 63 inclusive") + } + + m.Lock() + if len(fs) == 0 || !m.equals(id, fs) { + m.changes[id]++ + + var nf = make(map[string]key, len(fs)) + for _, f := range fs { + nf[f.Name] = keyFor(f) + } + + // For previously existing files not in the list, add them to the list + // with the relevant delete flags etc set. Previously existing files + // with the delete bit already set are not modified.
+ + for _, ck := range m.remoteKey[cid.LocalID] { + if _, ok := nf[ck.Name]; !ok { + cf := m.files[ck].File + if cf.Flags&protocol.FlagDeleted != protocol.FlagDeleted { + cf.Flags = protocol.FlagDeleted + cf.Blocks = nil + cf.Size = 0 + cf.Version = lamport.Default.Tick(cf.Version) + } + fs = append(fs, cf) + if debug { + dlog.Println("deleted:", ck.Name) + } + } + } + + m.replace(id, fs) + } + m.Unlock() +} + +func (m *Set) Update(id uint, fs []scanner.File) { + if debug { + dlog.Printf("Update(%d, [%d])", id, len(fs)) + } + m.Lock() + m.update(id, fs) + m.changes[id]++ + m.Unlock() +} + +func (m *Set) Need(id uint) []scanner.File { + if debug { + dlog.Printf("Need(%d)", id) + } + var fs []scanner.File + m.Lock() + for name, gk := range m.globalKey { + if gk.newerThan(m.remoteKey[id][name]) { + fs = append(fs, m.files[gk].File) + } + } + m.Unlock() + return fs +} + +func (m *Set) Have(id uint) []scanner.File { + if debug { + dlog.Printf("Have(%d)", id) + } + var fs []scanner.File + m.Lock() + for _, rk := range m.remoteKey[id] { + fs = append(fs, m.files[rk].File) + } + m.Unlock() + return fs +} + +func (m *Set) Global() []scanner.File { + if debug { + dlog.Printf("Global()") + } + var fs []scanner.File + m.Lock() + for _, rk := range m.globalKey { + fs = append(fs, m.files[rk].File) + } + m.Unlock() + return fs +} + +func (m *Set) Get(id uint, file string) scanner.File { + m.Lock() + defer m.Unlock() + if debug { + dlog.Printf("Get(%d, %q)", id, file) + } + return m.files[m.remoteKey[id][file]].File +} + +func (m *Set) GetGlobal(file string) scanner.File { + m.Lock() + defer m.Unlock() + if debug { + dlog.Printf("GetGlobal(%q)", file) + } + return m.files[m.globalKey[file]].File +} + +func (m *Set) Availability(name string) bitset { + m.Lock() + defer m.Unlock() + av := m.globalAvailability[name] + if debug { + dlog.Printf("Availability(%q) = %0x", name, av) + } + return av +} + +func (m *Set) Changes(id uint) uint64 { + m.Lock() + defer m.Unlock() + if debug { + dlog.Printf("Changes(%d)", id) + } + return m.changes[id] +} + +func (m *Set) equals(id uint, fs []scanner.File) bool { + curWithoutDeleted := make(map[string]key) + for _, k := range m.remoteKey[id] { + f := m.files[k].File + if f.Flags&protocol.FlagDeleted == 0 { + curWithoutDeleted[f.Name] = k + } + } + if len(curWithoutDeleted) != len(fs) { + return false + } + for _, f := range fs { + if curWithoutDeleted[f.Name] != keyFor(f) { + return false + } + } + return true +} + +func (m *Set) update(cid uint, fs []scanner.File) { + remFiles := m.remoteKey[cid] + for _, f := range fs { + n := f.Name + fk := keyFor(f) + + if ck, ok := remFiles[n]; ok && ck == fk { + // The remote already has exactly this file, skip it + continue + } + + remFiles[n] = fk + + // Keep the block list or increment the usage + if br, ok := m.files[fk]; !ok { + m.files[fk] = fileRecord{ + Usage: 1, + File: f, + } + } else { + br.Usage++ + m.files[fk] = br + } + + // Update global view + gk, ok := m.globalKey[n] + switch { + case ok && fk == gk: + av := m.globalAvailability[n] + av |= 1 << cid + m.globalAvailability[n] = av + case fk.newerThan(gk): + m.globalKey[n] = fk + m.globalAvailability[n] = 1 << cid + } + } +} + +func (m *Set) replace(cid uint, fs []scanner.File) { + // Decrement usage for all files belonging to this remote, and remove + // those that are no longer needed. 
+ for _, fk := range m.remoteKey[cid] { + br, ok := m.files[fk] + switch { + case ok && br.Usage == 1: + delete(m.files, fk) + case ok && br.Usage > 1: + br.Usage-- + m.files[fk] = br + } + } + + // Clear existing remote remoteKey + m.remoteKey[cid] = make(map[string]key) + + // Recalculate global based on all remaining remoteKey + for n := range m.globalKey { + var nk key // newest key + var na bitset // newest availability + + for i, rem := range m.remoteKey { + if rk, ok := rem[n]; ok { + switch { + case rk == nk: + na |= 1 << uint(i) + case rk.newerThan(nk): + nk = rk + na = 1 << uint(i) + } + } + } + + if na != 0 { + // Someone had the file + m.globalKey[n] = nk + m.globalAvailability[n] = na + } else { + // Noone had the file + delete(m.globalKey, n) + delete(m.globalAvailability, n) + } + } + + // Add new remote remoteKey to the mix + m.update(cid, fs) +} diff --git a/files/set_test.go b/files/set_test.go new file mode 100644 index 000000000..1731c2019 --- /dev/null +++ b/files/set_test.go @@ -0,0 +1,321 @@ +package files + +import ( + "fmt" + "reflect" + "sort" + "testing" + + "github.com/calmh/syncthing/cid" + "github.com/calmh/syncthing/lamport" + "github.com/calmh/syncthing/protocol" + "github.com/calmh/syncthing/scanner" +) + +type fileList []scanner.File + +func (l fileList) Len() int { + return len(l) +} + +func (l fileList) Less(a, b int) bool { + return l[a].Name < l[b].Name +} + +func (l fileList) Swap(a, b int) { + l[a], l[b] = l[b], l[a] +} + +func TestGlobalSet(t *testing.T) { + m := NewSet() + + local := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1000}, + scanner.File{Name: "c", Version: 1000}, + scanner.File{Name: "d", Version: 1000}, + } + + remote := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1001}, + scanner.File{Name: "c", Version: 1002}, + scanner.File{Name: "e", Version: 1000}, + } + + expectedGlobal := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1001}, + scanner.File{Name: "c", Version: 1002}, + scanner.File{Name: "d", Version: 1000}, + scanner.File{Name: "e", Version: 1000}, + } + + m.ReplaceWithDelete(cid.LocalID, local) + m.Replace(1, remote) + + g := m.Global() + + sort.Sort(fileList(g)) + sort.Sort(fileList(expectedGlobal)) + + if !reflect.DeepEqual(g, expectedGlobal) { + t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal) + } + + if lb := len(m.files); lb != 7 { + t.Errorf("Num files incorrect %d != 7\n%v", lb, m.files) + } +} + +func TestLocalDeleted(t *testing.T) { + m := NewSet() + lamport.Default = lamport.Clock{} + + local1 := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1000}, + scanner.File{Name: "c", Version: 1000}, + scanner.File{Name: "d", Version: 1000}, + } + + m.ReplaceWithDelete(cid.LocalID, local1) + + local2 := []scanner.File{ + local1[0], + local1[2], + } + + expectedGlobal1 := []scanner.File{ + local1[0], + scanner.File{Name: "b", Version: 1001, Flags: protocol.FlagDeleted}, + local1[2], + scanner.File{Name: "d", Version: 1002, Flags: protocol.FlagDeleted}, + } + + m.ReplaceWithDelete(cid.LocalID, local2) + g := m.Global() + sort.Sort(fileList(g)) + sort.Sort(fileList(expectedGlobal1)) + + if !reflect.DeepEqual(g, expectedGlobal1) { + t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal1) + } + + local3 := []scanner.File{ + local1[0], + } + + expectedGlobal2 := []scanner.File{ + local1[0], + scanner.File{Name: "b", 
Version: 1001, Flags: protocol.FlagDeleted}, + scanner.File{Name: "c", Version: 1003, Flags: protocol.FlagDeleted}, + scanner.File{Name: "d", Version: 1002, Flags: protocol.FlagDeleted}, + } + + m.ReplaceWithDelete(cid.LocalID, local3) + g = m.Global() + sort.Sort(fileList(g)) + sort.Sort(fileList(expectedGlobal2)) + + if !reflect.DeepEqual(g, expectedGlobal2) { + t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal2) + } +} + +func BenchmarkSetLocal10k(b *testing.B) { + m := NewSet() + + var local []scanner.File + for i := 0; i < 10000; i++ { + local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + var remote []scanner.File + for i := 0; i < 10000; i++ { + remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + m.Replace(1, remote) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.ReplaceWithDelete(cid.LocalID, local) + } +} + +func BenchmarkSetLocal10(b *testing.B) { + m := NewSet() + + var local []scanner.File + for i := 0; i < 10; i++ { + local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + var remote []scanner.File + for i := 0; i < 10000; i++ { + remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + m.Replace(1, remote) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.ReplaceWithDelete(cid.LocalID, local) + } +} + +func BenchmarkAddLocal10k(b *testing.B) { + m := NewSet() + + var local []scanner.File + for i := 0; i < 10000; i++ { + local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + var remote []scanner.File + for i := 0; i < 10000; i++ { + remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + m.Replace(1, remote) + m.ReplaceWithDelete(cid.LocalID, local) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + for j := range local { + local[j].Version++ + } + b.StartTimer() + m.Update(cid.LocalID, local) + } +} + +func BenchmarkAddLocal10(b *testing.B) { + m := NewSet() + + var local []scanner.File + for i := 0; i < 10; i++ { + local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + var remote []scanner.File + for i := 0; i < 10000; i++ { + remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000}) + } + + m.Replace(1, remote) + m.ReplaceWithDelete(cid.LocalID, local) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := range local { + local[j].Version++ + } + m.Update(cid.LocalID, local) + } +} + +func TestGlobalReset(t *testing.T) { + m := NewSet() + + local := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1000}, + scanner.File{Name: "c", Version: 1000}, + scanner.File{Name: "d", Version: 1000}, + } + + remote := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1001}, + scanner.File{Name: "c", Version: 1002}, + scanner.File{Name: "e", Version: 1000}, + } + + expectedGlobalKey := map[string]key{ + "a": keyFor(local[0]), + "b": keyFor(local[1]), + "c": keyFor(local[2]), + "d": keyFor(local[3]), + } + + m.ReplaceWithDelete(cid.LocalID, local) + m.Replace(1, remote) + m.Replace(1, nil) + + if !reflect.DeepEqual(m.globalKey, expectedGlobalKey) { + t.Errorf("Global incorrect;\n%v !=\n%v", m.globalKey, expectedGlobalKey) + } + + if lb := len(m.files); lb != 4 { + t.Errorf("Num files incorrect %d != 4\n%v", lb, m.files) + } +} + +func TestNeed(t *testing.T) { + m := NewSet() + + local := []scanner.File{ +
scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1000}, + scanner.File{Name: "c", Version: 1000}, + scanner.File{Name: "d", Version: 1000}, + } + + remote := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1001}, + scanner.File{Name: "c", Version: 1002}, + scanner.File{Name: "e", Version: 1000}, + } + + shouldNeed := []scanner.File{ + scanner.File{Name: "b", Version: 1001}, + scanner.File{Name: "c", Version: 1002}, + scanner.File{Name: "e", Version: 1000}, + } + + m.ReplaceWithDelete(cid.LocalID, local) + m.Replace(1, remote) + + need := m.Need(0) + if !reflect.DeepEqual(need, shouldNeed) { + t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed) + } +} + +func TestChanges(t *testing.T) { + m := NewSet() + + local1 := []scanner.File{ + scanner.File{Name: "a", Version: 1000}, + scanner.File{Name: "b", Version: 1000}, + scanner.File{Name: "c", Version: 1000}, + scanner.File{Name: "d", Version: 1000}, + } + + local2 := []scanner.File{ + local1[0], + // [1] deleted + local1[2], + scanner.File{Name: "d", Version: 1002}, + scanner.File{Name: "e", Version: 1000}, + } + + m.ReplaceWithDelete(cid.LocalID, local1) + c0 := m.Changes(cid.LocalID) + + m.ReplaceWithDelete(cid.LocalID, local2) + c1 := m.Changes(cid.LocalID) + if !(c1 > c0) { + t.Fatal("Change number should have incremented") + } + + m.ReplaceWithDelete(cid.LocalID, local2) + c2 := m.Changes(cid.LocalID) + if c2 != c1 { + t.Fatal("Change number should be unchanged") + } +} diff --git a/gui/app.js b/gui/app.js index 92a1a605c..91d9b9702 100644 --- a/gui/app.js +++ b/gui/app.js @@ -28,7 +28,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) { {id: 'MaxChangeKbps', descr: 'Max File Change Rate (KBps)', type: 'number', restart: true}, {id: 'ReadOnly', descr: 'Read Only', type: 'bool', restart: true}, - {id: 'AllowDelete', descr: 'Allow Delete', type: 'bool', restart: true}, {id: 'FollowSymlinks', descr: 'Follow Symlinks', type: 'bool', restart: true}, {id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true}, {id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true}, diff --git a/gui/index.html b/gui/index.html index 8621fc9b2..b17fbd4e0 100644 --- a/gui/index.html +++ b/gui/index.html @@ -150,7 +150,7 @@ thead tr th {
+ ng-style="{width: (100 * model.inSyncBytes / model.globalBytes) + '%'}"> {{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
diff --git a/integration/genfiles.go b/integration/genfiles.go index 98b824f3a..c66b800e8 100644 --- a/integration/genfiles.go +++ b/integration/genfiles.go @@ -7,7 +7,7 @@ import ( "io/ioutil" mr "math/rand" "os" - "path" + "path/filepath" "time" ) @@ -27,7 +27,7 @@ func main() { for i := 0; i < files; i++ { n := name() - p0 := path.Join(string(n[0]), n[0:2]) + p0 := filepath.Join(string(n[0]), n[0:2]) os.MkdirAll(p0, 0755) s := 1 << uint(mr.Intn(maxexp)) a := 128 * 1024 @@ -37,7 +37,7 @@ func main() { s += mr.Intn(a) b := make([]byte, s) rand.Reader.Read(b) - p1 := path.Join(p0, n) + p1 := filepath.Join(p0, n) ioutil.WriteFile(p1, b, 0644) os.Chmod(p1, os.FileMode(mr.Intn(0777)|0400)) diff --git a/integration/h1/config.xml b/integration/h1/config.xml index f5840342f..4e313de87 100644 --- a/integration/h1/config.xml +++ b/integration/h1/config.xml @@ -1,13 +1,13 @@ -
<address>dynamic</address>
+        <address>127.0.0.1:22001</address>
-        <address>dynamic</address>
+        <address>127.0.0.1:22002</address>
-        <address>dynamic</address>
+        <address>127.0.0.1:22003</address>
diff --git a/integration/h2/config.xml b/integration/h2/config.xml index c18c147ec..65ccf4e4c 100644 --- a/integration/h2/config.xml +++ b/integration/h2/config.xml @@ -1,13 +1,13 @@ -
<address>dynamic</address>
+        <address>127.0.0.1:22001</address>
-        <address>dynamic</address>
+        <address>127.0.0.1:22002</address>
-        <address>dynamic</address>
+        <address>127.0.0.1:22003</address>
diff --git a/integration/h3/config.xml b/integration/h3/config.xml index f990deeba..bdd23c5fa 100644 --- a/integration/h3/config.xml +++ b/integration/h3/config.xml @@ -1,13 +1,13 @@ -
<address>dynamic</address>
+        <address>127.0.0.1:22001</address>
-        <address>dynamic</address>
+        <address>127.0.0.1:22002</address>
-        <address>dynamic</address>
+        <address>127.0.0.1:22003</address>
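The three config.xml hunks above swap dynamic announcement for fixed listen addresses, so the integration hosts dial each other deterministically. The changed address lines sit inside per-node blocks; as a rough sketch of syncthing's v0.x config layout (the node ID and name here are placeholders, not taken from this change):

    <configuration version="1">
        <repository directory="s1">
            <node id="PLACEHOLDER-NODE-ID" name="h1">
                <address>127.0.0.1:22001</address>
            </node>
        </repository>
    </configuration>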
diff --git a/integration/test.sh index 1552f7021..839468082 100755 --- a/integration/test.sh +++ b/integration/test.sh @@ -68,25 +68,17 @@ for i in 1 2 3 ; do ../genfiles -maxexp 22 -files 600 echo " $i: empty file" touch "empty-$i" - echo " $i: common file" - dd if=/dev/urandom of=common bs=1000 count=1000 2>/dev/null echo " $i: large file" dd if=/dev/urandom of=large-$i bs=1024k count=55 2>/dev/null popd >/dev/null done -# instance 1 common file should be the newest, the other should disappear -sleep 2 -touch "s1/common" - echo "MD5-summing..." for i in 1 2 3 ; do pushd "s$i" >/dev/null ../md5r -l > ../md5-$i popd >/dev/null done -grep -v common md5-2 > t ; mv t md5-2 -grep -v common md5-3 > t ; mv t md5-3 testConvergence diff --git a/lamport/clock.go b/lamport/clock.go new file mode 100644 index 000000000..9c652c04d --- /dev/null +++ b/lamport/clock.go @@ -0,0 +1,24 @@ +package lamport + +import "sync" + +var Default = Clock{} + +type Clock struct { + val uint64 + mut sync.Mutex +} + +func (c *Clock) Tick(v uint64) uint64 { + c.mut.Lock() + if v > c.val { + c.val = v + 1 + c.mut.Unlock() + return v + 1 + } else { + c.val++ + v = c.val + c.mut.Unlock() + return v + } +} diff --git a/protocol/PROTOCOL.md b/protocol/PROTOCOL.md index 1a5cb300e..9993ca49a 100644 --- a/protocol/PROTOCOL.md +++ b/protocol/PROTOCOL.md @@ -19,20 +19,31 @@ File data is described and transferred in units of _blocks_, each being Transport and Authentication ---------------------------- -BEP itself does not provide retransmissions, compression, encryption nor -authentication. It is expected that this is performed at lower layers of -the networking stack. The typical deployment stack is the following: +BEP is deployed as the highest level in a protocol stack, with the lower +level protocols providing compression, encryption and authentication. +The transport protocol is always TCP. +-----------------------------| | Block Exchange Protocol | |-----------------------------| | Compression (RFC 1951) | |-----------------------------| - | Encryption & Auth (TLS 1.0) | + | Encryption & Auth (TLS 1.2) | |-----------------------------| | TCP | |-----------------------------| - v v + v ... v + +Compression is started directly after a successful TLS handshake, +before the first message is sent. The compression is flushed at each +message boundary. + +The TLS layer shall use a strong cipher suite. Only cipher suites +without known weaknesses and providing Perfect Forward Secrecy (PFS) can +be considered strong. Examples of valid cipher suites are given at the +end of this document. This is not to be taken as an exhaustive list of +allowed cipher suites but represents best practices at the time of +writing. The exact nature of the authentication is up to the application. Possibilities include certificates signed by a common trusted CA, @@ -44,10 +55,6 @@ message type may be sent at any time and the sender need not await a response to one message before sending another. Responses must however be sent in the same order as the requests are received. -Compression is started directly after a successfull TLS handshake, -before the first message is sent. The compression is flushed at each -message boundary. - Messages -------- @@ -134,7 +141,9 @@ response to the Index message.
+ Modified (64 bits) + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Version | + | | + + Version (64 bits) + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Number of Blocks | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -163,14 +172,16 @@ response to the Index message. The Repository field identifies the repository that the index message pertains to. For single repository implementations an empty repository ID is acceptable, or the word "default". The Name is the file name path -relative to the repository root. The Name is always in UTF-8 NFC regardless -of operating system or file system specific conventions. The combination of -Repository and Name uniquely identifies each file in a cluster. +relative to the repository root. The Name is always in UTF-8 NFC +regardless of operating system or file system specific conventions. The +combination of Repository and Name uniquely identifies each file in a +cluster. -The Version field is a counter that is initially zero for each file. It -is incremented each time a change is detected. The combination of -Repository, Name and Version uniquely identifies the contents of a file -at a certain point in time. +The Version field is the value of a cluster-wide Lamport clock +indicating when the change was detected. The clock ticks on every +detected and received change. The combination of Repository, Name and +Version uniquely identifies the contents of a file at a certain point in +time. The Flags field is made up of the following single bit flags: @@ -220,7 +231,7 @@ block which may represent a smaller amount of data. string Name<>; unsigned int Flags; hyper Modified; - unsigned int Version; + unsigned hyper Version; BlockInfo Blocks<>; } @@ -338,8 +349,8 @@ Well known keys: - "clientId" -- The name of the implementation. Example: "syncthing". - - "clientVersion" -- The version of the client. Example: "v1.0.33-47". The - Following the SemVer 2.0 specification for version strings is + - "clientVersion" -- The version of the client. Example: "v1.0.33-47". + Following the SemVer 2.0 specification for version strings is encouraged but not enforced. #### Graphical Representation @@ -411,3 +422,15 @@ their repository contents and transmits an Index Update message (10). Both peers enter idle state after 10. At some later time 11, peer A determines that it has not seen data from B for some time and sends a Ping request. A response is sent at 12.
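The clock added in lamport/clock.go above is what the new Version semantics lean on. A minimal, self-contained sketch of its behavior (the version values are illustrative, not from this change):

    package main

    import (
        "fmt"

        "github.com/calmh/syncthing/lamport"
    )

    func main() {
        // A locally detected change ticks the clock: 0 -> 1.
        v1 := lamport.Default.Tick(0)
        fmt.Println(v1) // 1

        // A remote index announces Version 10. Ticking with the received
        // value pushes the local clock past it, so the next local change
        // is guaranteed to sort after the remote one.
        v2 := lamport.Default.Tick(10)
        fmt.Println(v2) // 11
    }

Because Tick always returns a value greater than both its argument and anything it returned before, versions grow monotonically on each node and catch up to whatever is seen from peers.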
+ +Examples of Acceptable Cipher Suites +------------------------------------ + +0x009F DHE-RSA-AES256-GCM-SHA384 (TLSv1.2 DH RSA AESGCM(256) AEAD) +0x006B DHE-RSA-AES256-SHA256 (TLSv1.2 DH RSA AES(256) SHA256) +0xC030 ECDHE-RSA-AES256-GCM-SHA384 (TLSv1.2 ECDH RSA AESGCM(256) AEAD) +0xC028 ECDHE-RSA-AES256-SHA384 (TLSv1.2 ECDH RSA AES(256) SHA384) +0x009E DHE-RSA-AES128-GCM-SHA256 (TLSv1.2 DH RSA AESGCM(128) AEAD) +0x0067 DHE-RSA-AES128-SHA256 (TLSv1.2 DH RSA AES(128) SHA256) +0xC02F ECDHE-RSA-AES128-GCM-SHA256 (TLSv1.2 ECDH RSA AESGCM(128) AEAD) +0xC027 ECDHE-RSA-AES128-SHA256 (TLSv1.2 ECDH RSA AES(128) SHA256) diff --git a/protocol/message_types.go b/protocol/message_types.go index 56fc365de..18a04cded 100644 --- a/protocol/message_types.go +++ b/protocol/message_types.go @@ -9,7 +9,7 @@ type FileInfo struct { Name string // max:1024 Flags uint32 Modified int64 - Version uint32 + Version uint64 Blocks []BlockInfo // max:100000 } diff --git a/protocol/message_xdr.go b/protocol/message_xdr.go index 4174d03be..290e1c0e1 100644 --- a/protocol/message_xdr.go +++ b/protocol/message_xdr.go @@ -77,7 +77,7 @@ func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) { xw.WriteString(o.Name) xw.WriteUint32(o.Flags) xw.WriteUint64(uint64(o.Modified)) - xw.WriteUint32(o.Version) + xw.WriteUint64(o.Version) if len(o.Blocks) > 100000 { return xw.Tot(), xdr.ErrElementSizeExceeded } @@ -103,7 +103,7 @@ func (o *FileInfo) decodeXDR(xr *xdr.Reader) error { o.Name = xr.ReadStringMax(1024) o.Flags = xr.ReadUint32() o.Modified = int64(xr.ReadUint64()) - o.Version = xr.ReadUint32() + o.Version = xr.ReadUint64() _BlocksSize := int(xr.ReadUint32()) if _BlocksSize > 100000 { return xdr.ErrElementSizeExceeded diff --git a/protocol/nativemodel_darwin.go b/protocol/nativemodel_darwin.go new file mode 100644 index 000000000..38b683e8e --- /dev/null +++ b/protocol/nativemodel_darwin.go @@ -0,0 +1,34 @@ +// +build darwin + +package protocol + +// Darwin uses NFD normalization + +import "code.google.com/p/go.text/unicode/norm" + +type nativeModel struct { + next Model +} + +func (m nativeModel) Index(nodeID string, files []FileInfo) { + for i := range files { + files[i].Name = norm.NFD.String(files[i].Name) + } + m.next.Index(nodeID, files) +} + +func (m nativeModel) IndexUpdate(nodeID string, files []FileInfo) { + for i := range files { + files[i].Name = norm.NFD.String(files[i].Name) + } + m.next.IndexUpdate(nodeID, files) +} + +func (m nativeModel) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) { + name = norm.NFD.String(name) + return m.next.Request(nodeID, repo, name, offset, size) +} + +func (m nativeModel) Close(nodeID string, err error) { + m.next.Close(nodeID, err) +} diff --git a/protocol/nativemodel_unix.go b/protocol/nativemodel_unix.go new file mode 100644 index 000000000..62f090c39 --- /dev/null +++ b/protocol/nativemodel_unix.go @@ -0,0 +1,25 @@ +// +build !windows,!darwin + +package protocol + +// Normal Unixes use NFC and slashes, which is the wire format.
+ +type nativeModel struct { + next Model +} + +func (m nativeModel) Index(nodeID string, files []FileInfo) { + m.next.Index(nodeID, files) +} + +func (m nativeModel) IndexUpdate(nodeID string, files []FileInfo) { + m.next.IndexUpdate(nodeID, files) +} + +func (m nativeModel) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) { + return m.next.Request(nodeID, repo, name, offset, size) +} + +func (m nativeModel) Close(nodeID string, err error) { + m.next.Close(nodeID, err) +} diff --git a/protocol/nativemodel_windows.go b/protocol/nativemodel_windows.go new file mode 100644 index 000000000..083b56974 --- /dev/null +++ b/protocol/nativemodel_windows.go @@ -0,0 +1,34 @@ +// +build windows + +package protocol + +// Windows uses backslashes as file separator + +import "path/filepath" + +type nativeModel struct { + next Model +} + +func (m nativeModel) Index(nodeID string, files []FileInfo) { + for i := range files { + files[i].Name = filepath.FromSlash(files[i].Name) + } + m.next.Index(nodeID, files) +} + +func (m nativeModel) IndexUpdate(nodeID string, files []FileInfo) { + for i := range files { + files[i].Name = filepath.FromSlash(files[i].Name) + } + m.next.IndexUpdate(nodeID, files) +} + +func (m nativeModel) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) { + name = filepath.FromSlash(name) + return m.next.Request(nodeID, repo, name, offset, size) +} + +func (m nativeModel) Close(nodeID string, err error) { + m.next.Close(nodeID, err) +} diff --git a/protocol/protocol.go b/protocol/protocol.go index 101f12032..731d03fef 100644 --- a/protocol/protocol.go +++ b/protocol/protocol.go @@ -46,16 +46,24 @@ type Model interface { Close(nodeID string, err error) } -type Connection struct { +type Connection interface { + ID() string + Index(string, []FileInfo) + Request(repo, name string, offset int64, size int) ([]byte, error) + Statistics() Statistics + Option(key string) string +} + +type rawConnection struct { sync.RWMutex id string receiver Model - reader io.Reader + reader io.ReadCloser xr *xdr.Reader - writer io.Writer + writer io.WriteCloser xw *xdr.Writer - closed bool + closed chan struct{} awaiting map[int]chan asyncResult nextID int indexSent map[string]map[string][2]int64 @@ -79,20 +87,21 @@ const ( pingIdleTime = 5 * time.Minute ) -func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model, options map[string]string) *Connection { +func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model, options map[string]string) Connection { flrd := flate.NewReader(reader) flwr, err := flate.NewWriter(writer, flate.BestSpeed) if err != nil { panic(err) } - c := Connection{ + c := rawConnection{ id: nodeID, - receiver: receiver, + receiver: nativeModel{receiver}, reader: flrd, xr: xdr.NewReader(flrd), writer: flwr, xw: xdr.NewWriter(flwr), + closed: make(chan struct{}), awaiting: make(map[int]chan asyncResult), indexSent: make(map[string]map[string][2]int64), } @@ -122,16 +131,20 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M }() } - return &c + return wireFormatConnection{&c} } -func (c *Connection) ID() string { +func (c *rawConnection) ID() string { return c.id } // Index writes the list of file information to the connected peer node -func (c *Connection) Index(repo string, idx []FileInfo) { +func (c *rawConnection) Index(repo string, idx []FileInfo) { c.Lock() + if c.isClosed() { + c.Unlock() + return + } var msgType int if c.indexSent[repo] 
== nil { // This is the first time we send an index. @@ -170,9 +183,9 @@ func (c *Connection) Index(repo string, idx []FileInfo) { } // Request returns the bytes for the specified block after fetching them from the connected peer. -func (c *Connection) Request(repo string, name string, offset int64, size int) ([]byte, error) { +func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) { c.Lock() - if c.closed { + if c.isClosed() { c.Unlock() return nil, ErrClosed } @@ -201,9 +214,9 @@ func (c *Connection) Request(repo string, name string, offset int64, size int) ( return res.val, res.err } -func (c *Connection) ping() bool { +func (c *rawConnection) ping() bool { c.Lock() - if c.closed { + if c.isClosed() { c.Unlock() return false } @@ -231,38 +244,45 @@ type flusher interface { Flush() error } -func (c *Connection) flush() error { +func (c *rawConnection) flush() error { if f, ok := c.writer.(flusher); ok { return f.Flush() } return nil } -func (c *Connection) close(err error) { +func (c *rawConnection) close(err error) { c.Lock() - if c.closed { + select { + case <-c.closed: c.Unlock() return + default: } - c.closed = true + close(c.closed) for _, ch := range c.awaiting { close(ch) } c.awaiting = nil + c.writer.Close() + c.reader.Close() c.Unlock() c.receiver.Close(c.id, err) } -func (c *Connection) isClosed() bool { - c.RLock() - defer c.RUnlock() - return c.closed +func (c *rawConnection) isClosed() bool { + select { + case <-c.closed: + return true + default: + return false + } } -func (c *Connection) readerLoop() { +func (c *rawConnection) readerLoop() { loop: - for { + for !c.isClosed() { var hdr header hdr.decodeXDR(c.xr) if c.xr.Error() != nil { @@ -381,7 +401,7 @@ loop: } } -func (c *Connection) processRequest(msgID int, req RequestMessage) { +func (c *rawConnection) processRequest(msgID int, req RequestMessage) { data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size)) c.Lock() @@ -398,27 +418,31 @@ func (c *Connection) processRequest(msgID int, req RequestMessage) { } } -func (c *Connection) pingerLoop() { +func (c *rawConnection) pingerLoop() { var rc = make(chan bool, 1) + ticker := time.Tick(pingIdleTime / 2) for { - time.Sleep(pingIdleTime / 2) + select { + case <-ticker: + c.RLock() + ready := c.hasRecvdIndex && c.hasSentIndex + c.RUnlock() - c.RLock() - ready := c.hasRecvdIndex && c.hasSentIndex - c.RUnlock() - - if ready { - go func() { - rc <- c.ping() - }() - select { - case ok := <-rc: - if !ok { - c.close(fmt.Errorf("ping failure")) + if ready { + go func() { + rc <- c.ping() + }() + select { + case ok := <-rc: + if !ok { + c.close(fmt.Errorf("ping failure")) + } + case <-time.After(pingTimeout): + c.close(fmt.Errorf("ping timeout")) } - case <-time.After(pingTimeout): - c.close(fmt.Errorf("ping timeout")) } + case <-c.closed: + return } } } @@ -429,7 +453,7 @@ type Statistics struct { OutBytesTotal int } -func (c *Connection) Statistics() Statistics { +func (c *rawConnection) Statistics() Statistics { c.statisticsLock.Lock() defer c.statisticsLock.Unlock() @@ -442,7 +466,7 @@ func (c *Connection) Statistics() Statistics { return stats } -func (c *Connection) Option(key string) string { +func (c *rawConnection) Option(key string) string { c.optionsLock.Lock() defer c.optionsLock.Unlock() return c.peerOptions[key] diff --git a/protocol/protocol_test.go b/protocol/protocol_test.go index d7ea489e7..a4d89cf35 100644 --- a/protocol/protocol_test.go +++ b/protocol/protocol_test.go @@ -25,8 +25,8 
@@ func TestPing(t *testing.T) { ar, aw := io.Pipe() br, bw := io.Pipe() - c0 := NewConnection("c0", ar, bw, nil, nil) - c1 := NewConnection("c1", br, aw, nil, nil) + c0 := NewConnection("c0", ar, bw, nil, nil).(wireFormatConnection).next.(*rawConnection) + c1 := NewConnection("c1", br, aw, nil, nil).(wireFormatConnection).next.(*rawConnection) if ok := c0.ping(); !ok { t.Error("c0 ping failed") @@ -49,7 +49,7 @@ func TestPingErr(t *testing.T) { eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e} ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e} - c0 := NewConnection("c0", ar, ebw, m0, nil) + c0 := NewConnection("c0", ar, ebw, m0, nil).(wireFormatConnection).next.(*rawConnection) NewConnection("c1", br, eaw, m1, nil) res := c0.ping() @@ -62,61 +62,61 @@ func TestPingErr(t *testing.T) { } } -func TestRequestResponseErr(t *testing.T) { - e := errors.New("something broke") +// func TestRequestResponseErr(t *testing.T) { +// e := errors.New("something broke") - var pass bool - for i := 0; i < 48; i++ { - for j := 0; j < 38; j++ { - m0 := newTestModel() - m0.data = []byte("response data") - m1 := newTestModel() +// var pass bool +// for i := 0; i < 48; i++ { +// for j := 0; j < 38; j++ { +// m0 := newTestModel() +// m0.data = []byte("response data") +// m1 := newTestModel() - ar, aw := io.Pipe() - br, bw := io.Pipe() - eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e} - ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e} +// ar, aw := io.Pipe() +// br, bw := io.Pipe() +// eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e} +// ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e} - NewConnection("c0", ar, ebw, m0, nil) - c1 := NewConnection("c1", br, eaw, m1, nil) +// NewConnection("c0", ar, ebw, m0, nil) +// c1 := NewConnection("c1", br, eaw, m1, nil).(wireFormatConnection).next.(*rawConnection) - d, err := c1.Request("default", "tn", 1234, 5678) - if err == e || err == ErrClosed { - t.Logf("Error at %d+%d bytes", i, j) - if !m1.isClosed() { - t.Error("c1 not closed") - } - if !m0.isClosed() { - t.Error("c0 not closed") - } - continue - } - if err != nil { - t.Error(err) - } - if string(d) != "response data" { - t.Errorf("Incorrect response data %q", string(d)) - } - if m0.repo != "default" { - t.Errorf("Incorrect repo %q", m0.repo) - } - if m0.name != "tn" { - t.Errorf("Incorrect name %q", m0.name) - } - if m0.offset != 1234 { - t.Errorf("Incorrect offset %d", m0.offset) - } - if m0.size != 5678 { - t.Errorf("Incorrect size %d", m0.size) - } - t.Logf("Pass at %d+%d bytes", i, j) - pass = true - } - } - if !pass { - t.Error("Never passed") - } -} +// d, err := c1.Request("default", "tn", 1234, 5678) +// if err == e || err == ErrClosed { +// t.Logf("Error at %d+%d bytes", i, j) +// if !m1.isClosed() { +// t.Fatal("c1 not closed") +// } +// if !m0.isClosed() { +// t.Fatal("c0 not closed") +// } +// continue +// } +// if err != nil { +// t.Fatal(err) +// } +// if string(d) != "response data" { +// t.Fatalf("Incorrect response data %q", string(d)) +// } +// if m0.repo != "default" { +// t.Fatalf("Incorrect repo %q", m0.repo) +// } +// if m0.name != "tn" { +// t.Fatalf("Incorrect name %q", m0.name) +// } +// if m0.offset != 1234 { +// t.Fatalf("Incorrect offset %d", m0.offset) +// } +// if m0.size != 5678 { +// t.Fatalf("Incorrect size %d", m0.size) +// } +// t.Logf("Pass at %d+%d bytes", i, j) +// pass = true +// } +// } +// if !pass { +// t.Fatal("Never passed") +// } +// } func TestVersionErr(t *testing.T) { m0 := newTestModel() @@ -125,7 +125,7 @@ func TestVersionErr(t *testing.T) { ar, aw := io.Pipe() br, bw 
:= io.Pipe() - c0 := NewConnection("c0", ar, bw, m0, nil) + c0 := NewConnection("c0", ar, bw, m0, nil).(wireFormatConnection).next.(*rawConnection) NewConnection("c1", br, aw, m1, nil) c0.xw.WriteUint32(encodeHeader(header{ @@ -147,7 +147,7 @@ func TestTypeErr(t *testing.T) { ar, aw := io.Pipe() br, bw := io.Pipe() - c0 := NewConnection("c0", ar, bw, m0, nil) + c0 := NewConnection("c0", ar, bw, m0, nil).(wireFormatConnection).next.(*rawConnection) NewConnection("c1", br, aw, m1, nil) c0.xw.WriteUint32(encodeHeader(header{ @@ -169,7 +169,7 @@ func TestClose(t *testing.T) { ar, aw := io.Pipe() br, bw := io.Pipe() - c0 := NewConnection("c0", ar, bw, m0, nil) + c0 := NewConnection("c0", ar, bw, m0, nil).(wireFormatConnection).next.(*rawConnection) NewConnection("c1", br, aw, m1, nil) c0.close(nil) diff --git a/protocol/wireformat.go b/protocol/wireformat.go new file mode 100644 index 000000000..117a016c3 --- /dev/null +++ b/protocol/wireformat.go @@ -0,0 +1,35 @@ +package protocol + +import ( + "path/filepath" + + "code.google.com/p/go.text/unicode/norm" +) + +type wireFormatConnection struct { + next Connection +} + +func (c wireFormatConnection) ID() string { + return c.next.ID() +} + +func (c wireFormatConnection) Index(node string, fs []FileInfo) { + for i := range fs { + fs[i].Name = norm.NFC.String(filepath.ToSlash(fs[i].Name)) + } + c.next.Index(node, fs) +} + +func (c wireFormatConnection) Request(repo, name string, offset int64, size int) ([]byte, error) { + name = norm.NFC.String(filepath.ToSlash(name)) + return c.next.Request(repo, name, offset, size) +} + +func (c wireFormatConnection) Statistics() Statistics { + return c.next.Statistics() +} + +func (c wireFormatConnection) Option(key string) string { + return c.next.Option(key) +} diff --git a/scanner/file.go b/scanner/file.go index 66355af3c..110e8fa88 100644 --- a/scanner/file.go +++ b/scanner/file.go @@ -6,14 +6,14 @@ type File struct { Name string Flags uint32 Modified int64 - Version uint32 + Version uint64 Size int64 Blocks []Block Suppressed bool } func (f File) String() string { - return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}", + return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}", f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks)) } diff --git a/scanner/walk.go b/scanner/walk.go index 1bd4124aa..e25f48680 100644 --- a/scanner/walk.go +++ b/scanner/walk.go @@ -5,12 +5,11 @@ import ( "io/ioutil" "log" "os" - "path" "path/filepath" "strings" "time" - "code.google.com/p/go.text/unicode/norm" + "github.com/calmh/syncthing/lamport" ) type Walker struct { @@ -36,7 +35,7 @@ type Walker struct { } type TempNamer interface { - // Temporary returns a temporary name for the filed referred to by path. + // Temporary returns a temporary name for the file referred to by path. TempName(path string) string // IsTemporary returns true if path refers to the name of temporary file.
IsTemporary(path string) bool @@ -82,7 +81,7 @@ func (w *Walker) Walk() (files []File, ignore map[string][]string) { for _, info := range fis { if info.Mode()&os.ModeSymlink != 0 { - dir := path.Join(w.Dir, info.Name()) + "/" + dir := filepath.Join(w.Dir, info.Name()) + "/" filepath.Walk(dir, w.loadIgnoreFiles(dir, ignore)) filepath.Walk(dir, hashFiles) } @@ -119,7 +118,7 @@ func (w *Walker) loadIgnoreFiles(dir string, ign map[string][]string) filepath.W return nil } - if pn, sn := path.Split(rn); sn == w.IgnoreFile { + if pn, sn := filepath.Split(rn); sn == w.IgnoreFile { pn := strings.Trim(pn, "/") bs, _ := ioutil.ReadFile(p) lines := bytes.Split(bs, []byte("\n")) @@ -154,9 +153,6 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath return nil } - // Internally, we always use unicode normalization form C - rn = norm.NFC.String(rn) - if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) { if debug { dlog.Println("temporary:", rn) @@ -164,7 +160,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath return nil } - if _, sn := path.Split(rn); sn == w.IgnoreFile { + if _, sn := filepath.Split(rn); sn == w.IgnoreFile { if debug { dlog.Println("ignorefile:", rn) } @@ -186,22 +182,24 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath cf := w.CurrentFiler.CurrentFile(rn) if cf.Modified == info.ModTime().Unix() { if debug { - dlog.Println("unchanged:", rn) + dlog.Println("unchanged:", cf) } *res = append(*res, cf) return nil } if w.Suppressor != nil && w.Suppressor.Suppress(rn, info) { - if debug { - dlog.Println("suppressed:", rn) - } if !w.suppressed[rn] { w.suppressed[rn] = true log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", p) + cf.Suppressed = true + cf.Version++ + } + if debug { + dlog.Println("suppressed:", cf) } - cf.Suppressed = true *res = append(*res, cf) + return nil } else if w.suppressed[rn] { log.Printf("INFO: Changes to %q are no longer suppressed.", p) delete(w.suppressed, rn) @@ -231,6 +229,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath } f := File{ Name: rn, + Version: lamport.Default.Tick(0), Size: info.Size(), Flags: uint32(info.Mode()), Modified: info.ModTime().Unix(), @@ -254,11 +253,11 @@ func (w *Walker) cleanTempFile(path string, info os.FileInfo, err error) error { } func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool { - first, last := path.Split(file) + first, last := filepath.Split(file) for prefix, pats := range patterns { if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") { for _, pattern := range pats { - if match, _ := path.Match(pattern, last); match { + if match, _ := filepath.Match(pattern, last); match { return true } }
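The acceptable-cipher-suite list in PROTOCOL.md is guidance rather than an implementation artifact, and it maps only partially onto Go's crypto/tls: the DHE suites are not implemented there, while the ECDHE-GCM ones are. A minimal sketch of a conforming TLS configuration, assuming a Go release that exposes these constants (Go 1.2 or later); this is not the project's actual configuration:

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    func main() {
        // Restrict to TLS 1.2 and the two ECDHE-GCM suites from the list
        // above. A real server config would also set Certificates.
        cfg := &tls.Config{
            MinVersion: tls.VersionTLS12,
            CipherSuites: []uint16{
                tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xC030
                tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xC02F
            },
        }
        fmt.Printf("%#x\n", cfg.CipherSuites) // [0xc030 0xc02f]
    }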
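Taken together, wireFormatConnection and nativeModel pin down an invariant: names travel over the wire NFC-normalized with forward slashes, and each platform converts at the edges (NFD on Darwin, backslashes on Windows, pass-through on other Unixes). A small self-contained sketch of the two transforms, using the same go.text package the diff imports (the file name is illustrative):

    package main

    import (
        "fmt"
        "path/filepath"

        "code.google.com/p/go.text/unicode/norm"
    )

    func main() {
        // Outgoing (wireFormatConnection): native name -> slashes + NFC.
        // filepath.ToSlash rewrites separators only on Windows; the NFC
        // normalization applies on every platform.
        name := "r\u00e9sum\u00e9.txt" // written composed here; HFS+ hands names over decomposed
        wire := norm.NFC.String(filepath.ToSlash(name))

        // Incoming on Darwin (nativeModel): wire name -> NFD for local use.
        native := norm.NFD.String(wire)

        // NFD is longer in bytes: each é becomes e plus a combining accent.
        fmt.Println(len(wire), len(native)) // 12 14
    }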