Change DB label format (index folders, devices)

Jakob Borg 2016-01-03 19:08:19 +01:00
parent 18ae87962d
commit ac190b2e39
14 changed files with 522 additions and 125 deletions
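At the heart of the change, as the hunks below show, is how folder (and device) labels are stored inside database keys: a fixed 64-byte zero-padded copy of the label is replaced by a 4-byte big-endian index allocated from a small persisted lookup table. A standalone sketch of the two layouts for a block key (helper names and the demo type byte are illustrative, not from the commit):

package main

import (
	"encoding/binary"
	"fmt"
)

const demoKeyType byte = 0xAA // arbitrary type byte, for illustration only

// oldBlockKey sketches the pre-change layout: 1 type byte, a 64-byte
// zero-padded folder label, a 32-byte block hash, then the file name.
func oldBlockKey(keyType byte, folder string, hash []byte, file string) []byte {
	k := make([]byte, 1+64+32+len(file))
	k[0] = keyType
	copy(k[1:1+64], folder) // unused bytes of the label field stay zero
	copy(k[1+64:], hash)
	copy(k[1+64+32:], file)
	return k
}

// newBlockKey sketches the new layout: 1 type byte, a 4-byte big-endian
// folder index (allocated via the smallIndex added in this commit), the
// 32-byte hash, then the file name.
func newBlockKey(keyType byte, folderIdx uint32, hash []byte, file string) []byte {
	k := make([]byte, 1+4+32+len(file))
	k[0] = keyType
	binary.BigEndian.PutUint32(k[1:], folderIdx)
	copy(k[1+4:], hash)
	copy(k[1+4+32:], file)
	return k
}

func main() {
	hash := make([]byte, 32)
	fmt.Println(len(oldBlockKey(demoKeyType, "default", hash, "a/b"))) // 100 bytes
	fmt.Println(len(newBlockKey(demoKeyType, 0, hash, "a/b")))         // 40 bytes
}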

View File

@ -48,7 +48,7 @@ var locations = map[locationEnum]string{
locKeyFile: "${config}/key.pem",
locHTTPSCertFile: "${config}/https-cert.pem",
locHTTPSKeyFile: "${config}/https-key.pem",
locDatabase: "${config}/index-v0.11.0.db",
locDatabase: "${config}/index-v0.13.0.db",
locLogFile: "${config}/syncthing.log", // -logfile on Windows
locCsrfTokens: "${config}/csrftokens.txt",
locPanicLog: "${config}/panic-${timestamp}.log",

View File

@ -633,6 +633,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
dbFile := locations[locDatabase]
ldb, err := db.Open(dbFile)
if err != nil {
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}
@ -1156,6 +1157,7 @@ func cleanConfigDirectory() {
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither

1
lib/db/.gitignore vendored
View File

@ -1 +1,2 @@
!*.zip
testdata/*.db

View File

@ -4,16 +4,9 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package db
import (
"bytes"
"encoding/binary"
"fmt"
@ -30,10 +23,10 @@ const maxBatchSize = 256 << 10
type BlockMap struct {
db *Instance
folder string
folder uint32
}
func NewBlockMap(db *Instance, folder string) *BlockMap {
func NewBlockMap(db *Instance, folder uint32) *BlockMap {
return &BlockMap{
db: db,
folder: folder,
@ -123,7 +116,7 @@ func (m *BlockMap) Discard(files []protocol.FileInfo) error {
// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
batch := new(leveldb.Batch)
iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:1+64]), nil)
iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]), nil)
defer iter.Release()
for iter.Next() {
if batch.Len() > maxBatchSize {
@ -173,12 +166,13 @@ func (f *BlockFinder) String() string {
func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
var key []byte
for _, folder := range folders {
key = blockKeyInto(key, hash, folder, "")
folderID := f.db.folderIdx.ID([]byte(folder))
key = blockKeyInto(key, hash, folderID, "")
iter := f.db.NewIterator(util.BytesPrefix(key), nil)
defer iter.Release()
for iter.Next() && iter.Error() == nil {
folder, file := fromBlockKey(iter.Key())
file := blockKeyName(iter.Key())
index := int32(binary.BigEndian.Uint32(iter.Value()))
if iterFn(folder, osutil.NativeFilename(file), index) {
return true
@ -194,48 +188,41 @@ func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []b
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(index))
folderID := f.db.folderIdx.ID([]byte(folder))
batch := new(leveldb.Batch)
batch.Delete(blockKeyInto(nil, oldHash, folder, file))
batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
batch.Delete(blockKeyInto(nil, oldHash, folderID, file))
batch.Put(blockKeyInto(nil, newHash, folderID, file), buf)
return f.db.Write(batch, nil)
}
// blockKeyInto returns a byte slice encoding the following information:
// keyTypeBlock (1 byte)
// folder (64 bytes)
// folder (4 bytes)
// block hash (32 bytes)
// file name (variable size)
func blockKeyInto(o, hash []byte, folder, file string) []byte {
reqLen := 1 + 64 + 32 + len(file)
func blockKeyInto(o, hash []byte, folder uint32, file string) []byte {
reqLen := keyPrefixLen + keyFolderLen + keyHashLen + len(file)
if cap(o) < reqLen {
o = make([]byte, reqLen)
} else {
o = o[:reqLen]
}
o[0] = KeyTypeBlock
copy(o[1:], []byte(folder))
for i := len(folder); i < 64; i++ {
o[1+i] = 0
}
copy(o[1+64:], []byte(hash))
copy(o[1+64+32:], []byte(file))
binary.BigEndian.PutUint32(o[keyPrefixLen:], folder)
copy(o[keyPrefixLen+keyFolderLen:], []byte(hash))
copy(o[keyPrefixLen+keyFolderLen+keyHashLen:], []byte(file))
return o
}
func fromBlockKey(data []byte) (string, string) {
if len(data) < 1+64+32+1 {
// blockKeyName returns the file name from the block key
func blockKeyName(data []byte) string {
if len(data) < keyPrefixLen+keyFolderLen+keyHashLen+1 {
panic("Incorrect key length")
}
if data[0] != KeyTypeBlock {
panic("Incorrect key type")
}
file := string(data[1+64+32:])
slice := data[1 : 1+64]
izero := bytes.IndexByte(slice, 0)
if izero > -1 {
return string(slice[:izero]), file
}
return string(slice), file
file := string(data[keyPrefixLen+keyFolderLen+keyHashLen:])
return file
}
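A brief in-package sketch of the new helpers (OpenMemory, blockKeyInto, blockKeyName and the key length constants are all defined elsewhere in this commit); the wrapper function name is illustrative:

func exampleBlockKeyRoundTrip() (string, bool) {
	db := OpenMemory()
	defer db.Close()

	folderID := db.folderIdx.ID([]byte("default"))
	hash := make([]byte, keyHashLen) // stands in for a real 32-byte block hash

	key := blockKeyInto(nil, hash, folderID, "some/file")

	// 1 type byte + 4-byte folder index + 32-byte hash + the file name.
	ok := len(key) == keyPrefixLen+keyFolderLen+keyHashLen+len("some/file")

	// The file name can be read straight back out of the key.
	return blockKeyName(key), ok // "some/file", true
}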

View File

@ -10,6 +10,7 @@ import (
"testing"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syndtr/goleveldb/leveldb/util"
)
func genBlocks(n int) []protocol.BlockInfo {
@ -55,7 +56,7 @@ func setup() (*Instance, *BlockFinder) {
}
func dbEmpty(db *Instance) bool {
iter := db.NewIterator(nil, nil)
iter := db.NewIterator(util.BytesPrefix([]byte{KeyTypeBlock}), nil)
defer iter.Release()
if iter.Next() {
return false
@ -70,7 +71,7 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
t.Fatal("db not empty")
}
m := NewBlockMap(db, "folder1")
m := NewBlockMap(db, db.folderIdx.ID([]byte("folder1")))
f3.Flags |= protocol.FlagDirectory
@ -152,8 +153,8 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
func TestBlockFinderLookup(t *testing.T) {
db, f := setup()
m1 := NewBlockMap(db, "folder1")
m2 := NewBlockMap(db, "folder2")
m1 := NewBlockMap(db, db.folderIdx.ID([]byte("folder1")))
m2 := NewBlockMap(db, db.folderIdx.ID([]byte("folder2")))
err := m1.Add([]protocol.FileInfo{f1})
if err != nil {
@ -221,7 +222,7 @@ func TestBlockFinderFix(t *testing.T) {
return true
}
m := NewBlockMap(db, "folder1")
m := NewBlockMap(db, db.folderIdx.ID([]byte("folder1")))
err := m.Add([]protocol.FileInfo{f1})
if err != nil {
t.Fatal(err)

View File

@ -42,6 +42,8 @@ const (
KeyTypeDeviceStatistic
KeyTypeFolderStatistic
KeyTypeVirtualMtime
KeyTypeFolderIdx
KeyTypeDeviceIdx
)
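The two identifiers are appended at the end of what is, per the rest of this file, an iota-style enumeration; the start of the const block is outside this hunk, so the exact ordering and values below are an assumption. Appending rather than inserting keeps the byte values of the existing key types, and therefore all keys already on disk, stable:

// Assumed shape of the enumeration; only the last two entries are new.
const (
	KeyTypeDevice = iota // existing on-disk values must not change
	KeyTypeGlobal
	KeyTypeBlock
	KeyTypeDeviceStatistic
	KeyTypeFolderStatistic
	KeyTypeVirtualMtime
	KeyTypeFolderIdx // new: prefix for the folder label index
	KeyTypeDeviceIdx // new: prefix for the device ID index
)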
type fileVersion struct {

114
lib/db/leveldb_convert.go Normal file
View File

@ -0,0 +1,114 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package db
import (
"bytes"
"github.com/syndtr/goleveldb/leveldb"
)
// convertKeyFormat converts from the v0.12 to the v0.13 database format, to
// avoid having to do a rescan. The change is in the key format for folder
// labels, so we basically just iterate over the database rewriting keys as
// necessary.
func convertKeyFormat(from, to *leveldb.DB) error {
l.Infoln("Converting database key format")
blocks, files, globals, unchanged := 0, 0, 0, 0
dbi := newDBInstance(to)
i := from.NewIterator(nil, nil)
for i.Next() {
key := i.Key()
switch key[0] {
case KeyTypeBlock:
folder, file := oldFromBlockKey(key)
folderIdx := dbi.folderIdx.ID([]byte(folder))
hash := key[1+64:]
newKey := blockKeyInto(nil, hash, folderIdx, file)
if err := to.Put(newKey, i.Value(), nil); err != nil {
return err
}
blocks++
case KeyTypeDevice:
newKey := dbi.deviceKey(oldDeviceKeyFolder(key), oldDeviceKeyDevice(key), oldDeviceKeyName(key))
if err := to.Put(newKey, i.Value(), nil); err != nil {
return err
}
files++
case KeyTypeGlobal:
newKey := dbi.globalKey(oldGlobalKeyFolder(key), oldGlobalKeyName(key))
if err := to.Put(newKey, i.Value(), nil); err != nil {
return err
}
globals++
case KeyTypeVirtualMtime:
// Cannot be converted, we drop it instead :(
default:
if err := to.Put(key, i.Value(), nil); err != nil {
return err
}
unchanged++
}
}
l.Infof("Converted %d blocks, %d files, %d globals (%d unchanged).", blocks, files, globals, unchanged)
return nil
}
func oldDeviceKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
return folder
}
return folder[:izero]
}
func oldDeviceKeyDevice(key []byte) []byte {
return key[1+64 : 1+64+32]
}
func oldDeviceKeyName(key []byte) []byte {
return key[1+64+32:]
}
func oldGlobalKeyName(key []byte) []byte {
return key[1+64:]
}
func oldGlobalKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
return folder
}
return folder[:izero]
}
func oldFromBlockKey(data []byte) (string, string) {
if len(data) < 1+64+32+1 {
panic("Incorrect key length")
}
if data[0] != KeyTypeBlock {
panic("Incorrect key type")
}
file := string(data[1+64+32:])
slice := data[1 : 1+64]
izero := bytes.IndexByte(slice, 0)
if izero > -1 {
return string(slice[:izero]), file
}
return string(slice), file
}

View File

@ -0,0 +1,136 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package db
import (
"archive/zip"
"io"
"os"
"path/filepath"
"testing"
"github.com/syndtr/goleveldb/leveldb"
)
func TestLabelConversion(t *testing.T) {
os.RemoveAll("testdata/oldformat.db")
defer os.RemoveAll("testdata/oldformat.db")
os.RemoveAll("testdata/newformat.db")
defer os.RemoveAll("testdata/newformat.db")
if err := unzip("testdata/oldformat.db.zip", "testdata"); err != nil {
t.Fatal(err)
}
odb, err := leveldb.OpenFile("testdata/oldformat.db", nil)
if err != nil {
t.Fatal(err)
}
ldb, err := leveldb.OpenFile("testdata/newformat.db", nil)
if err != nil {
t.Fatal(err)
}
if err = convertKeyFormat(odb, ldb); err != nil {
t.Fatal(err)
}
ldb.Close()
odb.Close()
inst, err := Open("testdata/newformat.db")
if err != nil {
t.Fatal(err)
}
fs := NewFileSet("default", inst)
files, deleted, _ := fs.GlobalSize()
if files+deleted != 953 {
// Expected number of global entries determined by
// ../../bin/stindex testdata/oldformat.db/ | grep global | grep -c default
t.Errorf("Conversion error, global list differs (%d != 953)", files+deleted)
}
files, deleted, _ = fs.LocalSize()
if files+deleted != 953 {
t.Errorf("Conversion error, device list differs (%d != 953)", files+deleted)
}
f := NewBlockFinder(inst)
// [block] F:"default" H:1c25dea9003cc16216e2a22900be1ec1cc5aaf270442904e2f9812c314e929d8 N:"f/f2/f25f1b3e6e029231b933531b2138796d" I:3
h := []byte{0x1c, 0x25, 0xde, 0xa9, 0x00, 0x3c, 0xc1, 0x62, 0x16, 0xe2, 0xa2, 0x29, 0x00, 0xbe, 0x1e, 0xc1, 0xcc, 0x5a, 0xaf, 0x27, 0x04, 0x42, 0x90, 0x4e, 0x2f, 0x98, 0x12, 0xc3, 0x14, 0xe9, 0x29, 0xd8}
found := 0
f.Iterate([]string{"default"}, h, func(folder, file string, idx int32) bool {
if folder == "default" && file == filepath.FromSlash("f/f2/f25f1b3e6e029231b933531b2138796d") && idx == 3 {
found++
}
return true
})
if found != 1 {
t.Errorf("Found %d blocks instead of expected 1", found)
}
inst.Close()
}
func unzip(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer func() {
if err := r.Close(); err != nil {
panic(err)
}
}()
os.MkdirAll(dest, 0755)
// Closure to address the file descriptor issue with all the deferred .Close() methods
extractAndWriteFile := func(f *zip.File) error {
rc, err := f.Open()
if err != nil {
return err
}
defer func() {
if err := rc.Close(); err != nil {
panic(err)
}
}()
path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(path, f.Mode())
} else {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
panic(err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return err
}
}
return nil
}
for _, f := range r.File {
err := extractAndWriteFile(f)
if err != nil {
return err
}
}
return nil
}

View File

@ -8,11 +8,15 @@ package db
import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"sort"
"strings"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
@ -25,14 +29,33 @@ type deletionHandler func(t readWriteTransaction, folder, device, name []byte, d
type Instance struct {
*leveldb.DB
folderIdx *smallIndex
deviceIdx *smallIndex
}
const (
keyPrefixLen = 1
keyFolderLen = 4 // indexed
keyDeviceLen = 4 // indexed
keyHashLen = 32
)
func Open(file string) (*Instance, error) {
opts := &opt.Options{
OpenFilesCacheCapacity: 100,
WriteBuffer: 4 << 20,
}
if _, err := os.Stat(file); os.IsNotExist(err) {
// The file we are looking to open does not exist. This may be the
// first launch, so we should look for an old version and try to
// convert it.
if err := checkConvertDatabase(file); err != nil {
l.Infoln("Converting old database:", err)
l.Infoln("Will rescan from scratch.")
}
}
db, err := leveldb.OpenFile(file, opts)
if leveldbIsCorrupted(err) {
db, err = leveldb.RecoverFile(file, opts)
@ -60,9 +83,12 @@ func OpenMemory() *Instance {
}
func newDBInstance(db *leveldb.DB) *Instance {
return &Instance{
i := &Instance{
DB: db,
}
i.folderIdx = newSmallIndex(i, []byte{KeyTypeFolderIdx})
i.deviceIdx = newSmallIndex(i, []byte{KeyTypeDeviceIdx})
return i
}
func (db *Instance) Compact() error {
@ -72,13 +98,10 @@ func (db *Instance) Compact() error {
func (db *Instance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) int64 {
sort.Sort(fileList(fs)) // sort list on name, same as in the database
start := db.deviceKey(folder, device, nil) // before all folder/device files
limit := db.deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
t := db.newReadWriteTransaction()
defer t.close()
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)[:keyPrefixLen+keyFolderLen+keyDeviceLen]), nil)
defer dbi.Release()
moreDb := dbi.Next()
@ -237,13 +260,10 @@ func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, l
}
func (db *Instance) withHave(folder, device []byte, truncate bool, fn Iterator) {
start := db.deviceKey(folder, device, nil) // before all folder/device files
limit := db.deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
t := db.newReadOnlyTransaction()
defer t.close()
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)[:keyPrefixLen+keyFolderLen+keyDeviceLen]), nil)
defer dbi.Release()
for dbi.Next() {
@ -258,13 +278,10 @@ func (db *Instance) withHave(folder, device []byte, truncate bool, fn Iterator)
}
func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
start := db.deviceKey(folder, nil, nil) // before all folder/device files
limit := db.deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
t := db.newReadWriteTransaction()
defer t.close()
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen]), nil)
defer dbi.Release()
for dbi.Next() {
@ -359,7 +376,10 @@ func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator
l.Debugf("vl.versions[0].device: %x", vl.versions[0].device)
l.Debugf("name: %q (%x)", name, name)
l.Debugf("fk: %q", fk)
l.Debugf("fk: %x %x %x", fk[1:1+64], fk[1+64:1+64+32], fk[1+64+32:])
l.Debugf("fk: %x %x %x",
fk[keyPrefixLen:keyPrefixLen+keyFolderLen],
fk[keyPrefixLen+keyFolderLen:keyPrefixLen+keyFolderLen+keyDeviceLen],
fk[keyPrefixLen+keyFolderLen+keyDeviceLen:])
panic(err)
}
@ -403,13 +423,10 @@ func (db *Instance) availability(folder, file []byte) []protocol.DeviceID {
}
func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
start := db.globalKey(folder, nil)
limit := db.globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
t := db.newReadOnlyTransaction()
defer t.close()
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
defer dbi.Release()
var fk []byte
@ -546,9 +563,7 @@ func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
t := db.newReadWriteTransaction()
defer t.close()
start := db.globalKey(folder, nil)
limit := db.globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
defer dbi.Release()
var fk []byte
@ -598,71 +613,72 @@ func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
// deviceKey returns a byte slice encoding the following information:
// keyTypeDevice (1 byte)
// folder (64 bytes)
// device (32 bytes)
// folder (4 bytes)
// device (4 bytes)
// name (variable size)
func (db *Instance) deviceKey(folder, device, file []byte) []byte {
return db.deviceKeyInto(nil, folder, device, file)
}
func (db *Instance) deviceKeyInto(k []byte, folder, device, file []byte) []byte {
reqLen := 1 + 64 + 32 + len(file)
reqLen := keyPrefixLen + keyFolderLen + keyDeviceLen + len(file)
if len(k) < reqLen {
k = make([]byte, reqLen)
}
k[0] = KeyTypeDevice
if len(folder) > 64 {
panic("folder name too long")
}
copy(k[1:], []byte(folder))
copy(k[1+64:], device[:])
copy(k[1+64+32:], []byte(file))
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
binary.BigEndian.PutUint32(k[keyPrefixLen+keyFolderLen:], db.deviceIdx.ID(device))
copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], []byte(file))
return k[:reqLen]
}
// deviceKeyName returns the file name from the key
func (db *Instance) deviceKeyName(key []byte) []byte {
return key[1+64+32:]
return key[keyPrefixLen+keyFolderLen+keyDeviceLen:]
}
// deviceKeyFolder returns the folder name from the key
func (db *Instance) deviceKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
if !ok {
panic("bug: lookup of nonexistent folder ID")
}
return folder
}
return folder[:izero]
}
// deviceKeyDevice returns the device ID from the key
func (db *Instance) deviceKeyDevice(key []byte) []byte {
return key[1+64 : 1+64+32]
device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen+keyFolderLen:]))
if !ok {
panic("bug: lookup of nonexistent device ID")
}
return device
}
// globalKey returns a byte slice encoding the following information:
// keyTypeGlobal (1 byte)
// folder (64 bytes)
// folder (4 bytes)
// name (variable size)
func (db *Instance) globalKey(folder, file []byte) []byte {
k := make([]byte, 1+64+len(file))
k := make([]byte, keyPrefixLen+keyFolderLen+len(file))
k[0] = KeyTypeGlobal
if len(folder) > 64 {
panic("folder name too long")
}
copy(k[1:], []byte(folder))
copy(k[1+64:], []byte(file))
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
copy(k[keyPrefixLen+keyFolderLen:], []byte(file))
return k
}
// globalKeyName returns the filename from the key
func (db *Instance) globalKeyName(key []byte) []byte {
return key[1+64:]
return key[keyPrefixLen+keyFolderLen:]
}
// globalKeyFolder returns the folder name from the key
func (db *Instance) globalKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
return folder
folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
if !ok {
panic("bug: lookup of nonexistent folder ID")
}
return folder[:izero]
return folder
}
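Because the folder and device fields are now fixed-width (4-byte) indexes rather than variable, zero-padded labels, every per-folder or per-folder-and-device scan above can truncate a key to a fixed-length prefix and iterate with util.BytesPrefix instead of constructing explicit start/limit ranges. A sketch of the pattern, in package db (the helper name is illustrative):

// Iterate every file entry for a given folder and device by key prefix.
func (db *Instance) forEachDeviceFile(folder, device []byte, fn func(key, value []byte)) {
	prefix := db.deviceKey(folder, device, nil)[:keyPrefixLen+keyFolderLen+keyDeviceLen]
	it := db.NewIterator(util.BytesPrefix(prefix), nil)
	defer it.Release()
	for it.Next() {
		fn(it.Key(), it.Value())
	}
}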
func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
@ -692,3 +708,132 @@ func leveldbIsCorrupted(err error) bool {
return false
}
// checkConvertDatabase tries to convert an existing old (v0.11) database to
// the new (v0.13) format.
func checkConvertDatabase(dbFile string) error {
oldLoc := filepath.Join(filepath.Dir(dbFile), "index-v0.11.0.db")
if _, err := os.Stat(oldLoc); os.IsNotExist(err) {
// The old database file does not exist; that's ok, continue as if
// everything succeeded.
return nil
} else if err != nil {
// Any other error is weird.
return err
}
// There exists a database in the old format. We run a one-time
// conversion from old to new.
fromDb, err := leveldb.OpenFile(oldLoc, nil)
if err != nil {
return err
}
toDb, err := leveldb.OpenFile(dbFile, nil)
if err != nil {
return err
}
err = convertKeyFormat(fromDb, toDb)
if err != nil {
return err
}
err = toDb.Close()
if err != nil {
return err
}
// We've done this one; we don't want to do it again (if the user runs
// -reset or so). We don't care too much about errors anymore at this stage.
fromDb.Close()
osutil.Rename(oldLoc, oldLoc+".converted")
return nil
}
// A smallIndex is an in-memory bidirectional []byte-to-uint32 map. It gives
// fast lookups in both directions and persists to the database. Don't use it
// for storing more items than fit comfortably in RAM.
type smallIndex struct {
db *Instance
prefix []byte
id2val map[uint32]string
val2id map[string]uint32
nextID uint32
mut sync.Mutex
}
func newSmallIndex(db *Instance, prefix []byte) *smallIndex {
idx := &smallIndex{
db: db,
prefix: prefix,
id2val: make(map[uint32]string),
val2id: make(map[string]uint32),
mut: sync.NewMutex(),
}
idx.load()
return idx
}
// load iterates over the prefix space in the database and populates the
// in-memory maps.
func (i *smallIndex) load() {
tr := i.db.newReadOnlyTransaction()
it := tr.NewIterator(util.BytesPrefix(i.prefix), nil)
for it.Next() {
val := string(it.Value())
id := binary.BigEndian.Uint32(it.Key()[len(i.prefix):])
i.id2val[id] = val
i.val2id[val] = id
if id >= i.nextID {
i.nextID = id + 1
}
}
it.Release()
tr.close()
}
// ID returns the index number for the given byte slice, allocating a new one
// and persisting this to the database if necessary.
func (i *smallIndex) ID(val []byte) uint32 {
i.mut.Lock()
// intentionally avoiding defer here as we want this call to be as fast as
// possible in the general case (folder ID already exists). The map lookup
// with the conversion of []byte to string is compiler optimized to not
// copy the []byte, which is why we don't assign it to a temp variable
// here.
if id, ok := i.val2id[string(val)]; ok {
i.mut.Unlock()
return id
}
id := i.nextID
i.nextID++
valStr := string(val)
i.val2id[valStr] = id
i.id2val[id] = valStr
key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
copy(key, i.prefix)
binary.BigEndian.PutUint32(key[len(i.prefix):], id)
i.db.Put(key, val, nil)
i.mut.Unlock()
return id
}
// Val returns the value for the given index number, or (nil, false) if there
// is no such index number.
func (i *smallIndex) Val(id uint32) ([]byte, bool) {
i.mut.Lock()
val, ok := i.id2val[id]
i.mut.Unlock()
if !ok {
return nil, false
}
return []byte(val), true
}
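A short in-package usage sketch of smallIndex (the wrapper function is illustrative): the same label always maps back to the same small integer, and new allocations are persisted under the index's key prefix so they survive restarts:

func exampleSmallIndex() {
	db := OpenMemory()
	defer db.Close()

	a := db.folderIdx.ID([]byte("default")) // allocates and persists a new ID
	b := db.folderIdx.ID([]byte("photos"))  // a different label gets a different ID
	c := db.folderIdx.ID([]byte("default")) // same label, so a == c

	label, ok := db.folderIdx.Val(b) // []byte("photos"), true
	_, _, _, _, _ = a, b, c, label, ok
}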

View File

@ -16,7 +16,9 @@ func TestDeviceKey(t *testing.T) {
dev := []byte("device67890123456789012345678901")
name := []byte("name")
db := &Instance{}
db := OpenMemory()
db.folderIdx.ID(fld)
db.deviceIdx.ID(dev)
key := db.deviceKey(fld, dev, name)
@ -38,7 +40,8 @@ func TestGlobalKey(t *testing.T) {
fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
name := []byte("name")
db := &Instance{}
db := OpenMemory()
db.folderIdx.ID(fld)
key := db.globalKey(fld, name)

View File

@ -97,7 +97,7 @@ func NewFileSet(folder string, db *Instance) *FileSet {
localVersion: make(map[protocol.DeviceID]int64),
folder: folder,
db: db,
blockmap: NewBlockMap(db, folder),
blockmap: NewBlockMap(db, db.folderIdx.ID([]byte(folder))),
mutex: sync.NewMutex(),
}
@ -244,7 +244,7 @@ func DropFolder(db *Instance, folder string) {
db.dropFolder([]byte(folder))
bm := &BlockMap{
db: db,
folder: folder,
folder: db.folderIdx.ID([]byte(folder)),
}
bm.Drop()
NewVirtualMtimeRepo(db, folder).Drop()

BIN
lib/db/testdata/oldformat.db.zip vendored Normal file

Binary file not shown.

View File

@ -7,6 +7,7 @@
package db
import (
"encoding/binary"
"fmt"
"time"
)
@ -24,10 +25,12 @@ type VirtualMtimeRepo struct {
}
func NewVirtualMtimeRepo(ldb *Instance, folder string) *VirtualMtimeRepo {
prefix := string(KeyTypeVirtualMtime) + folder
var prefix [5]byte // key type + 4 bytes folder idx number
prefix[0] = KeyTypeVirtualMtime
binary.BigEndian.PutUint32(prefix[1:], ldb.folderIdx.ID([]byte(folder)))
return &VirtualMtimeRepo{
ns: NewNamespacedKV(ldb, prefix),
ns: NewNamespacedKV(ldb, string(prefix[:])),
}
}

View File

@ -14,7 +14,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@ -192,6 +191,9 @@ func alterFiles(dir string) error {
return osutil.TryRename(path, newPath)
}
/*
This doesn't in fact work. Sometimes it appears to. We need to get this sorted...
// Switch between files and directories
case r == 3 && comps > 3 && rand.Float64() < 0.2:
if !info.Mode().IsRegular() {
@ -216,6 +218,7 @@ func alterFiles(dir string) error {
generateFiles(path, 10, 20, "../LICENSE")
}
return err
*/
/*
This fails. Bug?