Update restore functionality to run in online mode, consume Enterprise backup files. (#9207)

* Live Restore + Enterprise data format compatibility

* Extended ImportData to import all DB's if no db name given

* Added a new enterprise data test, and backup command now prints the backup file paths at conclusion

* Added whole-system backup test

* Update to use protobuf in all enterprise data cases

* Update to test to do cross-testing with enterprise version

* incremental enterprise backup format support
pull/9308/head
Adam 2018-01-10 13:59:18 -05:00 committed by GitHub
parent a4d48f95f3
commit 938db68198
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 1557 additions and 426 deletions

View File

@ -7,7 +7,9 @@
### Features
- [#8495](https://github.com/influxdata/influxdb/pull/8495): Improve CLI connection warnings
- [#9146](https://github.com/influxdata/influxdb/issues/9146): Backup can produce data in the same format as the enterprise backup/restore tool.
- [#3019](https://github.com/influxdata/influxdb/issues/3019): Backup utility prints a list of backup files.
- [#9146](https://github.com/influxdata/influxdb/issues/9146): Backup/Restore can produce/consume data in the same format as the enterprise backup/restore tool.
- [#8880](https://github.com/influxdata/influxdb/issues/8880): Restore runs in online mode, does not delete existing databases
- [#8879](https://github.com/influxdata/influxdb/issues/8879): Export functionality using start/end to filter exported data by timestamp
- [#9084](https://github.com/influxdata/influxdb/pull/9084): Handle high cardinality deletes in TSM engine
- [#9162](https://github.com/influxdata/influxdb/pull/9162): Improve inmem index startup performance for high cardinality.

View File

@ -57,6 +57,8 @@ type Command struct {
enterprise bool
manifest backup_util.Manifest
enterpriseFileBase string
BackupFiles []string
}
// NewCommand returns a new instance of Command with default settings.
@ -117,19 +119,22 @@ func (cmd *Command) Run(args ...string) error {
}
if cmd.enterprise {
cmd.manifest.Platform = "OSS"
filename := cmd.enterpriseFileBase + ".manifest"
if err := cmd.manifest.Save(filepath.Join(cmd.path, filename)); err != nil {
cmd.StderrLogger.Printf("manifest save failed: %v", err)
return err
}
cmd.BackupFiles = append(cmd.BackupFiles, filename)
}
if err != nil {
cmd.StderrLogger.Printf("backup failed: %v", err)
return err
}
cmd.StdoutLogger.Println("backup complete")
cmd.StdoutLogger.Println("backup complete:")
for _, v := range cmd.BackupFiles {
cmd.StdoutLogger.Println("\t" + filepath.Join(cmd.path, v))
}
return nil
}
@ -158,6 +163,8 @@ func (cmd *Command) parseFlags(args []string) (err error) {
return err
}
cmd.BackupFiles = []string{}
// for enterprise saving, if needed
cmd.enterpriseFileBase = time.Now().UTC().Format(backup_util.EnterpriseFileNamePattern)
@ -237,6 +244,9 @@ func (cmd *Command) backupShard(db, rp, sid string) error {
// TODO: verify shard backup data
err = cmd.downloadAndVerify(req, shardArchivePath, nil)
if !cmd.enterprise {
cmd.BackupFiles = append(cmd.BackupFiles, shardArchivePath)
}
if err != nil {
return err
@ -295,6 +305,8 @@ func (cmd *Command) backupShard(db, rp, sid string) error {
if err := out.Close(); err != nil {
return err
}
cmd.BackupFiles = append(cmd.BackupFiles, filename)
}
return nil
@ -401,6 +413,10 @@ func (cmd *Command) backupMetastore() error {
return err
}
if !cmd.enterprise {
cmd.BackupFiles = append(cmd.BackupFiles, metastoreArchivePath)
}
if cmd.enterprise {
metaBytes, err := backup_util.GetMetaBytes(metastoreArchivePath)
defer os.Remove(metastoreArchivePath)
@ -408,13 +424,19 @@ func (cmd *Command) backupMetastore() error {
return err
}
filename := cmd.enterpriseFileBase + ".meta"
if err := ioutil.WriteFile(filepath.Join(cmd.path, filename), metaBytes, 0644); err != nil {
ep := backup_util.EnterprisePacker{Data: metaBytes, MaxNodeID: 0}
protoBytes, err := ep.MarshalBinary()
if err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(cmd.path, filename), protoBytes, 0644); err != nil {
fmt.Fprintln(cmd.Stdout, "Error.")
return err
}
cmd.manifest.Meta.FileName = filename
cmd.manifest.Meta.Size = int64(len(metaBytes))
cmd.BackupFiles = append(cmd.BackupFiles, filename)
}
return nil

View File

@ -3,17 +3,22 @@ package backup_util
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strings"
"encoding/json"
"github.com/gogo/protobuf/proto"
internal "github.com/influxdata/influxdb/cmd/influxd/backup_util/internal"
"github.com/influxdata/influxdb/services/snapshotter"
"io/ioutil"
"path/filepath"
)
//go:generate protoc --gogo_out=. internal/data.proto
const (
// Suffix is a suffix added to the backup while it's in-process.
Suffix = ".pending"
@ -26,12 +31,29 @@ const (
BackupFilePattern = "%s.%s.%05d"
EnterpriseFileNamePattern = "20060102T150405Z"
OSSManifest = "OSS"
ENTManifest = "ENT"
)
// EnterprisePacker bundles a raw metastore snapshot together with the highest
// node ID, in the protobuf envelope used by the enterprise backup format.
type EnterprisePacker struct {
	Data      []byte
	MaxNodeID uint64
}

// MarshalBinary encodes the packer as an internal.EnterpriseData protobuf message.
func (ep EnterprisePacker) MarshalBinary() ([]byte, error) {
	msg := internal.EnterpriseData{
		Data:      ep.Data,
		MaxNodeID: &ep.MaxNodeID,
	}
	return proto.Marshal(&msg)
}

// UnmarshalBinary decodes an internal.EnterpriseData protobuf message into the packer.
func (ep *EnterprisePacker) UnmarshalBinary(data []byte) error {
	var msg internal.EnterpriseData
	if err := proto.Unmarshal(data, &msg); err != nil {
		return err
	}
	ep.Data = msg.GetData()
	ep.MaxNodeID = msg.GetMaxNodeID()
	return nil
}
func GetMetaBytes(fname string) ([]byte, error) {
f, err := os.Open(fname)
if err != nil {
@ -65,10 +87,9 @@ func GetMetaBytes(fname string) ([]byte, error) {
// If Limited is false, the manifest contains a full backup, otherwise
// it is a partial backup.
type Manifest struct {
Platform string `json:"platform"`
Meta MetaEntry `json:"meta"`
Limited bool `json:"limited"`
Files []Entry `json:"files"`
Meta MetaEntry `json:"meta"`
Limited bool `json:"limited"`
Files []Entry `json:"files"`
// If limited is true, then one (or all) of the following fields will be set
@ -123,6 +144,64 @@ func (manifest *Manifest) Save(filename string) error {
return ioutil.WriteFile(filename, b, 0600)
}
// LoadIncremental loads multiple manifest files from a given directory and
// merges them into a single incremental-restore view: the MetaEntry from the
// most recent manifest, plus — for every shard ID — the most recently
// modified backup file entry whose backing file still exists on disk.
// Manifest file names begin with a UTC timestamp (EnterpriseFileNamePattern),
// so a reverse lexical sort puts the most recent manifest first.
func LoadIncremental(dir string) (*MetaEntry, map[uint64]*Entry, error) {
	manifests, err := filepath.Glob(filepath.Join(dir, "*.manifest"))
	if err != nil {
		return nil, nil, err
	}
	shards := make(map[uint64]*Entry)

	// An empty directory is not an error: return no meta and an empty shard map.
	if len(manifests) == 0 {
		return nil, shards, nil
	}

	sort.Sort(sort.Reverse(sort.StringSlice(manifests)))
	var metaEntry MetaEntry

	for _, fileName := range manifests {
		fi, err := os.Stat(fileName)
		if err != nil {
			return nil, nil, err
		}

		// Skip directories that happen to match the *.manifest glob.
		if fi.IsDir() {
			continue
		}

		f, err := os.Open(fileName)
		if err != nil {
			return nil, nil, err
		}
		var manifest Manifest
		err = json.NewDecoder(f).Decode(&manifest)
		// Close before the error check so the handle never leaks.
		f.Close()
		if err != nil {
			return nil, nil, fmt.Errorf("read manifest: %v", err)
		}

		// sorted (descending) above, so first manifest is most recent
		if metaEntry.FileName == "" {
			metaEntry = manifest.Meta
		}

		for i := range manifest.Files {
			// sh is a fresh variable each iteration, so taking &sh below is safe.
			sh := manifest.Files[i]
			// Ignore entries whose backup file is missing from the directory.
			if _, err := os.Stat(filepath.Join(dir, sh.FileName)); err != nil {
				continue
			}

			// Keep only the newest entry (by LastModified) per shard ID.
			e := shards[sh.ShardID]
			if e == nil || sh.LastModified > e.LastModified {
				shards[sh.ShardID] = &sh
			}
		}
	}

	return &metaEntry, shards, nil
}
type CountingWriter struct {
io.Writer
Total int64 // Total # of bytes transferred

View File

@ -0,0 +1,71 @@
// Code generated by protoc-gen-gogo.
// source: internal/data.proto
// DO NOT EDIT!
/*
Package backup_util is a generated protocol buffer package.
It is generated from these files:
internal/data.proto
It has these top-level messages:
EnterpriseData
*/
package backup_util
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type EnterpriseData struct {
Data []byte `protobuf:"bytes,1,req,name=Data" json:"Data,omitempty"`
MaxNodeID *uint64 `protobuf:"varint,2,req,name=MaxNodeID" json:"MaxNodeID,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *EnterpriseData) Reset() { *m = EnterpriseData{} }
func (m *EnterpriseData) String() string { return proto.CompactTextString(m) }
func (*EnterpriseData) ProtoMessage() {}
func (*EnterpriseData) Descriptor() ([]byte, []int) { return fileDescriptorData, []int{0} }
func (m *EnterpriseData) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
func (m *EnterpriseData) GetMaxNodeID() uint64 {
if m != nil && m.MaxNodeID != nil {
return *m.MaxNodeID
}
return 0
}
func init() {
proto.RegisterType((*EnterpriseData)(nil), "backup_util.EnterpriseData")
}
func init() { proto.RegisterFile("internal/data.proto", fileDescriptorData) }
var fileDescriptorData = []byte{
// 110 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcc, 0x2b, 0x49,
0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x4f, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
0xe2, 0x4e, 0x4a, 0x4c, 0xce, 0x2e, 0x2d, 0x88, 0x2f, 0x2d, 0xc9, 0xcc, 0x51, 0x72, 0xe2, 0xe2,
0x73, 0x05, 0xa9, 0x29, 0x28, 0xca, 0x2c, 0x4e, 0x75, 0x49, 0x2c, 0x49, 0x14, 0x12, 0xe2, 0x62,
0x01, 0xd1, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x0c, 0x17, 0xa7, 0x6f,
0x62, 0x85, 0x5f, 0x7e, 0x4a, 0xaa, 0xa7, 0x8b, 0x04, 0x93, 0x02, 0x93, 0x06, 0x4b, 0x10, 0x42,
0x00, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xda, 0x53, 0xc6, 0x66, 0x00, 0x00, 0x00,
}

View File

@ -0,0 +1,12 @@
package backup_util;
//========================================================================
//
// Metadata
//
//========================================================================
// EnterpriseData is the envelope written for enterprise-format backups:
// it carries the raw metastore snapshot plus the highest node ID.
message EnterpriseData {
	required bytes Data = 1;       // raw metastore snapshot bytes
	required uint64 MaxNodeID = 2; // highest node ID seen at backup time
}

View File

@ -10,29 +10,50 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"compress/gzip"
"github.com/influxdata/influxdb/cmd/influxd/backup_util"
tarstream "github.com/influxdata/influxdb/pkg/tar"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/services/snapshotter"
)
// Command represents the program execution for "influxd restore".
type Command struct {
Stdout io.Writer
Stderr io.Writer
// The logger passed to the ticker during execution.
StdoutLogger *log.Logger
StderrLogger *log.Logger
backupFilesPath string
metadir string
datadir string
database string
retention string
shard string
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
host string
path string
client *snapshotter.Client
backupFilesPath string
metadir string
datadir string
destinationDatabase string
sourceDatabase string
backupRetention string
restoreRetention string
shard uint64
enterprise bool
online bool
manifestMeta *backup_util.MetaEntry
manifestFiles map[uint64]*backup_util.Entry
// TODO: when the new meta stuff is done this should not be exported or be gone
MetaConfig *meta.Config
shardIDMap map[uint64]uint64
}
// NewCommand returns a new instance of Command with default settings.
@ -46,19 +67,32 @@ func NewCommand() *Command {
// Run executes the program.
// Run executes the restore program: it wires up the loggers, parses the
// command-line flags, and dispatches to one of the three restore modes
// (enterprise online, legacy online, or offline).
func (cmd *Command) Run(args ...string) error {
	// Set up logger.
	cmd.StdoutLogger = log.New(cmd.Stdout, "", log.LstdFlags)
	cmd.StderrLogger = log.New(cmd.Stderr, "", log.LstdFlags)

	if err := cmd.parseFlags(args); err != nil {
		return err
	}

	switch {
	case cmd.enterprise:
		return cmd.runOnlineEnterprise()
	case cmd.online:
		return cmd.runOnlineLegacy()
	default:
		return cmd.runOffline()
	}
}
func (cmd *Command) runOffline() error {
if cmd.metadir != "" {
if err := cmd.unpackMeta(); err != nil {
return err
}
}
if cmd.shard != "" {
if cmd.shard != 0 {
return cmd.unpackShard(cmd.shard)
} else if cmd.retention != "" {
} else if cmd.restoreRetention != "" {
return cmd.unpackRetention()
} else if cmd.datadir != "" {
return cmd.unpackDatabase()
@ -66,14 +100,49 @@ func (cmd *Command) Run(args ...string) error {
return nil
}
// runOnlineEnterprise performs a live restore from an enterprise-format
// backup: first the metastore is merged on the server, then the shard
// archives listed in the manifest are uploaded.
func (cmd *Command) runOnlineEnterprise() error {
	if err := cmd.updateMetaEnterprise(); err != nil {
		cmd.StderrLogger.Printf("error updating meta: %v", err)
		return err
	}

	if err := cmd.uploadShardsEnterprise(); err != nil {
		cmd.StderrLogger.Printf("error updating shards: %v", err)
		return err
	}

	return nil
}
// runOnlineLegacy performs a live restore from a legacy (OSS-format) backup:
// the metastore snapshot is merged on the server, then the matching shard
// backup files are uploaded.
func (cmd *Command) runOnlineLegacy() error {
	if err := cmd.updateMetaLegacy(); err != nil {
		cmd.StderrLogger.Printf("error updating meta: %v", err)
		return err
	}

	if err := cmd.uploadShardsLegacy(); err != nil {
		cmd.StderrLogger.Printf("error updating shards: %v", err)
		return err
	}

	return nil
}
// parseFlags parses and validates the command line arguments.
func (cmd *Command) parseFlags(args []string) error {
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.StringVar(&cmd.host, "host", "localhost:8088", "")
fs.StringVar(&cmd.metadir, "metadir", "", "")
fs.StringVar(&cmd.datadir, "datadir", "", "")
fs.StringVar(&cmd.database, "database", "", "")
fs.StringVar(&cmd.retention, "retention", "", "")
fs.StringVar(&cmd.shard, "shard", "", "")
fs.StringVar(&cmd.destinationDatabase, "database", "", "")
fs.StringVar(&cmd.restoreRetention, "retention", "", "")
fs.StringVar(&cmd.sourceDatabase, "db", "", "")
fs.StringVar(&cmd.destinationDatabase, "newdb", "", "")
fs.StringVar(&cmd.backupRetention, "rp", "", "")
fs.StringVar(&cmd.restoreRetention, "newrp", "", "")
fs.Uint64Var(&cmd.shard, "shard", 0, "")
fs.BoolVar(&cmd.online, "online", false, "")
fs.BoolVar(&cmd.enterprise, "enterprise", false, "")
fs.SetOutput(cmd.Stdout)
fs.Usage = cmd.printUsage
if err := fs.Parse(args); err != nil {
@ -82,6 +151,7 @@ func (cmd *Command) parseFlags(args []string) error {
cmd.MetaConfig = meta.NewConfig()
cmd.MetaConfig.Dir = cmd.metadir
cmd.client = snapshotter.NewClient(cmd.host)
// Require output path.
cmd.backupFilesPath = fs.Arg(0)
@ -89,24 +159,53 @@ func (cmd *Command) parseFlags(args []string) error {
return fmt.Errorf("path with backup files required")
}
// validate the arguments
if cmd.metadir == "" && cmd.database == "" {
return fmt.Errorf("-metadir or -database are required to restore")
fi, err := os.Stat(cmd.backupFilesPath)
if err != nil || !fi.IsDir() {
return fmt.Errorf("backup path should be a valid directory: %s", cmd.backupFilesPath)
}
if cmd.database != "" && cmd.datadir == "" {
return fmt.Errorf("-datadir is required to restore")
}
if cmd.enterprise || cmd.online {
// validate the arguments
if cmd.shard != "" {
if cmd.database == "" {
return fmt.Errorf("-database is required to restore shard")
if cmd.metadir != "" {
return fmt.Errorf("offline parameter metadir found, not compatible with -enterprise")
}
if cmd.retention == "" {
return fmt.Errorf("-retention is required to restore shard")
if cmd.datadir != "" {
return fmt.Errorf("offline parameter datadir found, not compatible with -enterprise")
}
if cmd.restoreRetention == "" {
cmd.restoreRetention = cmd.backupRetention
}
if cmd.enterprise {
var err error
cmd.manifestMeta, cmd.manifestFiles, err = backup_util.LoadIncremental(cmd.backupFilesPath)
if err != nil {
return fmt.Errorf("restore failed while processing manifest files: %s", err.Error())
}
}
} else {
// validate the arguments
if cmd.metadir == "" && cmd.destinationDatabase == "" {
return fmt.Errorf("-metadir or -destinationDatabase are required to restore")
}
if cmd.destinationDatabase != "" && cmd.datadir == "" {
return fmt.Errorf("-datadir is required to restore")
}
if cmd.shard != 0 {
if cmd.destinationDatabase == "" {
return fmt.Errorf("-destinationDatabase is required to restore shard")
}
if cmd.backupRetention == "" {
return fmt.Errorf("-retention is required to restore shard")
}
} else if cmd.backupRetention != "" && cmd.destinationDatabase == "" {
return fmt.Errorf("-destinationDatabase is required to restore retention policy")
}
} else if cmd.retention != "" && cmd.database == "" {
return fmt.Errorf("-database is required to restore retention policy")
}
return nil
@ -171,7 +270,7 @@ func (cmd *Command) unpackMeta() error {
c.Dir = cmd.metadir
// Create the meta dir
if os.MkdirAll(c.Dir, 0700); err != nil {
if err := os.MkdirAll(c.Dir, 0700); err != nil {
return err
}
@ -212,11 +311,75 @@ func (cmd *Command) unpackMeta() error {
return nil
}
// updateMetaEnterprise reads the enterprise-format (.meta) file named in the
// manifest, unpacks the protobuf envelope, and sends the contained metastore
// bytes to the server for a live merge. The server's reply — a mapping from
// old shard IDs to the shard IDs newly created on the target system — is
// saved in cmd.shardIDMap for the subsequent shard uploads.
func (cmd *Command) updateMetaEnterprise() error {
	fileName := filepath.Join(cmd.backupFilesPath, cmd.manifestMeta.FileName)
	fileBytes, err := ioutil.ReadFile(fileName)
	if err != nil {
		return err
	}

	var ep backup_util.EnterprisePacker
	// Bug fix: the unmarshal error was previously ignored, so a corrupt meta
	// file would silently upload empty metadata.
	if err := ep.UnmarshalBinary(fileBytes); err != nil {
		return fmt.Errorf("unmarshal meta file %s: %v", fileName, err)
	}
	metaBytes := ep.Data

	req := &snapshotter.Request{
		Type:                   snapshotter.RequestMetaStoreUpdate,
		BackupDatabase:         cmd.sourceDatabase,
		RestoreDatabase:        cmd.destinationDatabase,
		BackupRetentionPolicy:  cmd.backupRetention,
		RestoreRetentionPolicy: cmd.restoreRetention,
		UploadSize:             int64(len(metaBytes)),
	}

	shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes))
	cmd.shardIDMap = shardIDMap
	return err
}
// updateMetaLegacy locates the most recent legacy-format metastore snapshot
// in the backup directory and sends it to the influx server for a live merge
// of metadata. The server's reply — a mapping from old shard IDs to newly
// created shard IDs — is saved in cmd.shardIDMap for the shard uploads.
func (cmd *Command) updateMetaLegacy() error {
	// find the meta file(s); glob results are sorted lexically, and the file
	// names carry an increasing counter, so the last entry is the most recent.
	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup_util.Metafile+".*"))
	if err != nil {
		return err
	}

	if len(metaFiles) == 0 {
		return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
	}

	fileName := metaFiles[len(metaFiles)-1]
	cmd.StdoutLogger.Printf("Using metastore snapshot: %v\n", fileName)
	metaBytes, err := backup_util.GetMetaBytes(fileName)
	// Bug fix: this error was previously discarded (overwritten by the
	// UpdateMeta call below), so a bad snapshot uploaded nil metadata.
	if err != nil {
		return fmt.Errorf("read metastore snapshot %s: %v", fileName, err)
	}

	req := &snapshotter.Request{
		Type:                   snapshotter.RequestMetaStoreUpdate,
		BackupDatabase:         cmd.sourceDatabase,
		RestoreDatabase:        cmd.destinationDatabase,
		BackupRetentionPolicy:  cmd.backupRetention,
		RestoreRetentionPolicy: cmd.restoreRetention,
		UploadSize:             int64(len(metaBytes)),
	}

	shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes))
	cmd.shardIDMap = shardIDMap
	return err
}
// unpackShard will look for all backup files in the path matching this shard ID
// and restore them to the data dir
func (cmd *Command) unpackShard(shardID string) error {
func (cmd *Command) unpackShard(shard uint64) error {
shardID := strconv.FormatUint(shard, 10)
// make sure the shard isn't already there so we don't clobber anything
restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention, shardID)
restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase, cmd.restoreRetention, shardID)
if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("shard already present: %s", restorePath)
}
@ -227,21 +390,93 @@ func (cmd *Command) unpackShard(shardID string) error {
}
// find the shard backup files
pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup_util.BackupFilePattern, cmd.database, cmd.retention, id))
pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup_util.BackupFilePattern, cmd.destinationDatabase, cmd.restoreRetention, id))
return cmd.unpackFiles(pat + ".*")
}
// unpackDatabase will look for all backup files in the path matching this database
// uploadShardsEnterprise walks the shard entries collected from the manifest
// files and uploads every archive matching the -db/-rp/-shard filters to the
// server for a live restore.
func (cmd *Command) uploadShardsEnterprise() error {
	for _, file := range cmd.manifestFiles {
		// Empty filter values match everything.
		if cmd.sourceDatabase != "" && cmd.sourceDatabase != file.Database {
			continue
		}
		if cmd.backupRetention != "" && cmd.backupRetention != file.Policy {
			continue
		}
		if cmd.shard != 0 && cmd.shard != file.ShardID {
			continue
		}

		cmd.StdoutLogger.Printf("Restoring shard %d live from backup %s\n", file.ShardID, file.FileName)
		if err := cmd.uploadShardFileEnterprise(file); err != nil {
			return err
		}
	}
	return nil
}

// uploadShardFileEnterprise streams one gzipped tar shard archive to the
// server, closing the file and gzip reader on every exit path.
func (cmd *Command) uploadShardFileEnterprise(file *backup_util.Entry) error {
	f, err := os.Open(filepath.Join(cmd.backupFilesPath, file.FileName))
	if err != nil {
		// Bug fix: f was previously Close()d here even though Open failed.
		return err
	}
	defer f.Close()

	gr, err := gzip.NewReader(f)
	if err != nil {
		return err
	}
	// Bug fix: the gzip reader was never closed.
	defer gr.Close()

	// Fall back to the shard's original database when no explicit destination
	// was given. Bug fix: targetDB was computed but cmd.destinationDatabase
	// was passed to UploadShard, so the fallback never took effect.
	targetDB := cmd.destinationDatabase
	if targetDB == "" {
		targetDB = file.Database
	}

	tr := tar.NewReader(gr)
	return cmd.client.UploadShard(file.ShardID, cmd.shardIDMap[file.ShardID], targetDB, cmd.restoreRetention, tr)
}
// unpackFiles will look for backup files matching the pattern and restore them to the data dir
// uploadShardsLegacy finds the legacy-format shard backup files for the
// source database and uploads each one to the server for a live restore.
func (cmd *Command) uploadShardsLegacy() error {
	// find the source database's backup files
	pat := fmt.Sprintf("%s.*", filepath.Join(cmd.backupFilesPath, cmd.sourceDatabase))
	cmd.StdoutLogger.Printf("Restoring live from backup %s\n", pat)
	backupFiles, err := filepath.Glob(pat)
	if err != nil {
		return err
	}
	if len(backupFiles) == 0 {
		return fmt.Errorf("no backup files in %s", cmd.backupFilesPath)
	}

	for _, fn := range backupFiles {
		// Backup file names follow db.rp.shardID.counter.
		// NOTE(review): splitting the full path assumes the directory part
		// contains no '.' characters — confirm against backup naming.
		parts := strings.Split(fn, ".")
		if len(parts) != 4 {
			cmd.StderrLogger.Printf("Skipping mis-named backup file: %s", fn)
			// Bug fix: previously fell through and indexed parts[2], which
			// panics when the name has fewer than three segments.
			continue
		}
		shardID, err := strconv.ParseUint(parts[2], 10, 64)
		if err != nil {
			return err
		}

		f, err := os.Open(fn)
		if err != nil {
			return err
		}
		tr := tar.NewReader(f)
		if err := cmd.client.UploadShard(shardID, cmd.shardIDMap[shardID], cmd.destinationDatabase, cmd.restoreRetention, tr); err != nil {
			f.Close()
			return err
		}
		f.Close()
	}

	return nil
}
// unpackDatabase will look for all backup files in the path matching this
// destinationDatabase and restore them to the data dir.
func (cmd *Command) unpackDatabase() error {
	// make sure the shard isn't already there so we don't clobber anything
	// NOTE(review): this only errors on unexpected Stat failures; an existing
	// path (err == nil) passes the check — confirm whether that is intended.
	// (Bug fix: the diff-rendered source declared restorePath and pat twice,
	// once with the removed cmd.database and once with the new field.)
	restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase)
	if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("database already present: %s", restorePath)
	}

	// find the database backup files
	pat := filepath.Join(cmd.backupFilesPath, cmd.destinationDatabase)
	return cmd.unpackFiles(pat + ".*")
}
@ -249,19 +484,19 @@ func (cmd *Command) unpackDatabase() error {
// and restore them to the data dir
// unpackRetention will look for all backup files in the path matching this
// retention policy and restore them to the data dir.
func (cmd *Command) unpackRetention() error {
	// make sure the shard isn't already there so we don't clobber anything
	// (Bug fix: the diff-rendered source declared restorePath and pat twice,
	// once with the removed cmd.database/cmd.retention fields and once with
	// the new destinationDatabase/restoreRetention fields.)
	restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase, cmd.restoreRetention)
	if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("retention already present: %s", restorePath)
	}

	// find the retention backup files
	pat := filepath.Join(cmd.backupFilesPath, cmd.destinationDatabase)
	return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.restoreRetention))
}
// unpackFiles will look for backup files matching the pattern and restore them to the data dir
func (cmd *Command) unpackFiles(pat string) error {
fmt.Printf("Restoring from backup %s\n", pat)
cmd.StdoutLogger.Printf("Restoring offline from backup %s\n", pat)
backupFiles, err := filepath.Glob(pat)
if err != nil {
@ -289,67 +524,64 @@ func (cmd *Command) unpackTar(tarFile string) error {
}
defer f.Close()
tr := tar.NewReader(f)
for {
hdr, err := tr.Next()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
if err := cmd.unpackFile(tr, hdr.Name); err != nil {
return err
}
}
}
// unpackFile will copy the current file from the tar archive to the data dir
func (cmd *Command) unpackFile(tr *tar.Reader, fileName string) error {
nativeFileName := filepath.FromSlash(fileName)
fn := filepath.Join(cmd.datadir, nativeFileName)
fmt.Printf("unpacking %s\n", fn)
if err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil {
return fmt.Errorf("error making restore dir: %s", err.Error())
// should get us ["db","rp", "00001", "00"]
pathParts := strings.Split(filepath.Base(tarFile), ".")
if len(pathParts) != 4 {
return fmt.Errorf("backup tarfile name incorrect format")
}
ff, err := os.Create(fn)
if err != nil {
return err
}
defer ff.Close()
shardPath := filepath.Join(cmd.datadir, pathParts[0], pathParts[1], strings.Trim(pathParts[2], "0"))
os.MkdirAll(shardPath, 0755)
if _, err := io.Copy(ff, tr); err != nil {
return err
}
return nil
return tarstream.Restore(f, shardPath)
}
// printUsage prints the usage message to STDERR.
func (cmd *Command) printUsage() {
fmt.Fprintf(cmd.Stdout, `Uses backups from the PATH to restore the metastore, databases,
retention policies, or specific shards. The InfluxDB process must not be
running during a restore.
retention policies, or specific shards. Default mode requires the instance to be stopped before running, and will wipe
all databases from the system (e.g., for disaster recovery). The improved online and enterprise modes requires
the instance to be running, and the database name used must not already exist.
Usage: influxd restore [flags] PATH
Usage: influxd restore [-enterprise] [flags] PATH
The default mode consumes files in an OSS only file format. PATH is a directory containing the backup data
Options:
-metadir <path>
Optional. If set the metastore will be recovered to the given path.
-datadir <path>
Optional. If set the restore process will recover the specified
database, retention policy or shard to the given directory.
-database <name>
Optional. Required if no metadir given. Will restore the database
TSM files.
Optional. Required if no metadir given. Will restore a single database's data.
-retention <name>
Optional. If given, database is required. Will restore the retention policy's
TSM files.
Optional. If given, -database is required. Will restore the retention policy's
data.
-shard <id>
Optional. If given, database and retention are required. Will restore the shard's
TSM files.
Optional. If given, -database and -retention are required. Will restore the shard's
data.
-online
Optional. If given, the restore will be done using the new process, detailed below. All other arguments
above should be omitted.
The -enterprise restore mode consumes files in an improved format that includes a file manifest.
Options:
-host <host:port>
The host to connect to and perform a snapshot of. Defaults to '127.0.0.1:8088'.
-db <name>
Identifies the database from the backup that will be restored.
-newdb <name>
The name of the database into which the archived data will be imported on the target system.
If not given, then the value of -db is used. The new database name must be unique to the target system.
-rp <name>
Identifies the retention policy from the backup that will be restored. Requires that -db is set.
-newrp <name>
The name of the retention policy that will be created on the target system. Requires that -rp is set.
If not given, the value of -rp is used.
-shard <id>
Optional. If given, -db and -rp are required. Will restore the single shard's data.
`)
}

View File

@ -3,11 +3,15 @@ influxd-restore(1)
NAME
----
influxd-restore - Restores the metastore, databases, retention policies, or specific shards
influxd-restore - Uses backups from the PATH to restore the metastore, databases, retention policies, or specific
shards. Default mode requires the instance to be stopped before running, and will wipe all databases from the
system (e.g., for disaster recovery). The improved online and enterprise modes require the instance to be running,
and the database name used must not already exist.
SYNOPSIS
--------
'influxd restore' [options] PATH
'influxd restore' [-enterprise] [flags] PATH
DESCRIPTION
-----------
@ -15,20 +19,52 @@ Uses backups from the PATH to restore the metastore, databases, retention polici
OPTIONS
-------
The default mode consumes files in an OSS only file format. PATH is a directory containing the backup data
-metadir <path>::
If set, the metastore will be recovered to the given path. Optional.
Optional. If set the metastore will be recovered to the given path.
-datadir <path>::
If set, the restore process will recover the specified database, retention policy, or shard to the given directory. Optional.
Optional. If set the restore process will recover the specified
database, retention policy or shard to the given directory.
-database <name>::
Will restore the database TSM files. Required if no metadir is given. Optional.
Optional. Required if no metadir given. Will restore a single database's data.
-retention <name>::
Will restore the retention policy's TSM files. If given, database is required. Optional.
Optional. If given, -database is required. Will restore the retention policy's
data.
-shard <id>::
Will restore the shard's TSM files. If given, database and retention are required. Optional.
Optional. If given, -database and -retention are required. Will restore the shard's
data.
-online::
Optional. If given, the restore will be done using the new process, detailed below. All other arguments
above should be omitted.
The -enterprise restore mode consumes files in an improved format that includes a file manifest.
Options:
-host <host:port>::
The host to connect to and perform a snapshot of. Defaults to '127.0.0.1:8088'.
-db <name>::
Identifies the database from the backup that will be restored.
-newdb <name>::
The name of the database into which the archived data will be imported on the target system.
If not given, then the value of -db is used. The new database name must be unique to the target system.
-rp <name>::
Identifies the retention policy from the backup that will be restored. Requires that -db is set.
-newrp <name>::
The name of the retention policy that will be created on the target system. Requires that -rp is set.
If not given, the value of -rp is used.
-shard <id>::
Optional. If given, -db and -rp are required. Will restore the single shard's data.
SEE ALSO
--------

20
pkg/tar/file_unix.go Normal file
View File

@ -0,0 +1,20 @@
// +build !windows
package tar
import "os"
func syncDir(dirName string) error {
// fsync the dir to flush the rename
dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir)
if err != nil {
return err
}
defer dir.Close()
return dir.Sync()
}
// renameFile renames the file at oldpath to newpath.
// On POSIX systems os.Rename atomically replaces an existing newpath, so no
// pre-delete is needed (contrast with the Windows variant of this helper).
func renameFile(oldpath, newpath string) error {
	return os.Rename(oldpath, newpath)
}

19
pkg/tar/file_windows.go Normal file
View File

@ -0,0 +1,19 @@
package tar
import "os"
// syncDir is a no-op on Windows — presumably because directory handles
// cannot be fsynced there the way they can on POSIX systems (TODO confirm).
func syncDir(dirName string) error {
	return nil
}
// renameFile renames the file at oldpath to newpath.
// If newpath already exists, it will be removed before renaming.
func renameFile(oldpath, newpath string) error {
if _, err := os.Stat(newpath); err == nil {
if err = os.Remove(newpath); nil != err {
return err
}
}
return os.Rename(oldpath, newpath)
}

160
pkg/tar/stream.go Normal file
View File

@ -0,0 +1,160 @@
package tar
import (
"archive/tar"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
// Stream is a convenience function for creating a tar of a shard dir. It walks
// over the directory and subdirs, possibly writing each file to a tar writer
// stream. By default StreamFile is used, which will result in all files being
// written. A custom writeFunc can be passed so that each file may be written,
// modified+written, or skipped depending on the custom logic. Entry names in
// the resulting archive are prefixed with relativePath.
func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error) error {
	tw := tar.NewWriter(w)
	defer tw.Close()

	if writeFunc == nil {
		writeFunc = StreamFile
	}

	return filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Skip adding an entry for the root dir
		if dir == path && f.IsDir() {
			return nil
		}

		// Figure out the full relative path including any sub-dirs
		subDir, _ := filepath.Split(path)
		subDir, err = filepath.Rel(dir, subDir)
		if err != nil {
			return err
		}

		return writeFunc(f, filepath.Join(relativePath, subDir), path, tw)
	})
}
// SinceFilterTarFile generates a filtering function for Stream that checks an
// incoming file and only writes it to the stream if its mod time is later
// than since. Example: to tar only files newer than a certain datetime, use
// tar.Stream(w, dir, relativePath, SinceFilterTarFile(datetime))
func SinceFilterTarFile(since time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
	return func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
		// Too old (or equal): skip without error.
		if !f.ModTime().After(since) {
			return nil
		}
		return StreamFile(f, shardRelativePath, fullPath, tw)
	}
}
// stream a single file to tw, extending the header name using the shardRelativePath
func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
h, err := tar.FileInfoHeader(f, f.Name())
if err != nil {
return err
}
h.Name = filepath.ToSlash(filepath.Join(shardRelativePath, f.Name()))
if err := tw.WriteHeader(h); err != nil {
return err
}
if !f.Mode().IsRegular() {
return nil
}
fr, err := os.Open(fullPath)
if err != nil {
return err
}
defer fr.Close()
_, err = io.CopyN(tw, fr, h.Size)
return err
}
// Restore reads a tar archive from r and extracts each entry into dir,
// keeping only the part of the entry path below its leading db/rp/shard-id
// components. Once the archive is exhausted the directory is synced.
func Restore(r io.Reader, dir string) error {
	tr := tar.NewReader(r)
	for {
		err := extractFile(tr, dir)
		if err == io.EOF {
			// End of archive: flush directory metadata and finish.
			break
		}
		if err != nil {
			return err
		}
	}
	return syncDir(dir)
}
// extractFile reads the next entry from tr and writes it into dir, keeping
// only the portion of the archive path below the leading three path
// components (db/rp/shard-id). Files are written to a ".tmp" sibling, synced,
// and then renamed into place. Returns io.EOF (from tr.Next) once the
// archive is exhausted.
func extractFile(tr *tar.Reader, dir string) error {
	// Read next archive file.
	hdr, err := tr.Next()
	if err != nil {
		return err
	}

	// The hdr.Name is the relative path of the file from the root data dir.
	// e.g (db/rp/1/xxxxx.tsm or db/rp/1/index/xxxxxx.tsi)
	sections := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator))
	if len(sections) < 3 {
		return fmt.Errorf("invalid archive path: %s", hdr.Name)
	}
	// NOTE(review): sections[3:] drops the first three components, so a
	// 3-component name passes the guard above but yields an empty
	// relativePath (destPath == dir). Confirm whether the guard should be
	// len(sections) < 4.
	relativePath := filepath.Join(sections[3:]...)

	subDir, _ := filepath.Split(relativePath)
	// If this is a directory entry (usually just `index` for tsi), create it and move on.
	if hdr.Typeflag == tar.TypeDir {
		if err := os.MkdirAll(filepath.Join(dir, subDir), os.FileMode(hdr.Mode).Perm()); err != nil {
			return err
		}
		return nil
	}

	// Make sure the dir we need to write into exists. It should, but just double check in
	// case we get a slightly invalid tarball.
	if subDir != "" {
		if err := os.MkdirAll(filepath.Join(dir, subDir), 0755); err != nil {
			return err
		}
	}

	destPath := filepath.Join(dir, relativePath)
	tmp := destPath + ".tmp"

	// Create new file on disk. Writing to a temp name first keeps a partial
	// extraction from clobbering an existing file at destPath.
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode).Perm())
	if err != nil {
		return err
	}
	defer f.Close()

	// Copy from archive to the file.
	if _, err := io.CopyN(f, tr, hdr.Size); err != nil {
		return err
	}

	// Sync to disk & close before the rename so the data is durable under
	// its final name.
	if err := f.Sync(); err != nil {
		return err
	}

	if err := f.Close(); err != nil {
		return err
	}

	return renameFile(tmp, destPath)
}

View File

@ -10,6 +10,7 @@ import (
"time"
"unicode"
"fmt"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/models"
@ -783,6 +784,106 @@ func (data *Data) hasAdminUser() bool {
return false
}
// ImportData imports selected data into the current metadata.
// If backupDBName is non-empty, only that database is imported; restoreDBName,
// backupRPName and restoreRPName can then be used to rename the database and
// select/rename a single retention policy. With an empty backupDBName, every
// database in other except _internal is imported under its original name.
// Returns a map of shard IDs in the old metadata to shard IDs in the new
// metadata, along with the list of databases created — both of which assist
// in importing existing shard data during a database restore.
func (data *Data) ImportData(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string) (map[uint64]uint64, []string, error) {
	shardIDMap := make(map[uint64]uint64)

	// Single-database restore.
	if backupDBName != "" {
		dbName, err := data.importOneDB(other, backupDBName, restoreDBName, backupRPName, restoreRPName, shardIDMap)
		if err != nil {
			return nil, nil, err
		}
		return shardIDMap, []string{dbName}, nil
	}

	// No database selected: import all of them. If any one fails, the whole
	// operation is treated as a failure.
	var newDBs []string
	for _, dbi := range other.Databases {
		// The _internal monitoring database is never imported.
		if dbi.Name == "_internal" {
			continue
		}
		name, err := data.importOneDB(other, dbi.Name, "", "", "", shardIDMap)
		if err != nil {
			return nil, nil, err
		}
		newDBs = append(newDBs, name)
	}
	return shardIDMap, newDBs, nil
}
// importOneDB imports a single database (and optionally a single retention
// policy) from an external metadata object into data, renaming the database
// and/or retention policy if new names are provided.
//
// Shard group and shard IDs from the backup are renumbered into this
// metadata's ID space so they cannot collide with existing IDs; every
// old->new shard ID pair is recorded in shardIDMap. Returns the name the
// database was imported under.
//
// It is an error if the target database name already exists, or if the
// requested backup database/retention policy is missing from other.
func (data *Data) importOneDB(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string, shardIDMap map[uint64]uint64) (string, error) {
	dbPtr := other.Database(backupDBName)
	if dbPtr == nil {
		// Fixed typo in the message: "datbase" -> "database".
		return "", fmt.Errorf("imported metadata does not have database named %s", backupDBName)
	}

	if restoreDBName == "" {
		restoreDBName = backupDBName
	}

	// Restore is additive only: never clobber an existing database.
	if data.Database(restoreDBName) != nil {
		return "", errors.New("database already exists")
	}

	if err := data.CreateDatabase(restoreDBName); err != nil {
		return "", err
	}
	dbImport := data.Database(restoreDBName)

	if backupRPName != "" {
		// Import a single retention policy, possibly renamed; it becomes the
		// new database's only (and default) policy.
		rpPtr := dbPtr.RetentionPolicy(backupRPName)
		if rpPtr == nil {
			return "", fmt.Errorf("retention Policy not found in meta backup: %s.%s", backupDBName, backupRPName)
		}

		rpImport := rpPtr.clone()
		if restoreRPName == "" {
			restoreRPName = backupRPName
		}
		rpImport.Name = restoreRPName
		dbImport.RetentionPolicies = []RetentionPolicyInfo{rpImport}
		dbImport.DefaultRetentionPolicy = restoreRPName
	} else {
		// Import all retention policies without renaming.
		dbImport.DefaultRetentionPolicy = dbPtr.DefaultRetentionPolicy
		if dbPtr.RetentionPolicies != nil {
			dbImport.RetentionPolicies = make([]RetentionPolicyInfo, len(dbPtr.RetentionPolicies))
			for i := range dbPtr.RetentionPolicies {
				dbImport.RetentionPolicies[i] = dbPtr.RetentionPolicies[i].clone()
			}
		}
	}

	// Renumber the shard groups and shards for the imported retention
	// policy(ies) into this metadata's ID space.
	for _, rpImport := range dbImport.RetentionPolicies {
		for j, sgImport := range rpImport.ShardGroups {
			data.MaxShardGroupID++
			rpImport.ShardGroups[j].ID = data.MaxShardGroupID

			for k := range sgImport.Shards {
				data.MaxShardID++
				shardIDMap[sgImport.Shards[k].ID] = data.MaxShardID
				sgImport.Shards[k].ID = data.MaxShardID

				// OSS doesn't use Owners, but if we are importing this from
				// Enterprise we clear it out to avoid any issues if the DB is
				// ever exported again to bring back to Enterprise.
				sgImport.Shards[k].Owners = []ShardOwner{}
			}
		}
	}

	return restoreDBName, nil
}
// NodeInfo represents information about a single node in the cluster.
type NodeInfo struct {
ID uint64

View File

@ -745,6 +745,7 @@ var E_CreateNodeCommand_Command = &proto.ExtensionDesc{
Field: 101,
Name: "meta.CreateNodeCommand.command",
Tag: "bytes,101,opt,name=command",
Filename: "internal/meta.proto",
}
type DeleteNodeCommand struct {
@ -778,6 +779,7 @@ var E_DeleteNodeCommand_Command = &proto.ExtensionDesc{
Field: 102,
Name: "meta.DeleteNodeCommand.command",
Tag: "bytes,102,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateDatabaseCommand struct {
@ -811,6 +813,7 @@ var E_CreateDatabaseCommand_Command = &proto.ExtensionDesc{
Field: 103,
Name: "meta.CreateDatabaseCommand.command",
Tag: "bytes,103,opt,name=command",
Filename: "internal/meta.proto",
}
type DropDatabaseCommand struct {
@ -836,6 +839,7 @@ var E_DropDatabaseCommand_Command = &proto.ExtensionDesc{
Field: 104,
Name: "meta.DropDatabaseCommand.command",
Tag: "bytes,104,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateRetentionPolicyCommand struct {
@ -871,6 +875,7 @@ var E_CreateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 105,
Name: "meta.CreateRetentionPolicyCommand.command",
Tag: "bytes,105,opt,name=command",
Filename: "internal/meta.proto",
}
type DropRetentionPolicyCommand struct {
@ -904,6 +909,7 @@ var E_DropRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 106,
Name: "meta.DropRetentionPolicyCommand.command",
Tag: "bytes,106,opt,name=command",
Filename: "internal/meta.proto",
}
type SetDefaultRetentionPolicyCommand struct {
@ -939,6 +945,7 @@ var E_SetDefaultRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 107,
Name: "meta.SetDefaultRetentionPolicyCommand.command",
Tag: "bytes,107,opt,name=command",
Filename: "internal/meta.proto",
}
type UpdateRetentionPolicyCommand struct {
@ -998,6 +1005,7 @@ var E_UpdateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 108,
Name: "meta.UpdateRetentionPolicyCommand.command",
Tag: "bytes,108,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateShardGroupCommand struct {
@ -1039,6 +1047,7 @@ var E_CreateShardGroupCommand_Command = &proto.ExtensionDesc{
Field: 109,
Name: "meta.CreateShardGroupCommand.command",
Tag: "bytes,109,opt,name=command",
Filename: "internal/meta.proto",
}
type DeleteShardGroupCommand struct {
@ -1080,6 +1089,7 @@ var E_DeleteShardGroupCommand_Command = &proto.ExtensionDesc{
Field: 110,
Name: "meta.DeleteShardGroupCommand.command",
Tag: "bytes,110,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateContinuousQueryCommand struct {
@ -1123,6 +1133,7 @@ var E_CreateContinuousQueryCommand_Command = &proto.ExtensionDesc{
Field: 111,
Name: "meta.CreateContinuousQueryCommand.command",
Tag: "bytes,111,opt,name=command",
Filename: "internal/meta.proto",
}
type DropContinuousQueryCommand struct {
@ -1156,6 +1167,7 @@ var E_DropContinuousQueryCommand_Command = &proto.ExtensionDesc{
Field: 112,
Name: "meta.DropContinuousQueryCommand.command",
Tag: "bytes,112,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateUserCommand struct {
@ -1197,6 +1209,7 @@ var E_CreateUserCommand_Command = &proto.ExtensionDesc{
Field: 113,
Name: "meta.CreateUserCommand.command",
Tag: "bytes,113,opt,name=command",
Filename: "internal/meta.proto",
}
type DropUserCommand struct {
@ -1222,6 +1235,7 @@ var E_DropUserCommand_Command = &proto.ExtensionDesc{
Field: 114,
Name: "meta.DropUserCommand.command",
Tag: "bytes,114,opt,name=command",
Filename: "internal/meta.proto",
}
type UpdateUserCommand struct {
@ -1255,6 +1269,7 @@ var E_UpdateUserCommand_Command = &proto.ExtensionDesc{
Field: 115,
Name: "meta.UpdateUserCommand.command",
Tag: "bytes,115,opt,name=command",
Filename: "internal/meta.proto",
}
type SetPrivilegeCommand struct {
@ -1296,6 +1311,7 @@ var E_SetPrivilegeCommand_Command = &proto.ExtensionDesc{
Field: 116,
Name: "meta.SetPrivilegeCommand.command",
Tag: "bytes,116,opt,name=command",
Filename: "internal/meta.proto",
}
type SetDataCommand struct {
@ -1321,6 +1337,7 @@ var E_SetDataCommand_Command = &proto.ExtensionDesc{
Field: 117,
Name: "meta.SetDataCommand.command",
Tag: "bytes,117,opt,name=command",
Filename: "internal/meta.proto",
}
type SetAdminPrivilegeCommand struct {
@ -1354,6 +1371,7 @@ var E_SetAdminPrivilegeCommand_Command = &proto.ExtensionDesc{
Field: 118,
Name: "meta.SetAdminPrivilegeCommand.command",
Tag: "bytes,118,opt,name=command",
Filename: "internal/meta.proto",
}
type UpdateNodeCommand struct {
@ -1387,6 +1405,7 @@ var E_UpdateNodeCommand_Command = &proto.ExtensionDesc{
Field: 119,
Name: "meta.UpdateNodeCommand.command",
Tag: "bytes,119,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateSubscriptionCommand struct {
@ -1444,6 +1463,7 @@ var E_CreateSubscriptionCommand_Command = &proto.ExtensionDesc{
Field: 121,
Name: "meta.CreateSubscriptionCommand.command",
Tag: "bytes,121,opt,name=command",
Filename: "internal/meta.proto",
}
type DropSubscriptionCommand struct {
@ -1485,6 +1505,7 @@ var E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{
Field: 122,
Name: "meta.DropSubscriptionCommand.command",
Tag: "bytes,122,opt,name=command",
Filename: "internal/meta.proto",
}
type RemovePeerCommand struct {
@ -1518,6 +1539,7 @@ var E_RemovePeerCommand_Command = &proto.ExtensionDesc{
Field: 123,
Name: "meta.RemovePeerCommand.command",
Tag: "bytes,123,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateMetaNodeCommand struct {
@ -1559,6 +1581,7 @@ var E_CreateMetaNodeCommand_Command = &proto.ExtensionDesc{
Field: 124,
Name: "meta.CreateMetaNodeCommand.command",
Tag: "bytes,124,opt,name=command",
Filename: "internal/meta.proto",
}
type CreateDataNodeCommand struct {
@ -1592,6 +1615,7 @@ var E_CreateDataNodeCommand_Command = &proto.ExtensionDesc{
Field: 125,
Name: "meta.CreateDataNodeCommand.command",
Tag: "bytes,125,opt,name=command",
Filename: "internal/meta.proto",
}
type UpdateDataNodeCommand struct {
@ -1633,6 +1657,7 @@ var E_UpdateDataNodeCommand_Command = &proto.ExtensionDesc{
Field: 126,
Name: "meta.UpdateDataNodeCommand.command",
Tag: "bytes,126,opt,name=command",
Filename: "internal/meta.proto",
}
type DeleteMetaNodeCommand struct {
@ -1658,6 +1683,7 @@ var E_DeleteMetaNodeCommand_Command = &proto.ExtensionDesc{
Field: 127,
Name: "meta.DeleteMetaNodeCommand.command",
Tag: "bytes,127,opt,name=command",
Filename: "internal/meta.proto",
}
type DeleteDataNodeCommand struct {
@ -1683,6 +1709,7 @@ var E_DeleteDataNodeCommand_Command = &proto.ExtensionDesc{
Field: 128,
Name: "meta.DeleteDataNodeCommand.command",
Tag: "bytes,128,opt,name=command",
Filename: "internal/meta.proto",
}
type Response struct {
@ -1759,6 +1786,7 @@ var E_SetMetaNodeCommand_Command = &proto.ExtensionDesc{
Field: 129,
Name: "meta.SetMetaNodeCommand.command",
Tag: "bytes,129,opt,name=command",
Filename: "internal/meta.proto",
}
type DropShardCommand struct {
@ -1784,6 +1812,7 @@ var E_DropShardCommand_Command = &proto.ExtensionDesc{
Field: 130,
Name: "meta.DropShardCommand.command",
Tag: "bytes,130,opt,name=command",
Filename: "internal/meta.proto",
}
func init() {
@ -1865,107 +1894,118 @@ func init() {
func init() { proto.RegisterFile("internal/meta.proto", fileDescriptorMeta) }
var fileDescriptorMeta = []byte{
// 1617 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x58, 0x5b, 0x6f, 0x1b, 0xc5,
0x17, 0xd7, 0xda, 0x6b, 0xc7, 0x7b, 0x62, 0x27, 0xf6, 0x38, 0x97, 0x4d, 0x9b, 0xa4, 0xee, 0xe8,
0x7f, 0xf1, 0xff, 0x2f, 0x51, 0x24, 0x2b, 0x15, 0x42, 0x5c, 0xdb, 0xb8, 0xa5, 0x11, 0x4a, 0x1a,
0x62, 0x17, 0xde, 0xaa, 0x6e, 0xed, 0x49, 0xb3, 0x60, 0xef, 0x9a, 0xdd, 0x75, 0xd3, 0x50, 0x68,
0x03, 0x12, 0x42, 0x20, 0x21, 0xc1, 0x0b, 0x2f, 0x3c, 0xf1, 0xc6, 0x37, 0x40, 0x3c, 0xf0, 0x29,
0xf8, 0x42, 0x68, 0x66, 0xf6, 0x32, 0xbb, 0x3b, 0xb3, 0x69, 0xfb, 0x66, 0xcf, 0x39, 0x73, 0x7e,
0xbf, 0x39, 0xb7, 0x39, 0xb3, 0xd0, 0xb6, 0x9d, 0x80, 0x78, 0x8e, 0x35, 0x79, 0x7d, 0x4a, 0x02,
0xeb, 0xda, 0xcc, 0x73, 0x03, 0x17, 0xe9, 0xf4, 0x37, 0xfe, 0xad, 0x04, 0x7a, 0xdf, 0x0a, 0x2c,
0x54, 0x07, 0x7d, 0x48, 0xbc, 0xa9, 0xa9, 0x75, 0x4a, 0x5d, 0x1d, 0x35, 0xa0, 0xb2, 0xe7, 0x8c,
0xc9, 0x13, 0xb3, 0xc4, 0xfe, 0xb6, 0xc0, 0xd8, 0x9d, 0xcc, 0xfd, 0x80, 0x78, 0x7b, 0x7d, 0xb3,
0xcc, 0x96, 0xb6, 0xa0, 0x72, 0xe0, 0x8e, 0x89, 0x6f, 0xea, 0x9d, 0x72, 0x77, 0xb1, 0xb7, 0x74,
0x8d, 0x99, 0xa6, 0x4b, 0x7b, 0xce, 0xb1, 0x8b, 0xfe, 0x0d, 0x06, 0x35, 0xfb, 0xd0, 0xf2, 0x89,
0x6f, 0x56, 0x98, 0x0a, 0xe2, 0x2a, 0xd1, 0x32, 0x53, 0xdb, 0x82, 0xca, 0x3d, 0x9f, 0x78, 0xbe,
0x59, 0x15, 0xad, 0xd0, 0x25, 0x26, 0x6e, 0x81, 0xb1, 0x6f, 0x3d, 0x61, 0x46, 0xfb, 0xe6, 0x02,
0xc3, 0x5d, 0x87, 0xe5, 0x7d, 0xeb, 0xc9, 0xe0, 0xc4, 0xf2, 0xc6, 0x1f, 0x78, 0xee, 0x7c, 0xb6,
0xd7, 0x37, 0x6b, 0x4c, 0x80, 0x00, 0x22, 0xc1, 0x5e, 0xdf, 0x34, 0xd8, 0xda, 0x55, 0xce, 0x82,
0x13, 0x05, 0x29, 0xd1, 0xab, 0x60, 0xec, 0x93, 0x48, 0x65, 0x51, 0xa6, 0x82, 0xaf, 0x43, 0x2d,
0x56, 0x07, 0x28, 0xed, 0xf5, 0x43, 0x27, 0xd5, 0x41, 0xbf, 0xe3, 0xfa, 0x01, 0xf3, 0x91, 0x81,
0x96, 0x61, 0x61, 0xb8, 0x7b, 0xc8, 0x16, 0xca, 0x1d, 0xad, 0x6b, 0xe0, 0xdf, 0x35, 0xa8, 0xa7,
0x0e, 0x5b, 0x07, 0xfd, 0xc0, 0x9a, 0x12, 0xb6, 0xdb, 0x40, 0xdb, 0xb0, 0xd6, 0x27, 0xc7, 0xd6,
0x7c, 0x12, 0x1c, 0x91, 0x80, 0x38, 0x81, 0xed, 0x3a, 0x87, 0xee, 0xc4, 0x1e, 0x9d, 0x85, 0xf6,
0x76, 0xa0, 0x95, 0x16, 0xd8, 0xc4, 0x37, 0xcb, 0x8c, 0xe0, 0x06, 0x27, 0x98, 0xd9, 0xc7, 0x30,
0x76, 0xa0, 0xb5, 0xeb, 0x3a, 0x81, 0xed, 0xcc, 0xdd, 0xb9, 0xff, 0xd1, 0x9c, 0x78, 0x76, 0x1c,
0xa2, 0x70, 0x57, 0x5a, 0xcc, 0x76, 0xe1, 0x11, 0xb4, 0x33, 0xc6, 0x06, 0x33, 0x32, 0x12, 0x08,
0x6b, 0x5d, 0x03, 0x35, 0xa1, 0xd6, 0x9f, 0x7b, 0x16, 0xd5, 0x31, 0x4b, 0x1d, 0xad, 0x5b, 0x46,
0x97, 0x00, 0x25, 0x81, 0x88, 0x65, 0x65, 0x26, 0x6b, 0x42, 0xed, 0x88, 0xcc, 0x26, 0xf6, 0xc8,
0x3a, 0x30, 0xf5, 0x8e, 0xd6, 0x6d, 0xe0, 0xbf, 0xb4, 0x1c, 0x8a, 0xc4, 0x2d, 0x69, 0x94, 0x52,
0x01, 0x4a, 0x29, 0x87, 0x52, 0xea, 0x36, 0xd0, 0xff, 0x60, 0x31, 0xd1, 0x8e, 0x52, 0x6f, 0x85,
0x1f, 0x5d, 0xc8, 0x1a, 0x0a, 0xfc, 0x1a, 0x34, 0x06, 0xf3, 0x87, 0xfe, 0xc8, 0xb3, 0x67, 0xd4,
0x64, 0x94, 0x84, 0x6b, 0xa1, 0xb2, 0x20, 0x62, 0x4e, 0xfa, 0x5e, 0x83, 0xa5, 0x8c, 0x05, 0x31,
0x1b, 0x5a, 0x60, 0x0c, 0x02, 0xcb, 0x0b, 0x86, 0xf6, 0x94, 0x84, 0xcc, 0x97, 0x61, 0xe1, 0x96,
0x33, 0x66, 0x0b, 0x9c, 0x6e, 0x0b, 0x8c, 0x3e, 0x99, 0x90, 0x80, 0x8c, 0x6f, 0x04, 0x8c, 0x6f,
0x19, 0x5d, 0x81, 0x2a, 0x33, 0x1a, 0x51, 0x5d, 0x16, 0xa8, 0x32, 0x8c, 0x36, 0x2c, 0x0e, 0xbd,
0xb9, 0x33, 0xb2, 0xf8, 0xae, 0x2a, 0xf5, 0x2e, 0xbe, 0x0b, 0x46, 0xa2, 0x21, 0xb2, 0x58, 0x81,
0xda, 0xdd, 0x53, 0x87, 0xd6, 0xa9, 0x6f, 0x96, 0x3a, 0xe5, 0xae, 0x7e, 0xb3, 0x64, 0x6a, 0xa8,
0x03, 0x55, 0xb6, 0x1a, 0x25, 0x50, 0x53, 0x00, 0x61, 0x02, 0xdc, 0x87, 0x66, 0xf6, 0xc0, 0x99,
0xc0, 0xd4, 0x41, 0xdf, 0x77, 0xc7, 0x24, 0xcc, 0xce, 0x15, 0xa8, 0xf7, 0x89, 0x1f, 0xd8, 0x8e,
0xc5, 0x5d, 0x47, 0xed, 0x1a, 0x78, 0x13, 0x20, 0xb1, 0x89, 0x96, 0xa0, 0x1a, 0x96, 0x2e, 0xe3,
0x86, 0x7b, 0xd0, 0x96, 0x24, 0x5f, 0x06, 0xa6, 0x01, 0x15, 0x26, 0xe2, 0x38, 0xf8, 0x3e, 0xd4,
0xe2, 0x6e, 0x90, 0xe3, 0x73, 0xc7, 0xf2, 0x4f, 0x42, 0x3e, 0x0d, 0xa8, 0xdc, 0x18, 0x4f, 0x6d,
0x9e, 0x17, 0x35, 0xf4, 0x5f, 0x80, 0x43, 0xcf, 0x7e, 0x6c, 0x4f, 0xc8, 0xa3, 0x38, 0xff, 0xdb,
0x49, 0x73, 0x89, 0x65, 0x78, 0x07, 0x1a, 0xa9, 0x05, 0x96, 0x7f, 0x61, 0xd1, 0x86, 0x40, 0x2d,
0x30, 0x62, 0x31, 0x43, 0xab, 0xe0, 0xbf, 0xab, 0xb0, 0xb0, 0xeb, 0x4e, 0xa7, 0x96, 0x33, 0x46,
0x1d, 0xd0, 0x83, 0xb3, 0x19, 0x57, 0x5e, 0x8a, 0x9a, 0x5c, 0x28, 0xbc, 0x36, 0x3c, 0x9b, 0x11,
0xfc, 0x6b, 0x15, 0x74, 0xfa, 0x03, 0xad, 0x42, 0x6b, 0xd7, 0x23, 0x56, 0x40, 0xa8, 0x5b, 0x42,
0x95, 0xa6, 0x46, 0x97, 0x79, 0x56, 0x88, 0xcb, 0x25, 0xb4, 0x01, 0xab, 0x5c, 0x3b, 0xe2, 0x13,
0x89, 0xca, 0x68, 0x1d, 0xda, 0x7d, 0xcf, 0x9d, 0x65, 0x05, 0x3a, 0xea, 0xc0, 0x26, 0xdf, 0x93,
0x29, 0xb4, 0x48, 0xa3, 0x82, 0xb6, 0xe1, 0x12, 0xdd, 0xaa, 0x90, 0x57, 0xd1, 0xbf, 0xa0, 0x33,
0x20, 0x81, 0xbc, 0x33, 0x45, 0x5a, 0x0b, 0x14, 0xe7, 0xde, 0x6c, 0xac, 0xc6, 0xa9, 0xa1, 0xcb,
0xb0, 0xce, 0x99, 0x24, 0x25, 0x13, 0x09, 0x0d, 0x2a, 0xe4, 0x27, 0xce, 0x0b, 0x21, 0x39, 0x43,
0x26, 0x59, 0x22, 0x8d, 0xc5, 0xe8, 0x0c, 0x0a, 0x79, 0x3d, 0xf1, 0x33, 0x0d, 0x6d, 0xb4, 0xdc,
0x40, 0x6d, 0x58, 0xa6, 0xdb, 0xc4, 0xc5, 0x25, 0xaa, 0xcb, 0x4f, 0x22, 0x2e, 0x2f, 0x53, 0x0f,
0x0f, 0x48, 0x10, 0xc7, 0x3d, 0x12, 0x34, 0x11, 0x82, 0x25, 0xea, 0x1f, 0x2b, 0xb0, 0xa2, 0xb5,
0x16, 0xda, 0x04, 0x73, 0x40, 0x02, 0x96, 0x7f, 0xb9, 0x1d, 0x28, 0x41, 0x10, 0xc3, 0xdb, 0x46,
0x5b, 0xb0, 0x11, 0x3a, 0x48, 0xa8, 0xbb, 0x48, 0xbc, 0xca, 0x5c, 0xe4, 0xb9, 0x33, 0x99, 0x70,
0x8d, 0x9a, 0x3c, 0x22, 0x53, 0xf7, 0x31, 0x39, 0x24, 0x09, 0xe9, 0xf5, 0x24, 0x63, 0xa2, 0x1b,
0x2d, 0x12, 0x99, 0xe9, 0x64, 0x12, 0x45, 0x1b, 0x54, 0xc4, 0xf9, 0x65, 0x45, 0x97, 0xa8, 0x88,
0xc7, 0x29, 0x6b, 0xf0, 0x72, 0x22, 0xca, 0xee, 0xda, 0x44, 0x6b, 0x80, 0x06, 0x24, 0xc8, 0x6e,
0xd9, 0x42, 0x2b, 0xd0, 0x64, 0x47, 0xa2, 0x31, 0x8f, 0x56, 0xb7, 0xff, 0x5f, 0xab, 0x8d, 0x9b,
0xe7, 0xe7, 0xe7, 0xe7, 0x25, 0x7c, 0x22, 0x29, 0x8f, 0xf8, 0x92, 0x8d, 0x8b, 0xfe, 0xc8, 0x72,
0xc6, 0x7c, 0x2c, 0xe9, 0xbd, 0x01, 0x0b, 0xa3, 0x50, 0xad, 0x91, 0xaa, 0x3b, 0x93, 0x74, 0xb4,
0xee, 0x62, 0x6f, 0x3d, 0x5c, 0xcc, 0x1a, 0xc5, 0x8f, 0x24, 0x15, 0x97, 0x6a, 0xa3, 0x0d, 0xa8,
0xdc, 0x76, 0xbd, 0x11, 0xaf, 0xf7, 0x5a, 0x01, 0xd0, 0xb1, 0x08, 0x94, 0xb3, 0x89, 0x7f, 0xd1,
0x14, 0x45, 0x9c, 0x69, 0x66, 0x3d, 0x58, 0xce, 0x4f, 0x01, 0x5a, 0xe1, 0x55, 0xdf, 0x7b, 0x4b,
0x49, 0xea, 0x11, 0xdb, 0x7a, 0x59, 0x3c, 0x7d, 0x06, 0x1e, 0xdf, 0x97, 0x76, 0x90, 0x34, 0xab,
0xde, 0x9b, 0x4a, 0x84, 0x13, 0x91, 0x9c, 0xc4, 0x10, 0x1d, 0x7e, 0x0a, 0x3b, 0x91, 0xa4, 0xcf,
0x4a, 0x7d, 0x50, 0x2a, 0xf6, 0xc1, 0x4d, 0x25, 0x43, 0x9b, 0x31, 0xc4, 0xa2, 0x0f, 0xe4, 0x4c,
0xf0, 0xb3, 0xa2, 0x8e, 0x28, 0xe1, 0x19, 0xf9, 0x88, 0x5d, 0x3c, 0xbd, 0xf7, 0x95, 0x0c, 0x3e,
0x65, 0x0c, 0x3a, 0x89, 0x8f, 0x14, 0xf8, 0x3f, 0x68, 0x17, 0xb7, 0xdc, 0x0b, 0x69, 0xdc, 0x56,
0xd2, 0xf8, 0x8c, 0xd1, 0xf8, 0x4f, 0x78, 0xe3, 0x5f, 0x80, 0x83, 0xff, 0xd0, 0x8a, 0x3b, 0xfb,
0x45, 0x44, 0xe8, 0xcc, 0x73, 0x40, 0x4e, 0xd9, 0x42, 0x39, 0x37, 0x36, 0xea, 0xb9, 0xd1, 0xb0,
0x42, 0x47, 0xc3, 0x82, 0x30, 0x4e, 0xc4, 0x30, 0x16, 0x11, 0xc3, 0x3f, 0x6a, 0xca, 0x1b, 0x47,
0x42, 0x7a, 0x09, 0xaa, 0xa9, 0x69, 0xbb, 0x05, 0x06, 0x9d, 0xd3, 0xfc, 0xc0, 0x9a, 0xce, 0xf8,
0xb0, 0xd6, 0x7b, 0x47, 0x49, 0x6a, 0xca, 0x48, 0x6d, 0x89, 0xb9, 0x95, 0xc3, 0xc4, 0x3f, 0x69,
0xca, 0x4b, 0xee, 0x05, 0xf8, 0xac, 0x40, 0x3d, 0xf5, 0xc6, 0x61, 0x8f, 0xae, 0x02, 0x4a, 0x8e,
0x48, 0x49, 0x01, 0x8b, 0x7f, 0xd6, 0x8a, 0xaf, 0xd6, 0x0b, 0x83, 0x1b, 0x0f, 0x67, 0x65, 0x96,
0x74, 0xea, 0xb0, 0xb9, 0xf9, 0xea, 0x93, 0x43, 0x46, 0xd5, 0xf7, 0x6a, 0x84, 0x0a, 0xaa, 0x6f,
0x96, 0xad, 0x3e, 0x05, 0xfe, 0xa9, 0x64, 0x56, 0x78, 0x89, 0x49, 0xb3, 0xe0, 0x6a, 0xf8, 0x3c,
0x7f, 0x07, 0x09, 0x18, 0xf8, 0xe3, 0xdc, 0x34, 0x92, 0xe9, 0xbe, 0xd7, 0x95, 0x96, 0x3d, 0x66,
0x79, 0x35, 0x39, 0x9b, 0x68, 0xf7, 0x44, 0x32, 0xd0, 0x14, 0x1d, 0xa8, 0xe0, 0x04, 0xbe, 0x78,
0x82, 0x9c, 0x51, 0xfc, 0x9d, 0x26, 0x1d, 0x92, 0x68, 0xd0, 0xa8, 0x9a, 0x93, 0x7e, 0xd4, 0x45,
0x61, 0x2c, 0xe5, 0x87, 0x6a, 0xea, 0xc9, 0x4a, 0xc1, 0x6d, 0x13, 0x88, 0xb7, 0x8d, 0x04, 0x11,
0x3f, 0xc8, 0x0e, 0x65, 0xc8, 0xe4, 0x9f, 0x35, 0x18, 0xfe, 0x62, 0x0f, 0x92, 0x4f, 0x0f, 0xbd,
0x1d, 0x25, 0xcc, 0x9c, 0xc1, 0xac, 0x24, 0x9d, 0x32, 0xb1, 0x87, 0x9f, 0xaa, 0x47, 0x3c, 0xc9,
0x79, 0xe3, 0x1c, 0xe1, 0xe3, 0xc3, 0xbb, 0x4a, 0xc8, 0xc7, 0x0c, 0x72, 0x3b, 0x86, 0x94, 0x02,
0xe0, 0x63, 0xc9, 0x04, 0xa9, 0xfe, 0x12, 0x51, 0x10, 0xd0, 0xd3, 0x7c, 0x40, 0xc5, 0x69, 0xe5,
0x4f, 0xad, 0x60, 0x26, 0x95, 0xbc, 0xd3, 0xd3, 0x21, 0x5d, 0xcf, 0xdf, 0xdf, 0xe5, 0xd4, 0xcb,
0x51, 0x97, 0xbe, 0x1c, 0xe9, 0xb3, 0xd7, 0xe8, 0xbd, 0xa7, 0xe4, 0x7c, 0xc6, 0x38, 0x5f, 0x49,
0x35, 0xdb, 0x3c, 0x3b, 0xda, 0xdb, 0x54, 0x03, 0xf3, 0x2b, 0x33, 0x2f, 0xe8, 0xb7, 0x5f, 0xa4,
0xfa, 0xad, 0x1c, 0x97, 0xc6, 0x2d, 0x37, 0xa6, 0xc7, 0x71, 0xd3, 0x78, 0xdc, 0x6e, 0x8c, 0xc7,
0xde, 0x85, 0x71, 0x7b, 0x2a, 0xc6, 0x2d, 0x67, 0x12, 0x7f, 0xab, 0x29, 0x06, 0x7f, 0x7a, 0xd6,
0x3b, 0xc3, 0xe1, 0x21, 0x03, 0xd1, 0x84, 0xcf, 0x54, 0x09, 0x6a, 0x3c, 0x52, 0xf3, 0x1b, 0x46,
0x3d, 0x54, 0x7e, 0x99, 0x1f, 0x2a, 0x33, 0x68, 0xf8, 0x54, 0xf1, 0xc8, 0x78, 0x01, 0x1a, 0x05,
0xc0, 0x5f, 0xc9, 0xa7, 0x59, 0x11, 0xf8, 0xb9, 0xe2, 0x09, 0xf3, 0xa2, 0x9f, 0xeb, 0x8a, 0x09,
0x3c, 0x13, 0x09, 0x48, 0x71, 0xf0, 0x03, 0xc5, 0x43, 0x49, 0x24, 0x50, 0x80, 0xf0, 0x5c, 0x44,
0x90, 0x1a, 0xc2, 0x96, 0xe2, 0xbd, 0x95, 0x42, 0x78, 0x5b, 0x89, 0x70, 0xae, 0xe5, 0x21, 0xb2,
0x87, 0xd8, 0xa1, 0x73, 0x99, 0x3f, 0x73, 0x1d, 0x9f, 0x50, 0xab, 0x77, 0x3f, 0x64, 0x56, 0x6b,
0xb4, 0x9b, 0xdd, 0xf2, 0x3c, 0xd7, 0x63, 0x4f, 0x12, 0x23, 0xf9, 0x36, 0x4c, 0xe7, 0x3b, 0x1d,
0x9f, 0x6b, 0xb2, 0xe7, 0xde, 0xcb, 0x67, 0x9e, 0xba, 0xfd, 0x7f, 0xcd, 0xb9, 0x9b, 0x71, 0x97,
0xcc, 0xfa, 0xe6, 0x93, 0xfc, 0xc3, 0x32, 0xe5, 0x16, 0x75, 0x61, 0x7d, 0xc3, 0x4d, 0xaf, 0x09,
0x75, 0x2c, 0x18, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xc6, 0xc9, 0x45, 0x39, 0x17, 0x00,
0x00,
// 1808 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7,
0x11, 0x46, 0xcf, 0x3e, 0xb8, 0x5b, 0x7c, 0xaa, 0xf9, 0x1a, 0x4a, 0x14, 0xb3, 0x18, 0x08, 0xca,
0x22, 0x08, 0x98, 0x60, 0x03, 0xe8, 0x94, 0x97, 0xc4, 0x95, 0xc4, 0x85, 0xc0, 0x47, 0x66, 0xa9,
0x6b, 0x80, 0x11, 0xb7, 0x25, 0x6e, 0xb2, 0x3b, 0xb3, 0x99, 0x99, 0x95, 0xc4, 0x28, 0x4c, 0x18,
0x5f, 0x7c, 0xb5, 0x61, 0x18, 0x3e, 0xe8, 0x66, 0x1f, 0x7c, 0x34, 0x0c, 0x03, 0x06, 0x0c, 0x9f,
0x7c, 0xf7, 0x1f, 0xf0, 0x7f, 0xb0, 0xcf, 0xbe, 0x1a, 0xdd, 0x3d, 0x3d, 0xdd, 0x33, 0xd3, 0x3d,
0x24, 0x65, 0xf9, 0x36, 0x5d, 0x55, 0xdd, 0xf5, 0x55, 0x75, 0x75, 0x75, 0x55, 0x0f, 0x2c, 0x0f,
0xfd, 0x98, 0x84, 0xbe, 0x37, 0xfa, 0xdd, 0x98, 0xc4, 0xde, 0xf6, 0x24, 0x0c, 0xe2, 0x00, 0x57,
0xe9, 0xb7, 0xf3, 0x5e, 0x05, 0xaa, 0x5d, 0x2f, 0xf6, 0x30, 0x86, 0xea, 0x11, 0x09, 0xc7, 0x36,
0x6a, 0x59, 0xed, 0xaa, 0xcb, 0xbe, 0xf1, 0x0a, 0xd4, 0x7a, 0xfe, 0x80, 0xbc, 0xb4, 0x2d, 0x46,
0xe4, 0x03, 0xbc, 0x09, 0xcd, 0x9d, 0xd1, 0x34, 0x8a, 0x49, 0xd8, 0xeb, 0xda, 0x15, 0xc6, 0x91,
0x04, 0x7c, 0x0b, 0x6a, 0xfb, 0xc1, 0x80, 0x44, 0x76, 0xb5, 0x55, 0x69, 0xcf, 0x76, 0x16, 0xb6,
0x99, 0x4a, 0x4a, 0xea, 0xf9, 0x4f, 0x03, 0x97, 0x33, 0xf1, 0xef, 0xa1, 0x49, 0xb5, 0x3e, 0xf1,
0x22, 0x12, 0xd9, 0x35, 0x26, 0x89, 0xb9, 0xa4, 0x20, 0x33, 0x69, 0x29, 0x44, 0xd7, 0x7d, 0x1c,
0x91, 0x30, 0xb2, 0xeb, 0xea, 0xba, 0x94, 0xc4, 0xd7, 0x65, 0x4c, 0x8a, 0x6d, 0xcf, 0x7b, 0xc9,
0xb4, 0x75, 0xed, 0x19, 0x8e, 0x2d, 0x25, 0xe0, 0x36, 0x2c, 0xee, 0x79, 0x2f, 0xfb, 0x27, 0x5e,
0x38, 0x78, 0x18, 0x06, 0xd3, 0x49, 0xaf, 0x6b, 0x37, 0x98, 0x4c, 0x9e, 0x8c, 0xb7, 0x00, 0x04,
0xa9, 0xd7, 0xb5, 0x9b, 0x4c, 0x48, 0xa1, 0xe0, 0xdf, 0x72, 0xfc, 0xdc, 0x52, 0xd0, 0x5a, 0x2a,
0x05, 0xa8, 0xf4, 0x1e, 0x11, 0xd2, 0xb3, 0x7a, 0xe9, 0x54, 0xc0, 0xd9, 0x85, 0x86, 0x20, 0xe3,
0x05, 0xb0, 0x7a, 0xdd, 0x64, 0x4f, 0xac, 0x5e, 0x97, 0xee, 0xd2, 0x6e, 0x10, 0xc5, 0x6c, 0x43,
0x9a, 0x2e, 0xfb, 0xc6, 0x36, 0xcc, 0x1c, 0xed, 0x1c, 0x32, 0x72, 0xa5, 0x85, 0xda, 0x4d, 0x57,
0x0c, 0x9d, 0xef, 0x11, 0xcc, 0xa9, 0xfe, 0xa4, 0xd3, 0xf7, 0xbd, 0x31, 0x61, 0x0b, 0x36, 0x5d,
0xf6, 0x8d, 0xef, 0xc0, 0x5a, 0x97, 0x3c, 0xf5, 0xa6, 0xa3, 0xd8, 0x25, 0x31, 0xf1, 0xe3, 0x61,
0xe0, 0x1f, 0x06, 0xa3, 0xe1, 0xf1, 0x69, 0xa2, 0xc4, 0xc0, 0xc5, 0x0f, 0xe1, 0x5a, 0x96, 0x34,
0x24, 0x91, 0x5d, 0x61, 0xc6, 0x6d, 0x70, 0xe3, 0x72, 0x33, 0x98, 0x9d, 0xc5, 0x39, 0x74, 0xa1,
0x9d, 0xc0, 0x8f, 0x87, 0xfe, 0x34, 0x98, 0x46, 0x7f, 0x9b, 0x92, 0x70, 0x98, 0x46, 0x4f, 0xb2,
0x50, 0x96, 0x9d, 0x2c, 0x54, 0x98, 0xe3, 0xbc, 0x8f, 0x60, 0x39, 0xa7, 0xb3, 0x3f, 0x21, 0xc7,
0x8a, 0xd5, 0x28, 0xb5, 0xfa, 0x3a, 0x34, 0xba, 0xd3, 0xd0, 0xa3, 0x92, 0xb6, 0xd5, 0x42, 0xed,
0x8a, 0x9b, 0x8e, 0xf1, 0x36, 0x60, 0x19, 0x0c, 0xa9, 0x54, 0x85, 0x49, 0x69, 0x38, 0x74, 0x2d,
0x97, 0x4c, 0x46, 0xc3, 0x63, 0x6f, 0xdf, 0xae, 0xb6, 0x50, 0x7b, 0xde, 0x4d, 0xc7, 0xce, 0xbb,
0x56, 0x01, 0x93, 0x71, 0x27, 0xb2, 0x98, 0xac, 0x4b, 0x61, 0xb2, 0x2e, 0x85, 0xc9, 0x52, 0x31,
0xe1, 0x3b, 0x30, 0x2b, 0x67, 0x88, 0xe3, 0xb7, 0xc2, 0x5d, 0xad, 0x9c, 0x02, 0xea, 0x65, 0x55,
0x10, 0xff, 0x11, 0xe6, 0xfb, 0xd3, 0x27, 0xd1, 0x71, 0x38, 0x9c, 0x50, 0x1d, 0xe2, 0x28, 0xae,
0x25, 0x33, 0x15, 0x16, 0x9b, 0x9b, 0x15, 0x76, 0xbe, 0x41, 0xb0, 0x90, 0x5d, 0xbd, 0x10, 0xdd,
0x9b, 0xd0, 0xec, 0xc7, 0x5e, 0x18, 0x1f, 0x0d, 0xc7, 0x24, 0xf1, 0x80, 0x24, 0xd0, 0x38, 0xbf,
0xef, 0x0f, 0x18, 0x8f, 0xdb, 0x2d, 0x86, 0x74, 0x5e, 0x97, 0x8c, 0x48, 0x4c, 0x06, 0x77, 0x63,
0x66, 0x6d, 0xc5, 0x95, 0x04, 0xfc, 0x6b, 0xa8, 0x33, 0xbd, 0xc2, 0xd2, 0x45, 0xc5, 0x52, 0x06,
0x34, 0x61, 0xe3, 0x16, 0xcc, 0x1e, 0x85, 0x53, 0xff, 0xd8, 0xe3, 0x0b, 0xd5, 0xd9, 0x86, 0xab,
0x24, 0x87, 0x40, 0x33, 0x9d, 0x56, 0x40, 0xbf, 0x05, 0x8d, 0x83, 0x17, 0x3e, 0x4d, 0x82, 0x91,
0x6d, 0xb5, 0x2a, 0xed, 0xea, 0x3d, 0xcb, 0x46, 0x6e, 0x4a, 0xc3, 0x6d, 0xa8, 0xb3, 0x6f, 0x71,
0x4a, 0x96, 0x14, 0x1c, 0x8c, 0xe1, 0x26, 0x7c, 0xe7, 0xef, 0xb0, 0x94, 0xf7, 0xa6, 0x36, 0x60,
0x30, 0x54, 0xf7, 0x82, 0x01, 0x11, 0xd9, 0x80, 0x7e, 0x63, 0x07, 0xe6, 0xba, 0x24, 0x8a, 0x87,
0xbe, 0xc7, 0xf7, 0x88, 0xea, 0x6a, 0xba, 0x19, 0x9a, 0x73, 0x0b, 0x40, 0x6a, 0xc5, 0x6b, 0x50,
0x4f, 0x12, 0x26, 0xb7, 0x25, 0x19, 0x39, 0x7f, 0x81, 0x65, 0xcd, 0xc1, 0xd3, 0x02, 0x59, 0x81,
0x1a, 0x13, 0x48, 0x90, 0xf0, 0x81, 0x73, 0x06, 0x0d, 0x91, 0x9f, 0x4d, 0xf0, 0x77, 0xbd, 0xe8,
0x24, 0x4d, 0x66, 0x5e, 0x74, 0x42, 0x57, 0xba, 0x3b, 0x18, 0x0f, 0x79, 0x68, 0x37, 0x5c, 0x3e,
0xc0, 0x7f, 0x00, 0x38, 0x0c, 0x87, 0xcf, 0x87, 0x23, 0xf2, 0x2c, 0xcd, 0x0d, 0xcb, 0xf2, 0x06,
0x48, 0x79, 0xae, 0x22, 0xe6, 0xf4, 0x60, 0x3e, 0xc3, 0x64, 0xe7, 0x2b, 0xc9, 0x86, 0x09, 0x8e,
0x74, 0x4c, 0x43, 0x28, 0x15, 0x64, 0x80, 0x6a, 0xae, 0x24, 0x38, 0xdf, 0xd5, 0x61, 0x66, 0x27,
0x18, 0x8f, 0x3d, 0x7f, 0x80, 0x6f, 0x43, 0x35, 0x3e, 0x9d, 0xf0, 0x15, 0x16, 0xc4, 0xad, 0x95,
0x30, 0xb7, 0x8f, 0x4e, 0x27, 0xc4, 0x65, 0x7c, 0xe7, 0x75, 0x1d, 0xaa, 0x74, 0x88, 0x57, 0xe1,
0xda, 0x4e, 0x48, 0xbc, 0x98, 0x50, 0xbf, 0x26, 0x82, 0x4b, 0x88, 0x92, 0x79, 0x8c, 0xaa, 0x64,
0x0b, 0x6f, 0xc0, 0x2a, 0x97, 0x16, 0xd0, 0x04, 0xab, 0x82, 0xd7, 0x61, 0xb9, 0x1b, 0x06, 0x93,
0x3c, 0xa3, 0x8a, 0x5b, 0xb0, 0xc9, 0xe7, 0xe4, 0x32, 0x8d, 0x90, 0xa8, 0xe1, 0x2d, 0xb8, 0x4e,
0xa7, 0x1a, 0xf8, 0x75, 0x7c, 0x0b, 0x5a, 0x7d, 0x12, 0xeb, 0x33, 0xbd, 0x90, 0x9a, 0xa1, 0x7a,
0x1e, 0x4f, 0x06, 0x66, 0x3d, 0x0d, 0x7c, 0x03, 0xd6, 0x39, 0x12, 0x79, 0xd2, 0x05, 0xb3, 0x49,
0x99, 0xdc, 0xe2, 0x22, 0x13, 0xa4, 0x0d, 0xb9, 0x98, 0x13, 0x12, 0xb3, 0xc2, 0x06, 0x03, 0x7f,
0x4e, 0xfa, 0x99, 0xee, 0xba, 0x20, 0xcf, 0xe3, 0x65, 0x58, 0xa4, 0xd3, 0x54, 0xe2, 0x02, 0x95,
0xe5, 0x96, 0xa8, 0xe4, 0x45, 0xea, 0xe1, 0x3e, 0x89, 0xd3, 0x7d, 0x17, 0x8c, 0x25, 0x8c, 0x61,
0x81, 0xfa, 0xc7, 0x8b, 0x3d, 0x41, 0xbb, 0x86, 0x37, 0xc1, 0xee, 0x93, 0x98, 0x05, 0x68, 0x61,
0x06, 0x96, 0x1a, 0xd4, 0xed, 0x5d, 0xc6, 0x37, 0x61, 0x23, 0x71, 0x90, 0x72, 0xc0, 0x05, 0x7b,
0x95, 0xb9, 0x28, 0x0c, 0x26, 0x3a, 0xe6, 0x1a, 0x5d, 0xd2, 0x25, 0xe3, 0xe0, 0x39, 0x39, 0x24,
0x12, 0xf4, 0xba, 0x8c, 0x18, 0x51, 0x42, 0x08, 0x96, 0x9d, 0x0d, 0x26, 0x95, 0xb5, 0x41, 0x59,
0x1c, 0x5f, 0x9e, 0x75, 0x9d, 0xb2, 0xf8, 0x3e, 0xe5, 0x17, 0xbc, 0x21, 0x59, 0xf9, 0x59, 0x9b,
0x78, 0x0d, 0x70, 0x9f, 0xc4, 0xf9, 0x29, 0x37, 0xf1, 0x0a, 0x2c, 0x31, 0x93, 0xe8, 0x9e, 0x0b,
0xea, 0xd6, 0x6f, 0x1a, 0x8d, 0xc1, 0xd2, 0xf9, 0xf9, 0xf9, 0xb9, 0xe5, 0x9c, 0x69, 0x8e, 0x47,
0x5a, 0xe7, 0x20, 0xa5, 0xce, 0xc1, 0x50, 0x75, 0x3d, 0x7f, 0x90, 0x14, 0xa3, 0xec, 0xbb, 0xf3,
0x57, 0x98, 0x39, 0x4e, 0xa6, 0xcc, 0x67, 0x4e, 0xa2, 0x4d, 0x5a, 0xa8, 0x3d, 0xdb, 0x59, 0x4f,
0x88, 0x79, 0x05, 0xae, 0x98, 0xe6, 0xbc, 0xd2, 0x1c, 0xc3, 0x42, 0x6a, 0x5f, 0x81, 0xda, 0x83,
0x20, 0x3c, 0xe6, 0x99, 0xa1, 0xe1, 0xf2, 0x41, 0x89, 0xf2, 0xa7, 0xaa, 0xf2, 0xc2, 0xf2, 0x52,
0xf9, 0x97, 0xc8, 0x70, 0xda, 0xb5, 0xf9, 0x72, 0x07, 0x16, 0x8b, 0x25, 0x1a, 0x2a, 0xaf, 0xb7,
0xf2, 0x33, 0x3a, 0x5d, 0x23, 0xe8, 0x67, 0x6c, 0xad, 0x1b, 0xaa, 0xc7, 0x72, 0xa8, 0x24, 0xf0,
0xb1, 0x36, 0x15, 0xe9, 0x50, 0x77, 0xee, 0x19, 0x15, 0x9e, 0xa8, 0xe0, 0x35, 0xcb, 0x49, 0x75,
0xdf, 0xa2, 0xf2, 0x0c, 0x57, 0x9a, 0xda, 0xb5, 0x6e, 0xb3, 0xae, 0xe8, 0xb6, 0x47, 0x46, 0x2b,
0x86, 0xcc, 0x0a, 0x47, 0x75, 0x9b, 0x1e, 0xa4, 0x34, 0xe7, 0x23, 0x54, 0x96, 0x8e, 0x4b, 0x8d,
0x11, 0x1e, 0xb6, 0x14, 0x0f, 0xf7, 0x8c, 0xd8, 0xfe, 0xc1, 0xb0, 0xb5, 0xa4, 0x87, 0x2f, 0x42,
0xf6, 0x09, 0xba, 0xf8, 0x22, 0xb8, 0x32, 0xbe, 0x03, 0x23, 0xbe, 0x7f, 0x32, 0x7c, 0xb7, 0x93,
0x42, 0xe8, 0x02, 0xbd, 0x12, 0xe5, 0x0f, 0xa8, 0xfc, 0x22, 0xba, 0x2a, 0x42, 0x5a, 0x5a, 0xee,
0x93, 0x17, 0x8c, 0x9c, 0xb4, 0x50, 0xc9, 0x30, 0x53, 0x93, 0x57, 0x73, 0x7d, 0x82, 0x5a, 0x63,
0xd7, 0xb2, 0x75, 0x7f, 0x49, 0xbc, 0x8c, 0xd4, 0x78, 0x29, 0xb3, 0x42, 0xda, 0xfb, 0x05, 0x32,
0x5e, 0xab, 0xa5, 0xa6, 0xae, 0x41, 0x3d, 0xd3, 0xca, 0x25, 0x23, 0x5a, 0xec, 0xd0, 0xba, 0x39,
0x8a, 0xbd, 0xf1, 0x24, 0xa9, 0xa5, 0x25, 0xa1, 0xf3, 0xc0, 0x08, 0x7d, 0xcc, 0xa0, 0xdf, 0x54,
0x43, 0xbd, 0x00, 0x48, 0xa2, 0xfe, 0x0a, 0x19, 0xef, 0xfb, 0x37, 0x42, 0xed, 0xc0, 0x5c, 0xa6,
0x75, 0xe7, 0x4f, 0x0f, 0x19, 0x5a, 0x09, 0x76, 0x5f, 0xc5, 0x6e, 0x80, 0x25, 0xb1, 0x7f, 0x8e,
0xca, 0xcb, 0x91, 0x2b, 0x47, 0x58, 0x5a, 0x21, 0x57, 0x94, 0x0a, 0xb9, 0x24, 0x4a, 0x82, 0x62,
0x56, 0xd1, 0x23, 0x29, 0x66, 0x95, 0xb7, 0x83, 0xb8, 0x24, 0xab, 0x4c, 0xf2, 0x59, 0xe5, 0x22,
0x64, 0x1f, 0x20, 0x4d, 0x69, 0xf6, 0xf3, 0x5a, 0x82, 0x92, 0xcb, 0xf7, 0x5f, 0xc5, 0x9b, 0x5f,
0x51, 0x2b, 0x51, 0x91, 0x42, 0x61, 0xa8, 0xbd, 0xbf, 0xfe, 0x6c, 0x54, 0x14, 0x32, 0x45, 0xab,
0xd2, 0x0f, 0x5a, 0x35, 0x67, 0x9a, 0x52, 0xf3, 0xb2, 0xb6, 0x97, 0x58, 0x19, 0xa9, 0x56, 0x16,
0x14, 0x48, 0xf5, 0x9f, 0x21, 0x6d, 0x4d, 0x4b, 0xc3, 0x81, 0xca, 0xfb, 0x12, 0x45, 0x3a, 0xce,
0x84, 0x8a, 0x55, 0xd6, 0x28, 0x55, 0x72, 0x8d, 0x52, 0xc9, 0x65, 0x1f, 0xab, 0x97, 0xbd, 0x06,
0x90, 0x44, 0x1c, 0xe4, 0x6b, 0x6d, 0xbc, 0xc5, 0xdf, 0x28, 0x19, 0xce, 0xd9, 0x0e, 0xc8, 0x87,
0x42, 0x97, 0xd1, 0x3b, 0x7f, 0x32, 0x6a, 0x9d, 0x32, 0xad, 0x2b, 0xf2, 0x82, 0x91, 0xab, 0x4a,
0x85, 0x1f, 0x22, 0x73, 0x25, 0x5f, 0xea, 0xa7, 0x34, 0x32, 0x2d, 0x35, 0x32, 0x1f, 0x1a, 0xd1,
0x3c, 0x67, 0x68, 0xb6, 0x52, 0x34, 0x5a, 0x8d, 0x12, 0xd7, 0xa9, 0xa6, 0x85, 0xb8, 0xcc, 0x8b,
0x60, 0x49, 0xd4, 0xbc, 0x28, 0x46, 0x8d, 0xb6, 0x30, 0xfd, 0x11, 0x95, 0xf4, 0x29, 0xc6, 0xc7,
0x2b, 0x53, 0xcc, 0xb4, 0x8b, 0x15, 0x18, 0x4f, 0x83, 0x79, 0x72, 0xfa, 0xa2, 0x51, 0x2d, 0x79,
0xd1, 0xa8, 0x15, 0x5f, 0x34, 0x3a, 0xbb, 0x46, 0x8b, 0x4f, 0x99, 0xc5, 0xbf, 0xca, 0xdc, 0x59,
0x45, 0x93, 0xa4, 0xe5, 0x5f, 0x23, 0x63, 0x0b, 0xf6, 0xcb, 0xd9, 0x5d, 0x72, 0x6f, 0xfd, 0x3b,
0x73, 0x6f, 0xe9, 0x81, 0x65, 0x42, 0xa6, 0xd0, 0x22, 0xa6, 0x21, 0x83, 0x64, 0xc8, 0xdc, 0x1d,
0x0c, 0x42, 0x11, 0x32, 0xf4, 0xbb, 0x24, 0x64, 0x5e, 0xa9, 0x21, 0x53, 0x58, 0x5c, 0xaa, 0xfe,
0x14, 0x19, 0xfa, 0x50, 0xea, 0xa2, 0xdd, 0xa3, 0xa3, 0x43, 0xa6, 0x33, 0x39, 0x42, 0x62, 0x9c,
0x3c, 0x5e, 0x2b, 0x70, 0xc4, 0x30, 0x6d, 0xf7, 0x2a, 0x4a, 0xbb, 0x67, 0x6e, 0x5e, 0xfe, 0x53,
0x6c, 0x5e, 0x72, 0x30, 0x32, 0xd7, 0x91, 0xbe, 0x2d, 0x7e, 0x33, 0xa4, 0x25, 0xa8, 0xce, 0xf4,
0x2d, 0x95, 0x16, 0xd5, 0x6b, 0x64, 0xe8, 0xc8, 0xaf, 0xfe, 0x13, 0xc0, 0x52, 0x7e, 0x02, 0x94,
0xa0, 0xfb, 0xaf, 0x8a, 0x4e, 0xab, 0x5a, 0x6d, 0xf8, 0xf4, 0x6f, 0x02, 0x79, 0x70, 0x25, 0xea,
0xfe, 0xa7, 0xaa, 0xd3, 0x2e, 0x26, 0xd5, 0xf9, 0x86, 0x77, 0x86, 0x82, 0xba, 0xfb, 0x46, 0x75,
0xe7, 0xa8, 0xa8, 0xcf, 0x68, 0xde, 0x03, 0x5a, 0xca, 0x47, 0x93, 0xc0, 0x8f, 0x08, 0x55, 0x71,
0xf0, 0x88, 0xa9, 0x68, 0xb8, 0xd6, 0xc1, 0x23, 0x9a, 0xe5, 0xef, 0x87, 0x61, 0x10, 0xb2, 0x66,
0xbb, 0xe9, 0xf2, 0x81, 0xfc, 0x37, 0x56, 0x61, 0xe7, 0x8a, 0x0f, 0x9c, 0x8f, 0x91, 0xee, 0x15,
0xe4, 0x2d, 0x9e, 0x00, 0xf3, 0x05, 0xfb, 0x7f, 0x6e, 0xaf, 0x9d, 0xde, 0x2e, 0x46, 0xe7, 0x0e,
0x8a, 0x2f, 0x32, 0x05, 0xbf, 0x9a, 0xf3, 0xc1, 0x3b, 0x5c, 0xcf, 0x9a, 0x92, 0x91, 0x94, 0x85,
0x52, 0x2d, 0x3f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x04, 0x86, 0xd9, 0x75, 0x1c, 0x00, 0x00,
}

View File

@ -8,8 +8,13 @@ import (
"fmt"
"io"
"archive/tar"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/tcp"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
// Client provides an API for the snapshotter service.
@ -22,6 +27,135 @@ func NewClient(host string) *Client {
return &Client{host: host}
}
// UpdateMeta sends a metadata-update request to the snapshotter service:
// it writes the request-type byte, the JSON-encoded request, and then streams
// req.UploadSize bytes of serialized metadata from upStream over the connection.
// It returns a mapping of the uploaded metadata's shard IDs to the actual
// shard IDs assigned on the destination system.
func (c *Client) UpdateMeta(req *Request, upStream io.Reader) (map[uint64]uint64, error) {
	// Connect to snapshotter service.
	conn, err := tcp.Dial("tcp", c.host, MuxHeader)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	// The first byte on the wire identifies the request type.
	if _, err := conn.Write([]byte{byte(req.Type)}); err != nil {
		return nil, err
	}

	if err := json.NewEncoder(conn).Encode(req); err != nil {
		return nil, fmt.Errorf("encode snapshot request: %s", err)
	}

	// Stream the metadata payload; the server expects exactly req.UploadSize bytes.
	if n, err := io.Copy(conn, upStream); (err != nil && err != io.EOF) || n != req.UploadSize {
		return nil, fmt.Errorf("error uploading file: err=%v, n=%d, uploadSize: %d", err, n, req.UploadSize)
	}

	// The response layout is [BackupMagicHeader][npairs] followed by npairs
	// of (uploaded shard ID, destination shard ID) 8-byte big-endian pairs.
	resp, err := ioutil.ReadAll(conn)
	if err != nil || len(resp) == 0 {
		return nil, fmt.Errorf("updating metadata on influxd service failed: err=%v, n=%d", err, len(resp))
	}

	if len(resp) < 16 {
		return nil, fmt.Errorf("response too short to be a metadata update response: %d", len(resp))
	}
	header, npairs, err := decodeUintPair(resp[:16])
	if err != nil {
		return nil, err
	}

	if npairs == 0 {
		return nil, fmt.Errorf("DB metadata not changed. database may already exist")
	}

	pairs := resp[16:]

	if header != BackupMagicHeader {
		return nil, fmt.Errorf("Response did not contain the proper header tag.")
	}

	// Each mapping is two uint64s, i.e. 16 bytes per pair.
	if uint64(len(pairs)) != npairs*16 {
		return nil, fmt.Errorf("expected an even number of integer pairs in update meta response")
	}

	shardIDMap := make(map[uint64]uint64)
	for i := 0; i < int(npairs); i++ {
		offset := i * 16
		k, v, err := decodeUintPair(pairs[offset : offset+16])
		if err != nil {
			return nil, err
		}
		shardIDMap[k] = v
	}

	return shardIDMap, nil
}
// decodeUintPair unpacks two big-endian uint64 values from a slice that must
// be exactly 16 bytes long.
func decodeUintPair(bits []byte) (uint64, uint64, error) {
	const pairLen = 16
	if len(bits) != pairLen {
		return 0, 0, errors.New("slice must have exactly 16 bytes")
	}
	first := binary.BigEndian.Uint64(bits[0:8])
	second := binary.BigEndian.Uint64(bits[8:])
	return first, second, nil
}
// UploadShard streams a shard backup tarfile to the snapshotter service,
// rewriting each file's path so the data lands under destinationDatabase/
// restoreRetention/newShardID on the destination system. When
// destinationDatabase or restoreRetention is empty, the database/retention
// policy names found in the tarfile paths are used instead.
func (c *Client) UploadShard(shardID, newShardID uint64, destinationDatabase, restoreRetention string, tr *tar.Reader) error {
	conn, err := tcp.Dial("tcp", c.host, MuxHeader)
	if err != nil {
		// BUG FIX: check the error before deferring Close; deferring on a nil
		// conn when Dial fails would panic.
		return err
	}
	defer conn.Close()

	// Request header: the RequestShardUpdate type byte followed by the
	// destination shard ID as a big-endian uint64.
	var shardBytes [9]byte
	shardBytes[0] = byte(RequestShardUpdate)
	binary.BigEndian.PutUint64(shardBytes[1:], newShardID)
	if _, err := conn.Write(shardBytes[:]); err != nil {
		return err
	}

	tw := tar.NewWriter(conn)
	defer tw.Close()

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Tar entries are expected to be of the form db/rp/shardID/file...
		names := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator))

		if len(names) < 4 {
			return fmt.Errorf("error parsing file name from shard tarfile: %s", hdr.Name)
		}

		if destinationDatabase == "" {
			destinationDatabase = names[0]
		}

		if restoreRetention == "" {
			restoreRetention = names[1]
		}

		// Rewrite the entry path with the destination database, retention
		// policy, and the shard ID assigned by the destination system.
		filepathArgs := []string{destinationDatabase, restoreRetention, strconv.FormatUint(newShardID, 10)}
		filepathArgs = append(filepathArgs, names[3:]...)
		hdr.Name = filepath.ToSlash(filepath.Join(filepathArgs...))

		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, tr); err != nil {
			return err
		}
	}

	return nil
}
// MetastoreBackup returns a snapshot of the meta store.
func (c *Client) MetastoreBackup() (*meta.Data, error) {
req := &Request{

View File

@ -44,6 +44,9 @@ type Service struct {
ExportShard(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error
Shard(id uint64) *tsdb.Shard
ShardRelativePath(id uint64) (string, error)
SetShardEnabled(shardID uint64, enabled bool) error
RestoreShard(id uint64, r io.Reader) error
CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error
}
Listener net.Listener
@ -89,7 +92,6 @@ func (s *Service) serve() {
for {
// Wait for next connection.
conn, err := s.Listener.Accept()
if err != nil && strings.Contains(err.Error(), "connection closed") {
s.Logger.Info("snapshot listener closed")
return
@ -119,7 +121,11 @@ func (s *Service) handleConn(conn net.Conn) error {
return err
}
r, err := s.readRequest(conn)
if RequestType(typ[0]) == RequestShardUpdate {
return s.updateShardsLive(conn)
}
r, bytes, err := s.readRequest(conn)
if err != nil {
return fmt.Errorf("read request: %s", err)
}
@ -141,6 +147,8 @@ func (s *Service) handleConn(conn net.Conn) error {
return s.writeDatabaseInfo(conn, r.BackupDatabase)
case RequestRetentionPolicyInfo:
return s.writeRetentionPolicyInfo(conn, r.BackupDatabase, r.BackupRetentionPolicy)
case RequestMetaStoreUpdate:
return s.updateMetaStore(conn, bytes, r.BackupDatabase, r.RestoreDatabase, r.BackupRetentionPolicy, r.RestoreRetentionPolicy)
default:
return fmt.Errorf("request type unknown: %v", r.Type)
}
@ -148,10 +156,109 @@ func (s *Service) handleConn(conn net.Conn) error {
return nil
}
// updateShardsLive receives a streamed shard update on conn: an 8-byte
// big-endian shard ID followed by a tar stream of shard files. The shard is
// disabled for the duration of the restore and re-enabled afterward.
func (s *Service) updateShardsLive(conn net.Conn) error {
	var sidBytes [8]byte
	// BUG FIX: a single conn.Read may legally return fewer than 8 bytes on a
	// network connection; io.ReadFull guarantees the whole shard ID is read.
	if _, err := io.ReadFull(conn, sidBytes[:]); err != nil {
		return err
	}
	sid := binary.BigEndian.Uint64(sidBytes[:])

	// Take the shard offline while its files are overwritten.
	if err := s.TSDBStore.SetShardEnabled(sid, false); err != nil {
		return err
	}
	defer s.TSDBStore.SetShardEnabled(sid, true)

	if err := s.TSDBStore.RestoreShard(sid, conn); err != nil {
		return err
	}

	return nil
}
// updateMetaStore merges uploaded metadata (bits, a serialized meta.Data
// payload) into this node's meta store. backupDBName/backupRPName identify
// the database and retention policy in the backup; restoreDBName/restoreRPName
// optionally rename them on import. On success it replies to the client with
// the mapping of backup shard IDs to locally assigned shard IDs; on early
// failure it replies with an empty map so the client is not left waiting.
func (s *Service) updateMetaStore(conn net.Conn, bits []byte, backupDBName, restoreDBName, backupRPName, restoreRPName string) error {
	md := meta.Data{}
	err := md.UnmarshalBinary(bits)
	if err != nil {
		// Tell the client "nothing imported" before surfacing the decode error.
		if err := s.respondIDMap(conn, map[uint64]uint64{}); err != nil {
			return err
		}
		return fmt.Errorf("failed to decode meta: %s", err)
	}

	data := s.MetaClient.(*meta.Client).Data()

	// Merge the uploaded metadata into a copy of the local metadata,
	// collecting the shard ID mapping and any databases created by the import.
	IDMap, newDBs, err := data.ImportData(md, backupDBName, restoreDBName, backupRPName, restoreRPName)
	if err != nil {
		if err := s.respondIDMap(conn, map[uint64]uint64{}); err != nil {
			return err
		}
		return err
	}

	// Commit the merged metadata to the meta client.
	err = s.MetaClient.(*meta.Client).SetData(&data)
	if err != nil {
		return err
	}

	// Create the on-disk shards for any databases the import added.
	err = s.createNewDBShards(data, newDBs)
	if err != nil {
		return err
	}

	err = s.respondIDMap(conn, IDMap)
	return err
}
// createNewDBShards walks every database name in newDBs — databases that
// should have just been added to the metadata — and creates each shard the
// metadata records for it. A missing database is reported as an error. None
// of the shards should already exist on a new database, and CreateShard
// protects against double-creation.
func (s *Service) createNewDBShards(data meta.Data, newDBs []string) error {
	for _, restoreDBName := range newDBs {
		dbInfo := data.Database(restoreDBName)
		if dbInfo == nil {
			return fmt.Errorf("db %s not found when creating new db shards", restoreDBName)
		}

		for _, rp := range dbInfo.RetentionPolicies {
			for _, group := range rp.ShardGroups {
				for _, sh := range group.Shards {
					if err := s.TSDBStore.CreateShard(restoreDBName, rp.Name, sh.ID, true); err != nil {
						return err
					}
				}
			}
		}
	}

	return nil
}
// respondIDMap writes the shard ID mapping (source-server shard ID to local
// shard ID) back to the client. The reply is [BackupMagicHeader][npairs]
// followed by the pairs, all big-endian uint64s; [BackupMagicHeader, 0]
// signals that nothing should be imported.
func (s *Service) respondIDMap(conn net.Conn, IDMap map[uint64]uint64) error {
	count := len(IDMap)
	// Two leading informational ints, then 16 bytes per mapping.
	buf := make([]byte, (count+1)*16)
	binary.BigEndian.PutUint64(buf[0:8], BackupMagicHeader)
	binary.BigEndian.PutUint64(buf[8:16], uint64(count))

	off := 16
	for src, dst := range IDMap {
		binary.BigEndian.PutUint64(buf[off:off+8], src)
		binary.BigEndian.PutUint64(buf[off+8:off+16], dst)
		off += 16
	}

	_, err := conn.Write(buf)
	return err
}
func (s *Service) writeMetaStore(conn net.Conn) error {
// Retrieve and serialize the current meta data.
metaBlob, err := s.MetaClient.MarshalBinary()
if err != nil {
return fmt.Errorf("marshal meta: %s", err)
}
@ -274,12 +381,39 @@ func (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPol
}
// readRequest unmarshals a request object from the conn. When r.UploadSize is
// non-zero it also returns the raw payload bytes that follow the JSON request
// on the wire; otherwise the returned slice is unused by the caller.
func (s *Service) readRequest(conn net.Conn) (Request, []byte, error) {
	var r Request
	d := json.NewDecoder(conn)

	if err := d.Decode(&r); err != nil {
		return r, nil, err
	}

	// One extra byte to absorb the delimiter the client-side JSON encoder
	// writes after the request (trimmed off below).
	bits := make([]byte, r.UploadSize+1)

	if r.UploadSize > 0 {

		// The JSON decoder may have buffered part of the payload; drain that first.
		remainder := d.Buffered()

		n, err := remainder.Read(bits)
		if err != nil && err != io.EOF {
			return r, bits, err
		}

		// it is a bit random but sometimes the Json decoder will consume all the bytes and sometimes
		// it will leave a few behind.
		if err != io.EOF && n < int(r.UploadSize+1) {
			// NOTE(review): a single conn.Read may still short-read here;
			// presumably the payload fits in one read — verify against callers.
			n, err = conn.Read(bits[n:])
		}

		if err != nil && err != io.EOF {
			return r, bits, err
		}
		// the JSON encoder on the client side seems to write an extra byte, so trim that off the front.
		return r, bits[1:], nil
	}

	return r, bits, nil
}
// RequestType indicates the type of snapshot request.

View File

@ -8,6 +8,7 @@ import (
"testing"
"time"
"fmt"
"github.com/influxdata/influxdb/cmd/influxd/backup"
"github.com/influxdata/influxdb/cmd/influxd/restore"
)
@ -17,12 +18,19 @@ func TestServer_BackupAndRestore(t *testing.T) {
config.Data.Engine = "tsm1"
config.BindAddress = freePort()
backupDir, _ := ioutil.TempDir("", "backup")
defer os.RemoveAll(backupDir)
fullBackupDir, _ := ioutil.TempDir("", "backup")
defer os.RemoveAll(fullBackupDir)
partialBackupDir, _ := ioutil.TempDir("", "backup")
defer os.RemoveAll(partialBackupDir)
enterpriseBackupDir, _ := ioutil.TempDir("", "backup")
defer os.RemoveAll(enterpriseBackupDir)
db := "mydb"
rp := "forever"
expected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23]]}]}]}`
expected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23],["1970-01-01T00:00:00.005Z","B",24],["1970-01-01T00:00:00.009Z","C",25]]}]}]}`
partialExpected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23],["1970-01-01T00:00:00.005Z","B",24]]}]}]}`
// set the cache snapshot size low so that a single point will cause TSM file creation
config.Data.CacheSnapshotMemorySize = 1
@ -35,7 +43,7 @@ func TestServer_BackupAndRestore(t *testing.T) {
t.Skip("Skipping. Cannot modify remote server config")
}
if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicySpec(rp, 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(db, NewRetentionPolicySpec(rp, 1, 0), true); err != nil {
t.Fatal(err)
}
@ -46,7 +54,23 @@ func TestServer_BackupAndRestore(t *testing.T) {
// wait for the snapshot to write
time.Sleep(time.Second)
res, err := s.Query(`select * from "mydb"."forever"."myseries"`)
if _, err := s.Write(db, rp, "myseries,host=B value=24 5000000", nil); err != nil {
t.Fatalf("failed to write: %s", err)
}
// wait for the snapshot to write
time.Sleep(time.Second)
if _, err := s.Write(db, rp, "myseries,host=C value=25 9000000", nil); err != nil {
t.Fatalf("failed to write: %s", err)
}
// wait for the snapshot to write
time.Sleep(time.Second)
res, err := s.Query(`show series on mydb; show retention policies on mydb`)
res, err = s.Query(`select * from "mydb"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
@ -61,9 +85,18 @@ func TestServer_BackupAndRestore(t *testing.T) {
t.Fatal(err)
}
hostAddress := net.JoinHostPort("localhost", port)
if err := cmd.Run("-host", hostAddress, "-database", "mydb", backupDir); err != nil {
if err := cmd.Run("-host", hostAddress, "-database", "mydb", fullBackupDir); err != nil {
t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
}
if err := cmd.Run("-host", hostAddress, "-database", "mydb", "-start", "1970-01-01T00:00:00.001Z", "-end", "1970-01-01T00:00:00.007Z", partialBackupDir); err != nil {
t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
}
if err := cmd.Run("-enterprise", "-host", hostAddress, "-database", "mydb", "-start", "1970-01-01T00:00:00.001Z", "-end", "1970-01-01T00:00:00.007Z", enterpriseBackupDir); err != nil {
t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
}
}()
if _, err := os.Stat(config.Meta.Dir); err == nil || !os.IsNotExist(err) {
@ -74,10 +107,18 @@ func TestServer_BackupAndRestore(t *testing.T) {
t.Fatalf("meta dir should be deleted")
}
// if doing a real restore, these dirs should exist in the real DB.
if err := os.MkdirAll(config.Data.Dir, 0777); err != nil {
t.Fatalf("error making restore dir: %s", err.Error())
}
if err := os.MkdirAll(config.Meta.Dir, 0777); err != nil {
t.Fatalf("error making restore dir: %s", err.Error())
}
// restore
cmd := restore.NewCommand()
if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", backupDir); err != nil {
if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", fullBackupDir); err != nil {
t.Fatalf("error restoring: %s", err.Error())
}
@ -91,6 +132,7 @@ func TestServer_BackupAndRestore(t *testing.T) {
s := OpenServer(config)
defer s.Close()
// 1. offline restore is correct
res, err := s.Query(`select * from "mydb"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
@ -98,6 +140,94 @@ func TestServer_BackupAndRestore(t *testing.T) {
if res != expected {
t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res)
}
_, port, err := net.SplitHostPort(config.BindAddress)
if err != nil {
t.Fatal(err)
}
// 2. online restore of a partial backup is correct.
hostAddress := net.JoinHostPort("localhost", port)
cmd.Run("-host", hostAddress, "-online", "-newdb", "mydbbak", "-db", "mydb", partialBackupDir)
// wait for the import to finish, and unlock the shard engine.
time.Sleep(time.Second)
res, err = s.Query(`select * from "mydbbak"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
if res != partialExpected {
t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
}
// 3. enterprise should be the same as the non-enterprise live restore
cmd.Run("-host", hostAddress, "-enterprise", "-newdb", "mydbbak2", "-db", "mydb", enterpriseBackupDir)
// wait for the import to finish, and unlock the shard engine.
time.Sleep(time.Second)
res, err = s.Query(`select * from "mydbbak2"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
if res != partialExpected {
t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
}
// 4. backup all DB's, then drop them, then restore them and all 3 above tests should pass again.
// now backup
bCmd := backup.NewCommand()
if err := bCmd.Run("-enterprise", "-host", hostAddress, enterpriseBackupDir); err != nil {
t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
}
res, err = s.Query(`drop database mydb; drop database mydbbak; drop database mydbbak2;`)
if err != nil {
t.Fatalf("Error dropping databases %s", err.Error())
}
// 3. enterprise should be the same as the non-enterprise live restore
cmd.Run("-host", hostAddress, "-enterprise", enterpriseBackupDir)
// wait for the import to finish, and unlock the shard engine.
time.Sleep(3 * time.Second)
res, err = s.Query(`show shards`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
fmt.Println(res)
res, err = s.Query(`select * from "mydbbak"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
if res != partialExpected {
t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
}
res, err = s.Query(`select * from "mydbbak2"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
if res != partialExpected {
t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
}
res, err = s.Query(`select * from "mydb"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
if res != expected {
t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res)
}
}
func freePort() string {

View File

@ -241,7 +241,7 @@ func OpenServerWithVersion(c *Config, version string) Server {
// OpenDefaultServer opens a test server with a default database & retention policy.
func OpenDefaultServer(c *Config) Server {
s := OpenServer(c)
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
panic(err)
}
return s
@ -529,7 +529,8 @@ func NewConfig() *Config {
return c
}
func newRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {
// NewRetentionPolicySpec builds a retention policy spec from a name, a
// replication factor, and a duration.
func NewRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {
	spec := meta.RetentionPolicySpec{Name: name}
	spec.ReplicaN = &rf
	spec.Duration = &duration
	return &spec
}
@ -733,7 +734,7 @@ func writeTestData(s Server, t *Test) error {
w.rp = t.retentionPolicy()
}
if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicySpec(w.rp, 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(w.db, NewRetentionPolicySpec(w.rp, 1, 0), true); err != nil {
return err
}
if res, err := s.Write(w.db, w.rp, w.data, t.params); err != nil {

View File

@ -102,7 +102,7 @@ func TestServer_Query_DropAndRecreateDatabase(t *testing.T) {
test := tests.load(t, "drop_and_recreate_database")
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@ -132,10 +132,10 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) {
test := tests.load(t, "drop_database_isolated")
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp1", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp1", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -165,7 +165,7 @@ func TestServer_Query_DeleteSeries(t *testing.T) {
test := tests.load(t, "delete_series_time")
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@ -194,7 +194,7 @@ func TestServer_Query_DeleteSeries_TagFilter(t *testing.T) {
test := tests.load(t, "delete_series_time_tag_filter")
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@ -224,7 +224,7 @@ func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
test := tests.load(t, "drop_and_recreate_series")
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@ -275,7 +275,7 @@ func TestServer_Query_DropSeriesFromRegex(t *testing.T) {
test := tests.load(t, "drop_series_from_regex")
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@ -567,7 +567,7 @@ func TestServer_Write_FieldTypeConflict(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -618,7 +618,7 @@ func TestServer_Write_LineProtocol_Float(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@ -643,7 +643,7 @@ func TestServer_Write_LineProtocol_Bool(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@ -668,7 +668,7 @@ func TestServer_Write_LineProtocol_String(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@ -693,7 +693,7 @@ func TestServer_Write_LineProtocol_Integer(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@ -718,7 +718,7 @@ func TestServer_Write_LineProtocol_Unsigned(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@ -744,7 +744,7 @@ func TestServer_Write_LineProtocol_Partial(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@ -3086,7 +3086,7 @@ func TestServer_Query_MergeMany(t *testing.T) {
defer s.Close()
// set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -3147,7 +3147,7 @@ func TestServer_Query_SLimitAndSOffset(t *testing.T) {
defer s.Close()
// set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -3204,7 +3204,7 @@ func TestServer_Query_Regex(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -4200,7 +4200,7 @@ func TestServer_Query_Aggregates_Math(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -4267,7 +4267,7 @@ func TestServer_Query_AggregateSelectors(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -4579,7 +4579,7 @@ func TestServer_Query_ExactTimeRange(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -4634,7 +4634,7 @@ func TestServer_Query_Selectors(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -4713,7 +4713,7 @@ func TestServer_Query_TopBottomInt(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -4967,7 +4967,7 @@ func TestServer_Query_TopBottomWriteTags(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5036,7 +5036,7 @@ func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5102,7 +5102,7 @@ func TestServer_Query_GroupByTimeCutoffs(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5182,7 +5182,7 @@ func TestServer_Query_MapType(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5258,7 +5258,7 @@ func TestServer_Query_Subqueries(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5419,7 +5419,7 @@ func TestServer_Query_SubqueryWithGroupBy(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5491,7 +5491,7 @@ func TestServer_Query_SubqueryMath(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5537,7 +5537,7 @@ func TestServer_Query_PercentileDerivative(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5586,7 +5586,7 @@ func TestServer_Query_UnderscoreMeasurement(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5631,7 +5631,7 @@ func TestServer_Write_Precision(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5746,7 +5746,7 @@ func TestServer_Query_Wildcards(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5864,7 +5864,7 @@ func TestServer_Query_WildcardExpansion(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -5942,7 +5942,7 @@ func TestServer_Query_AcrossShardsAndFields(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6014,7 +6014,7 @@ func TestServer_Query_OrderedAcrossShards(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6082,7 +6082,7 @@ func TestServer_Query_Where_Fields(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6302,7 +6302,7 @@ func TestServer_Query_Where_With_Tags(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6383,7 +6383,7 @@ func TestServer_Query_With_EmptyTags(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6484,7 +6484,7 @@ func TestServer_Query_LimitAndOffset(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6599,7 +6599,7 @@ func TestServer_Query_Fill(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6704,7 +6704,7 @@ func TestServer_Query_ImplicitFill(t *testing.T) {
s := OpenServer(config)
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6758,7 +6758,7 @@ func TestServer_Query_TimeZone(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6854,7 +6854,7 @@ func TestServer_Query_Chunk(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -6906,10 +6906,10 @@ func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7068,7 +7068,7 @@ func TestServer_Query_ShowQueries_Future(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7139,7 +7139,7 @@ func TestServer_Query_ShowSeries(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7274,7 +7274,7 @@ func TestServer_Query_ShowSeriesCardinalityEstimation(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7344,7 +7344,7 @@ func TestServer_Query_ShowSeriesExactCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7480,7 +7480,7 @@ func TestServer_Query_ShowStats(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7522,7 +7522,7 @@ func TestServer_Query_ShowMeasurements(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7643,7 +7643,7 @@ func TestServer_Query_ShowMeasurementCardinalityEstimation(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7712,7 +7712,7 @@ func TestServer_Query_ShowMeasurementExactCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7830,7 +7830,7 @@ func TestServer_Query_ShowTagKeys(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -7942,7 +7942,7 @@ func TestServer_Query_ShowTagValues(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8121,7 +8121,7 @@ func TestServer_Query_ShowTagKeyCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8321,7 +8321,7 @@ func TestServer_Query_ShowFieldKeys(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8385,7 +8385,7 @@ func TestServer_Query_ShowFieldKeyCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8467,7 +8467,7 @@ func TestServer_ContinuousQuery(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8588,7 +8588,7 @@ func TestServer_ContinuousQuery_Deadlock(t *testing.T) {
s.Close()
}()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8659,7 +8659,7 @@ func TestServer_Query_EvilIdentifiers(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8701,7 +8701,7 @@ func TestServer_Query_OrderByTime(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8779,7 +8779,7 @@ func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8830,7 +8830,7 @@ func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8881,7 +8881,7 @@ func TestServer_Query_IntoTarget(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8938,7 +8938,7 @@ func TestServer_Query_IntoTarget_Sparse(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -8997,7 +8997,7 @@ func TestServer_Query_DuplicateMeasurements(t *testing.T) {
defer s.Close()
// Create a second database.
if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -9098,7 +9098,7 @@ func TestServer_Query_DotProduct(t *testing.T) {
defer s.Close()
// Create a second database.
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -9178,7 +9178,7 @@ func TestServer_WhereTimeInclusive(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -9286,7 +9286,7 @@ func TestServer_Query_ImplicitEndTime(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -9341,7 +9341,7 @@ func TestServer_Query_Sample_Wildcard(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -9386,7 +9386,7 @@ func TestServer_Query_Sample_LimitOffset(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@ -9446,7 +9446,7 @@ func TestServer_NestedAggregateWithMathPanics(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}

View File

@ -25,6 +25,7 @@ import (
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/pkg/metrics"
intar "github.com/influxdata/influxdb/pkg/tar"
"github.com/influxdata/influxdb/pkg/tracing"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb"
@ -776,70 +777,20 @@ func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
if err != nil {
return err
}
tw := tar.NewWriter(w)
defer tw.Close()
// Remove the temporary snapshot dir
defer os.RemoveAll(path)
// Recursively read all files from path.
files, err := readDir(path, "")
if err != nil {
return err
}
// Filter paths to only changed files.
var filtered []string
for _, file := range files {
fi, err := os.Stat(filepath.Join(path, file))
if err != nil {
return err
} else if !fi.ModTime().After(since) {
continue
}
filtered = append(filtered, file)
}
if len(filtered) == 0 {
return nil
}
for _, f := range filtered {
if err := e.writeFileToBackup(f, basePath, filepath.Join(path, f), tw); err != nil {
return err
}
}
return nil
return intar.Stream(w, path, basePath, intar.SinceFilterTarFile(since))
}
func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
path, err := e.CreateSnapshot()
if err != nil {
return err
}
// Remove the temporary snapshot dir
defer os.RemoveAll(path)
tw := tar.NewWriter(w)
defer tw.Close()
// Recursively read all files from path.
files, err := readDir(path, "")
if err != nil {
return err
}
for _, file := range files {
if !strings.HasSuffix(file, ".tsm") {
if err := e.writeFileToBackup(file, basePath, filepath.Join(path, file), tw); err != nil {
return err
}
func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
return func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
if !strings.HasSuffix(fi.Name(), ".tsm") {
return intar.StreamFile(fi, shardRelativePath, fullPath, tw)
}
var tombstonePath string
f, err := os.Open(filepath.Join(path, file))
f, err := os.Open(fullPath)
if err != nil {
return err
}
@ -851,6 +802,7 @@ func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.
// Grab the tombstone file if one exists.
if r.HasTombstones() {
tombstonePath = filepath.Base(r.TombstoneFiles()[0].Path)
return intar.StreamFile(fi, shardRelativePath, tombstonePath, tw)
}
min, max := r.TimeRange()
@ -861,7 +813,7 @@ func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.
if min >= stun && min <= eun && max > eun || // overlap to the right
max >= stun && max <= eun && min < stun || // overlap to the left
min <= stun && max >= eun { // TSM file has a range LARGER than the boundary
err := e.filterFileToBackup(r, file, basePath, filepath.Join(path, file), start.UnixNano(), end.UnixNano(), tw)
err := e.filterFileToBackup(r, fi, shardRelativePath, fullPath, start.UnixNano(), end.UnixNano(), tw)
if err != nil {
if err := r.Close(); err != nil {
return err
@ -878,24 +830,26 @@ func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.
// the TSM file is 100% inside the range, so we can just write it without scanning each block
if min >= start.UnixNano() && max <= end.UnixNano() {
if err := e.writeFileToBackup(file, basePath, filepath.Join(path, file), tw); err != nil {
if err := intar.StreamFile(fi, shardRelativePath, fullPath, tw); err != nil {
return err
}
}
// if this TSM file had a tombstone we'll write out the whole thing too.
if tombstonePath != "" {
if err := e.writeFileToBackup(tombstonePath, basePath, filepath.Join(path, tombstonePath), tw); err != nil {
return err
}
}
return nil
}
return nil
}
func (e *Engine) filterFileToBackup(r *TSMReader, name, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error {
func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
path, err := e.CreateSnapshot()
if err != nil {
return err
}
// Remove the temporary snapshot dir
defer os.RemoveAll(path)
return intar.Stream(w, path, basePath, e.timeStampFilterTarFile(start, end))
}
func (e *Engine) filterFileToBackup(r *TSMReader, fi os.FileInfo, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error {
path := fullPath + ".tmp"
out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
@ -943,36 +897,7 @@ func (e *Engine) filterFileToBackup(r *TSMReader, name, shardRelativePath, fullP
return err
}
return e.writeFileToBackup(name, shardRelativePath, path, tw)
}
// writeFileToBackup copies the file into the tar archive. Files will use the shardRelativePath
// in their names. This should be the <db>/<retention policy>/<id> part of the path.
func (e *Engine) writeFileToBackup(name string, shardRelativePath, fullPath string, tw *tar.Writer) error {
f, err := os.Stat(fullPath)
if err != nil {
return err
}
h := &tar.Header{
Name: filepath.ToSlash(filepath.Join(shardRelativePath, name)),
ModTime: f.ModTime(),
Size: f.Size(),
Mode: int64(f.Mode()),
}
if err := tw.WriteHeader(h); err != nil {
return err
}
fr, err := os.Open(fullPath)
if err != nil {
return err
}
defer fr.Close()
_, err = io.CopyN(tw, fr, h.Size)
return err
return intar.StreamFile(fi, shardRelativePath, path, tw)
}
// Restore reads a tar archive generated by Backup().
@ -1034,12 +959,16 @@ func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {
readers := make([]chan seriesKey, 0, len(newFiles))
ext := fmt.Sprintf(".%s", TmpTSMFileExtension)
for _, f := range newFiles {
ch := make(chan seriesKey, 1)
readers = append(readers, ch)
// If asNew is true, the files created from readFileFromBackup will be new ones
// having a temp extension.
f = strings.TrimSuffix(f, ext)
if !strings.HasSuffix(f, TSMFileExtension) {
// This isn't a .tsm file.
continue
}
ch := make(chan seriesKey, 1)
readers = append(readers, ch)
fd, err := os.Open(f)
if err != nil {
@ -1089,8 +1018,12 @@ func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, as
return "", err
}
nativeFileName := filepath.FromSlash(hdr.Name)
if !strings.HasSuffix(hdr.Name, TSMFileExtension) {
// This isn't a .tsm file.
return "", nil
}
nativeFileName := filepath.FromSlash(hdr.Name)
// Skip file if it does not have a matching prefix.
if !filepath.HasPrefix(nativeFileName, shardRelativePath) {
return "", nil
@ -1100,6 +1033,14 @@ func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, as
return "", err
}
// If this is a directory entry (usually just `index` for tsi), create it an move on.
if hdr.Typeflag == tar.TypeDir {
if err := os.MkdirAll(filepath.Join(e.path, filename), os.FileMode(hdr.Mode).Perm()); err != nil {
return "", err
}
return "", nil
}
if asNew {
filename = fmt.Sprintf("%09d-%09d.%s", e.FileStore.NextGeneration(), 1, TSMFileExtension)
}
@ -1136,9 +1077,14 @@ func (e *Engine) addToIndexFromKey(key []byte, fieldType influxql.DataType) erro
return err
}
tags := models.ParseTags(seriesKey)
// Build in-memory index, if necessary.
if e.index.Type() == inmem.IndexName {
if err := e.index.InitializeSeries(seriesKey, name, models.ParseTags(seriesKey)); err != nil {
if err := e.index.InitializeSeries(seriesKey, name, tags); err != nil {
return err
}
} else {
if err := e.index.CreateSeriesIfNotExists(seriesKey, name, tags); err != nil {
return err
}
}

View File

@ -1,5 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: internal/meta.proto
// DO NOT EDIT!
/*
Package tsdb is a generated protocol buffer package.
@ -104,9 +105,8 @@ func (m *MeasurementFields) GetFields() []*Field {
}
type Field struct {
ID int32 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
Type int32 `protobuf:"varint,3,opt,name=Type,proto3" json:"Type,omitempty"`
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
Type int32 `protobuf:"varint,2,opt,name=Type,proto3" json:"Type,omitempty"`
}
func (m *Field) Reset() { *m = Field{} }
@ -114,13 +114,6 @@ func (m *Field) String() string { return proto.CompactTextString(m) }
func (*Field) ProtoMessage() {}
func (*Field) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{3} }
func (m *Field) GetID() int32 {
if m != nil {
return m.ID
}
return 0
}
func (m *Field) GetName() string {
if m != nil {
return m.Name
@ -162,21 +155,20 @@ func init() {
func init() { proto.RegisterFile("internal/meta.proto", fileDescriptorMeta) }
var fileDescriptorMeta = []byte{
// 242 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbd, 0x4b, 0x03, 0x41,
0x10, 0xc5, 0xb9, 0xbd, 0x0f, 0xc8, 0x44, 0x44, 0x27, 0x82, 0xdb, 0x08, 0x61, 0x6d, 0xd2, 0x78,
0x82, 0x56, 0x62, 0x61, 0x13, 0x84, 0xe0, 0x47, 0xb1, 0x39, 0xec, 0x27, 0x64, 0x38, 0x0e, 0xee,
0x2e, 0x61, 0x77, 0x53, 0xe4, 0xbf, 0x97, 0xcc, 0x1e, 0x12, 0x35, 0xdd, 0xdb, 0x37, 0xf3, 0xe6,
0xfd, 0x58, 0x98, 0x34, 0x7d, 0x60, 0xd7, 0x53, 0x7b, 0xdf, 0x71, 0xa0, 0x72, 0xeb, 0x36, 0x61,
0x83, 0x59, 0xf0, 0xeb, 0x95, 0x79, 0x82, 0x62, 0xc9, 0xae, 0x61, 0x8f, 0x17, 0x90, 0xbe, 0xf1,
0x5e, 0x27, 0xd3, 0x64, 0x36, 0xb2, 0x07, 0x89, 0x37, 0x90, 0x55, 0x54, 0x7b, 0xad, 0xa6, 0xe9,
0x6c, 0xfc, 0x30, 0x2a, 0x0f, 0x81, 0xb2, 0xa2, 0xda, 0x8a, 0x6d, 0xee, 0x20, 0xad, 0xa8, 0x3e,
0x91, 0xbb, 0x82, 0xfc, 0x8b, 0xda, 0x1d, 0x6b, 0x25, 0x5e, 0x7c, 0x98, 0x77, 0xb8, 0xfc, 0x60,
0xf2, 0x3b, 0xc7, 0x1d, 0xf7, 0xe1, 0xb5, 0xe1, 0x76, 0xed, 0x11, 0x21, 0xfb, 0xa4, 0x8e, 0x87,
0xb4, 0x68, 0xbc, 0x85, 0x22, 0x4e, 0x87, 0xe2, 0x71, 0x2c, 0x16, 0xcf, 0x0e, 0x23, 0xf3, 0x02,
0xb9, 0x28, 0x3c, 0x07, 0xb5, 0x98, 0x4b, 0x3e, 0xb7, 0x6a, 0x31, 0xff, 0xb9, 0xa8, 0x8e, 0x2e,
0x22, 0x64, 0xd5, 0x7e, 0xcb, 0x3a, 0x95, 0x2d, 0xd1, 0xc6, 0xc2, 0xe4, 0x2f, 0xce, 0x92, 0x03,
0x3e, 0xc3, 0xd9, 0x91, 0xed, 0x75, 0x22, 0x08, 0xd7, 0x11, 0xe1, 0x1f, 0xbf, 0xfd, 0xb5, 0xbc,
0x2a, 0xe4, 0x67, 0x1f, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xac, 0xee, 0x08, 0x52, 0x70, 0x01,
0x00, 0x00,
// 225 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbf, 0x6b, 0xc3, 0x30,
0x10, 0x85, 0x71, 0x2c, 0x1b, 0x72, 0xe9, 0xd0, 0x5e, 0x0a, 0xd5, 0x52, 0x08, 0xea, 0x92, 0xa5,
0x0e, 0xb4, 0x53, 0xe9, 0xde, 0xa5, 0x3f, 0x06, 0x45, 0x74, 0xbf, 0x90, 0xc3, 0x18, 0x6c, 0x27,
0x48, 0xca, 0x90, 0xff, 0xbe, 0xf8, 0xe4, 0xa1, 0x6d, 0xbc, 0x3d, 0x7d, 0xa7, 0xa7, 0x4f, 0x1c,
0x2c, 0x9b, 0x3e, 0xb2, 0xef, 0xa9, 0xdd, 0x74, 0x1c, 0xa9, 0x3a, 0xfa, 0x43, 0x3c, 0xa0, 0x8a,
0x61, 0xbf, 0x33, 0x2f, 0x50, 0x6e, 0xd9, 0x37, 0x1c, 0xf0, 0x1a, 0xf2, 0x77, 0x3e, 0xeb, 0x6c,
0x95, 0xad, 0xe7, 0x76, 0x88, 0x78, 0x0f, 0xca, 0x51, 0x1d, 0xf4, 0x6c, 0x95, 0xaf, 0x17, 0x4f,
0xf3, 0x6a, 0x28, 0x54, 0x8e, 0x6a, 0x2b, 0xd8, 0x3c, 0x42, 0xee, 0xa8, 0x9e, 0xe8, 0xdd, 0x42,
0xf1, 0x4d, 0xed, 0x89, 0xf5, 0x4c, 0x58, 0x3a, 0x98, 0x0f, 0xb8, 0xf9, 0x64, 0x0a, 0x27, 0xcf,
0x1d, 0xf7, 0xf1, 0xad, 0xe1, 0x76, 0x1f, 0x10, 0x41, 0x7d, 0x51, 0xc7, 0x63, 0x5b, 0x32, 0x3e,
0x40, 0x99, 0xa6, 0xa3, 0x78, 0x91, 0xc4, 0xc2, 0xec, 0x38, 0x32, 0x1b, 0x28, 0x24, 0x4d, 0xbe,
0x80, 0xa0, 0xdc, 0xf9, 0x98, 0xfc, 0x85, 0x95, 0x6c, 0x2c, 0x2c, 0xff, 0xeb, 0xb7, 0x1c, 0xf1,
0x15, 0xae, 0x7e, 0xe1, 0xa0, 0x33, 0x51, 0xde, 0x25, 0xe5, 0xc5, 0x7f, 0xed, 0x9f, 0xcb, 0xbb,
0x52, 0x36, 0xf9, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x31, 0x1f, 0xb9, 0x60, 0x01, 0x00,
0x00,
}