Replace segment index file path with index file id within segment index meta (#19866)

Signed-off-by: yun.zhang <yun.zhang@zilliz.com>

Signed-off-by: yun.zhang <yun.zhang@zilliz.com>
pull/19898/head
jaime 2022-10-19 16:55:27 +08:00 committed by GitHub
parent 99df2d9b54
commit b79687687d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 1519 additions and 1550 deletions

View File

@ -3,6 +3,7 @@ package meta
import (
"fmt"
"sort"
"strings"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -142,21 +143,32 @@ func combineToSegmentIndexesMeta220(segmentIndexes SegmentIndexesMeta210, indexB
if !ok {
return nil, fmt.Errorf("index build meta not found, segment id: %d, index id: %d, index build id: %d", segID, indexID, record.GetBuildID())
}
fileKeys := make([]string, len(buildMeta.GetIndexFilePaths()))
for i, filePath := range buildMeta.GetIndexFilePaths() {
parts := strings.Split(filePath, "/")
if len(parts) == 0 {
return nil, fmt.Errorf("invaild index file path: %s", filePath)
}
fileKeys[i] = parts[len(parts)-1]
}
segmentIndexModel := &model.SegmentIndex{
SegmentID: segID,
CollectionID: record.GetCollectionID(),
PartitionID: record.GetPartitionID(),
NumRows: buildMeta.GetReq().GetNumRows(),
IndexID: indexID,
BuildID: record.GetBuildID(),
NodeID: buildMeta.GetNodeID(),
IndexVersion: buildMeta.GetIndexVersion(),
IndexState: buildMeta.GetState(),
FailReason: buildMeta.GetFailReason(),
IsDeleted: buildMeta.GetMarkDeleted(),
CreateTime: record.GetCreateTime(),
IndexFilePaths: buildMeta.GetIndexFilePaths(),
IndexSize: buildMeta.GetSerializeSize(),
SegmentID: segID,
CollectionID: record.GetCollectionID(),
PartitionID: record.GetPartitionID(),
NumRows: buildMeta.GetReq().GetNumRows(),
IndexID: indexID,
BuildID: record.GetBuildID(),
NodeID: buildMeta.GetNodeID(),
IndexVersion: buildMeta.GetIndexVersion(),
IndexState: buildMeta.GetState(),
FailReason: buildMeta.GetFailReason(),
IsDeleted: buildMeta.GetMarkDeleted(),
CreateTime: record.GetCreateTime(),
IndexFileKeys: fileKeys,
IndexSize: buildMeta.GetSerializeSize(),
}
segmentIndexModels.AddRecord(segID, indexID, segmentIndexModel)
}

View File

@ -22,6 +22,8 @@ import (
"sync"
"time"
"github.com/milvus-io/milvus/internal/util/metautil"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
@ -241,16 +243,18 @@ func (gc *garbageCollector) recycleUnusedIndexFiles() {
continue
}
log.Ctx(gc.ctx).Info("index meta can be recycled, recycle index files", zap.Int64("buildID", buildID))
canRecycle, indexFilePaths := gc.metaTable.GetIndexFilePathByBuildID(buildID)
canRecycle, segIdx := gc.metaTable.GetSegmentIndexByBuildID(buildID)
if !canRecycle {
// Even if the index is marked as deleted, the index file will not be recycled, wait for the next gc,
// and delete all index files about the buildID at one time.
log.Ctx(gc.ctx).Warn("IndexCoord garbageCollector can not recycle index files", zap.Int64("buildID", buildID))
continue
}
filesMap := make(map[string]bool)
for _, file := range indexFilePaths {
filesMap[file] = true
filesMap := make(map[string]struct{})
for _, fileID := range segIdx.IndexFileKeys {
filepath := metautil.BuildSegmentIndexFilePath(gc.chunkManager.RootPath(), segIdx.BuildID, segIdx.IndexVersion,
segIdx.PartitionID, segIdx.SegmentID, fileID)
filesMap[filepath] = struct{}{}
}
files, _, err := gc.chunkManager.ListWithPrefix(gc.ctx, key, true)
if err != nil {

View File

@ -79,185 +79,185 @@ func createGarbageCollectorMetaTable(catalog metastore.IndexCoordCatalog) *metaT
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: 3,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 100,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: 3,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 100,
WriteHandoff: false,
},
},
segID + 1: {
indexID: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 100,
WriteHandoff: false,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 100,
WriteHandoff: false,
},
},
segID + 2: {
indexID + 2: {
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 2,
NodeID: 0,
IndexVersion: 1,
IndexState: 1,
FailReason: "",
IsDeleted: true,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 2,
NodeID: 0,
IndexVersion: 1,
IndexState: 1,
FailReason: "",
IsDeleted: true,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
segID + 3: {
indexID + 2: {
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: 1,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: 1,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
},
},
segID + 4: {
indexID + 2: {
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: []string{},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: []string{},
IndexSize: 0,
WriteHandoff: false,
},
},
},
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
buildID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: 3,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 100,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: 3,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 100,
WriteHandoff: false,
},
buildID + 1: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 100,
WriteHandoff: false,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 100,
WriteHandoff: false,
},
buildID + 2: {
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 2,
NodeID: 0,
IndexVersion: 1,
IndexState: 1,
FailReason: "",
IsDeleted: true,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 2,
NodeID: 0,
IndexVersion: 1,
IndexState: 1,
FailReason: "",
IsDeleted: true,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
buildID + 3: {
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: 3,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: 3,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
},
buildID + 4: {
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: []string{},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 2,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: 2,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: []string{},
IndexSize: 0,
WriteHandoff: false,
},
},
}
@ -695,20 +695,20 @@ func TestIndexCoord_recycleUnusedMetaLoop(t *testing.T) {
metaTable: &metaTable{
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
1: {
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 0,
IndexState: 0,
FailReason: "",
IsDeleted: true,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 0,
IndexState: 0,
FailReason: "",
IsDeleted: true,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
},
},
catalog: &indexcoord.Catalog{

View File

@ -207,7 +207,7 @@ func (hd *handoff) process(segID UniqueID, front bool) {
IndexID: indexInfo.IndexID,
BuildID: indexInfo.BuildID,
IndexParams: hd.meta.GetIndexParams(info.CollectionID, indexInfo.IndexID),
//IndexFilePaths: nil,
//IndexFileKeys: nil,
//IndexSize: 0,
})
}

View File

@ -148,7 +148,7 @@ func Test_newHandoff(t *testing.T) {
err = hd.meta.FinishTask(&indexpb.IndexTaskInfo{
BuildID: buildID,
State: commonpb.IndexState_Finished,
IndexFiles: []string{"file1", "file2"},
IndexFileKeys: []string{"file1", "file2"},
SerializedSize: 100,
FailReason: "",
})
@ -156,7 +156,7 @@ func Test_newHandoff(t *testing.T) {
err = hd.meta.FinishTask(&indexpb.IndexTaskInfo{
BuildID: buildID + 3,
State: commonpb.IndexState_Failed,
IndexFiles: nil,
IndexFileKeys: nil,
SerializedSize: 0,
FailReason: "failed",
})

View File

@ -219,7 +219,7 @@ func (ib *indexBuilder) process(buildID UniqueID) bool {
if err := ib.meta.FinishTask(&indexpb.IndexTaskInfo{
BuildID: buildID,
State: commonpb.IndexState_Finished,
IndexFiles: nil,
IndexFileKeys: nil,
SerializedSize: 0,
FailReason: "",
}); err != nil {

View File

@ -68,379 +68,379 @@ func createMetaTable(catalog metastore.IndexCoordCatalog) *metaTable {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 1: {
indexID: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 2: {
indexID: {
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 3: {
indexID: {
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 4: {
indexID: {
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 5: {
indexID: {
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 6: {
indexID: {
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 7: {
indexID: {
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 8: {
indexID: {
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 9: {
indexID: {
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
segID + 10: {
indexID: {
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
},
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
buildID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 1: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 2: {
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 3: {
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 4: {
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 5: {
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 6: {
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 7: {
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 8: {
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 9: {
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
buildID + 10: {
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 0,
},
},
}
@ -494,20 +494,20 @@ func TestIndexBuilder(t *testing.T) {
t.Run("enqueue", func(t *testing.T) {
segIdx := &model.SegmentIndex{
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: 0,
IndexVersion: 0,
IndexState: 0,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: 0,
IndexVersion: 0,
IndexState: 0,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
err := ib.meta.AddIndex(segIdx)
assert.NoError(t, err)
@ -932,7 +932,7 @@ func TestIndexBuilder_Error(t *testing.T) {
{
BuildID: buildID,
State: commonpb.IndexState_Finished,
IndexFiles: nil,
IndexFileKeys: nil,
SerializedSize: 0,
FailReason: "",
},
@ -998,7 +998,7 @@ func Test_indexBuilder_getTaskState(t *testing.T) {
{
BuildID: buildID,
State: commonpb.IndexState_Retry,
IndexFiles: nil,
IndexFileKeys: nil,
SerializedSize: 0,
FailReason: "create index fail",
},
@ -1087,7 +1087,7 @@ func Test_indexBuilder_getTaskState(t *testing.T) {
{
BuildID: buildID,
State: commonpb.IndexState_InProgress,
IndexFiles: nil,
IndexFileKeys: nil,
SerializedSize: 0,
FailReason: "",
},

View File

@ -28,6 +28,8 @@ import (
"syscall"
"time"
"github.com/milvus-io/milvus/internal/util/metautil"
"github.com/milvus-io/milvus/internal/util/errorutil"
"golang.org/x/sync/errgroup"
@ -811,6 +813,9 @@ func (i *IndexCoord) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInf
if len(segIdxes) != 0 {
ret.SegmentInfo[segID].EnableIndex = true
for _, segIdx := range segIdxes {
indexFilePaths := metautil.BuildSegmentIndexFilePaths(i.chunkManager.RootPath(), segIdx.BuildID, segIdx.IndexVersion,
segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexFileKeys)
ret.SegmentInfo[segID].IndexInfos = append(ret.SegmentInfo[segID].IndexInfos,
&indexpb.IndexFilePathInfo{
SegmentID: segID,
@ -819,7 +824,7 @@ func (i *IndexCoord) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInf
BuildID: segIdx.BuildID,
IndexName: i.metaTable.GetIndexNameByID(segIdx.CollectionID, segIdx.IndexID),
IndexParams: i.metaTable.GetIndexParams(segIdx.CollectionID, segIdx.IndexID),
IndexFilePaths: segIdx.IndexFilePaths,
IndexFilePaths: indexFilePaths,
SerializedSize: segIdx.IndexSize,
IndexVersion: segIdx.IndexVersion,
NumRows: segIdx.NumRows,

View File

@ -828,7 +828,7 @@ func TestIndexCoord_DropIndex(t *testing.T) {
// indexMeta: &indexpb.IndexMeta{
// IndexBuildID: 1,
// State: commonpb.IndexState_Finished,
// IndexFilePaths: []string{"indexFiles-1", "indexFiles-2"},
// IndexFileKeys: []string{"indexFiles-1", "indexFiles-2"},
// },
// },
// 2: {
@ -848,14 +848,14 @@ func TestIndexCoord_DropIndex(t *testing.T) {
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// assert.Equal(t, 1, len(resp.FilePaths))
// assert.ElementsMatch(t, resp.FilePaths[0].IndexFilePaths, []string{"indexFiles-1", "indexFiles-2"})
// assert.ElementsMatch(t, resp.FilePaths[0].IndexFileKeys, []string{"indexFiles-1", "indexFiles-2"})
// })
//
// t.Run("GetIndexFilePaths failed", func(t *testing.T) {
// resp, err := ic.GetIndexFilePaths(context.Background(), &indexpb.GetIndexFilePathsRequest{IndexBuildIDs: []UniqueID{2}})
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// assert.Equal(t, 0, len(resp.FilePaths[0].IndexFilePaths))
// assert.Equal(t, 0, len(resp.FilePaths[0].IndexFileKeys))
// })
//
// t.Run("set DataCoord with nil", func(t *testing.T) {

View File

@ -29,6 +29,7 @@ import (
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore"
@ -790,54 +791,23 @@ func (mt *metaTable) MarkSegmentsIndexAsDeleted(selector func(index *model.Segme
return nil
}
// GetIndexFilePathInfo gets the index file paths from meta table.
func (mt *metaTable) GetIndexFilePathInfo(segID, indexID UniqueID) (*indexpb.IndexFilePathInfo, error) {
log.Debug("IndexCoord get index file path from meta table", zap.Int64("segmentID", segID))
mt.segmentIndexLock.RLock()
defer mt.segmentIndexLock.RUnlock()
ret := &indexpb.IndexFilePathInfo{
SegmentID: segID,
IndexID: indexID,
}
segIndexes, ok := mt.segmentIndexes[segID]
if !ok {
return nil, ErrNotIndexExist
}
segIdx, ok := segIndexes[indexID]
if !ok || segIdx.IsDeleted {
return nil, ErrNotIndexExist
}
if segIdx.IndexState != commonpb.IndexState_Finished {
return nil, fmt.Errorf("the index state is not finish on segment: %d, index state = %s", segID, segIdx.IndexState.String())
}
ret.BuildID = segIdx.BuildID
ret.IndexFilePaths = segIdx.IndexFilePaths
ret.SerializedSize = segIdx.IndexSize
log.Debug("IndexCoord get index file path success", zap.Int64("segID", segID),
zap.Strings("index files num", ret.IndexFilePaths))
return ret, nil
}
func (mt *metaTable) GetIndexFilePathByBuildID(buildID UniqueID) (bool, []string) {
func (mt *metaTable) GetSegmentIndexByBuildID(buildID UniqueID) (bool, *model.SegmentIndex) {
mt.segmentIndexLock.RLock()
defer mt.segmentIndexLock.RUnlock()
log.Debug("IndexCoord get index file path from meta table", zap.Int64("buildID", buildID))
segIdx, ok := mt.buildID2SegmentIndex[buildID]
if !ok || segIdx.IsDeleted {
return false, []string{}
return false, nil
}
if segIdx.IndexState != commonpb.IndexState_Finished && segIdx.IndexState != commonpb.IndexState_Failed {
return false, []string{}
return false, nil
}
log.Debug("IndexCoord get index file path success", zap.Int64("buildID", buildID),
zap.Strings("index files num", segIdx.IndexFilePaths))
return true, segIdx.IndexFilePaths
log.Debug("IndexCoord get segment index file path success", zap.Int64("buildID", buildID),
zap.Int("index files num", len(segIdx.IndexFileKeys)))
return true, segIdx
}
func (mt *metaTable) IsIndexDeleted(collID, indexID UniqueID) bool {
@ -1051,7 +1021,7 @@ func (mt *metaTable) FinishTask(taskInfo *indexpb.IndexTaskInfo) error {
}
updateFunc := func(segIdx *model.SegmentIndex) error {
segIdx.IndexState = taskInfo.State
segIdx.IndexFilePaths = taskInfo.IndexFiles
segIdx.IndexFileKeys = common.CloneStringList(taskInfo.IndexFileKeys)
segIdx.FailReason = taskInfo.FailReason
segIdx.IndexSize = taskInfo.SerializedSize
return mt.alterSegmentIndexes([]*model.SegmentIndex{segIdx})

View File

@ -145,39 +145,39 @@ func constructMetaTable(catalog metastore.IndexCoordCatalog) *metaTable {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: &model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 1,
IsDeleted: false,
CreateTime: createTs,
IndexFilePaths: []string{"file1", "file2"},
IndexSize: 1024,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 1,
IsDeleted: false,
CreateTime: createTs,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1024,
},
},
},
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
buildID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 1,
IsDeleted: false,
CreateTime: createTs,
IndexFilePaths: []string{"file1", "file2"},
IndexSize: 1024,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 1,
IsDeleted: false,
CreateTime: createTs,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1024,
},
},
}
@ -262,20 +262,20 @@ func TestMetaTable_CreateIndex(t *testing.T) {
func TestMetaTable_AddIndex(t *testing.T) {
newBuildID := buildID + 2
segIdx := &model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: newBuildID,
NodeID: 0,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: newBuildID,
NodeID: 0,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("success", func(t *testing.T) {
@ -310,17 +310,17 @@ func TestMetaTable_AddIndex(t *testing.T) {
func TestMetaTable_UpdateVersion(t *testing.T) {
newBuildID := buildID + 3
segIdx := &model.SegmentIndex{
SegmentID: segID,
IndexID: indexID,
BuildID: newBuildID,
NodeID: 0,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID,
IndexID: indexID,
BuildID: newBuildID,
NodeID: 0,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("success", func(t *testing.T) {
kv := &mockETCDKV{
@ -396,17 +396,17 @@ func TestMetaTable_UpdateVersion(t *testing.T) {
func TestMetaTable_BuildIndex(t *testing.T) {
newBuildID := buildID + 4
segIdx := &model.SegmentIndex{
SegmentID: segID,
IndexID: indexID,
BuildID: newBuildID,
NodeID: 0,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID,
IndexID: indexID,
BuildID: newBuildID,
NodeID: 0,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("success and fail", func(t *testing.T) {
kv := &mockETCDKV{
@ -731,97 +731,43 @@ func TestMetaTable_MarkSegmentsIndexAsDeleted(t *testing.T) {
})
}
func TestMetaTable_GetIndexFilePathInfo(t *testing.T) {
t.Run("success", func(t *testing.T) {
mt := constructMetaTable(&indexcoord.Catalog{})
info, err := mt.GetIndexFilePathInfo(segID, indexID)
assert.NoError(t, err)
assert.ElementsMatch(t, []string{"file1", "file2"}, info.IndexFilePaths)
})
t.Run("fail", func(t *testing.T) {
mt := constructMetaTable(&indexcoord.Catalog{
Txn: &mockETCDKV{
save: func(s string, s2 string) error {
return nil
},
},
})
info, err := mt.GetIndexFilePathInfo(segID, indexID)
assert.NoError(t, err)
assert.ElementsMatch(t, []string{"file1", "file2"}, info.IndexFilePaths)
info, err = mt.GetIndexFilePathInfo(segID+1, indexID)
assert.Error(t, err)
assert.Nil(t, info)
info, err = mt.GetIndexFilePathInfo(segID, indexID+1)
assert.Error(t, err)
assert.Nil(t, info)
err = mt.AddIndex(&model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID + 1,
PartitionID: partID + 1,
NumRows: 1024,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: nodeID + 1,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
})
assert.NoError(t, err)
info, err = mt.GetIndexFilePathInfo(segID+1, indexID+1)
assert.Error(t, err)
assert.Nil(t, info)
})
}
func TestMetaTable_GetIndexFilePathByBuildID(t *testing.T) {
func TestMetaTable_GetSegmentIndexByBuildID(t *testing.T) {
mt := constructMetaTable(&indexcoord.Catalog{
Txn: &mockETCDKV{
save: func(s string, s2 string) error {
return nil
},
}})
canRecycle, files := mt.GetIndexFilePathByBuildID(buildID)
canRecycle, segIdx := mt.GetSegmentIndexByBuildID(buildID)
assert.True(t, canRecycle)
assert.ElementsMatch(t, []string{"file1", "file2"}, files)
assert.ElementsMatch(t, []string{"file1", "file2"}, segIdx.IndexFileKeys)
segIdx := &model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
segIdx = &model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
err := mt.AddIndex(segIdx)
assert.NoError(t, err)
canRecycle, files = mt.GetIndexFilePathByBuildID(buildID + 1)
canRecycle, segIdx = mt.GetSegmentIndexByBuildID(buildID + 1)
assert.False(t, canRecycle)
assert.Zero(t, len(files))
assert.Nil(t, segIdx)
canRecycle, files = mt.GetIndexFilePathByBuildID(buildID + 2)
canRecycle, segIdx = mt.GetSegmentIndexByBuildID(buildID + 2)
assert.False(t, canRecycle)
assert.Zero(t, len(files))
assert.Nil(t, segIdx)
}
func TestMetaTable_IsIndexDeleted(t *testing.T) {
@ -1069,41 +1015,41 @@ func TestMetaTable_ResetMeta(t *testing.T) {
catalog: &indexcoord.Catalog{Txn: NewMockEtcdKV()},
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
buildID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1024,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
},
@ -1132,20 +1078,20 @@ func TestMetaTable_ResetMeta(t *testing.T) {
func TestMetaTable_FinishTask(t *testing.T) {
segIdx := &model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10240,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1234,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10240,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1234,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("success", func(t *testing.T) {
mt := constructMetaTable(&indexcoord.Catalog{
@ -1164,7 +1110,7 @@ func TestMetaTable_FinishTask(t *testing.T) {
err = mt.FinishTask(&indexpb.IndexTaskInfo{
BuildID: buildID + 1,
State: commonpb.IndexState_Finished,
IndexFiles: []string{"file3", "file4"},
IndexFileKeys: []string{"file3", "file4"},
SerializedSize: 1025,
FailReason: "",
})
@ -1172,7 +1118,7 @@ func TestMetaTable_FinishTask(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, commonpb.IndexState_Finished, mt.buildID2SegmentIndex[buildID+1].IndexState)
assert.Equal(t, uint64(1025), mt.buildID2SegmentIndex[buildID+1].IndexSize)
assert.ElementsMatch(t, []string{"file3", "file4"}, mt.buildID2SegmentIndex[buildID+1].IndexFilePaths)
assert.ElementsMatch(t, []string{"file3", "file4"}, mt.buildID2SegmentIndex[buildID+1].IndexFileKeys)
})
t.Run("state failed", func(t *testing.T) {
@ -1192,7 +1138,7 @@ func TestMetaTable_FinishTask(t *testing.T) {
err = mt.FinishTask(&indexpb.IndexTaskInfo{
BuildID: buildID + 1,
State: commonpb.IndexState_Failed,
IndexFiles: []string{},
IndexFileKeys: []string{},
SerializedSize: 0,
FailReason: "failed",
})
@ -1218,7 +1164,7 @@ func TestMetaTable_FinishTask(t *testing.T) {
err = mt.FinishTask(&indexpb.IndexTaskInfo{
BuildID: buildID + 1,
State: commonpb.IndexState_Finished,
IndexFiles: []string{"file3", "file4"},
IndexFileKeys: []string{"file3", "file4"},
SerializedSize: 1025,
FailReason: "",
})

View File

@ -101,9 +101,9 @@ func NewIndexNodeMock() *Mock {
indexInfos := make([]*indexpb.IndexTaskInfo, 0)
for _, buildID := range in.BuildIDs {
indexInfos = append(indexInfos, &indexpb.IndexTaskInfo{
BuildID: buildID,
State: commonpb.IndexState_Finished,
IndexFiles: []string{"file1", "file2"},
BuildID: buildID,
State: commonpb.IndexState_Finished,
IndexFileKeys: []string{"file1", "file2"},
})
}
return &indexpb.QueryJobsResponse{

View File

@ -10,6 +10,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/commonpb"
"github.com/milvus-io/milvus-proto/go-api/milvuspb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/indexpb"
@ -107,7 +108,7 @@ func (i *IndexNode) QueryJobs(ctx context.Context, req *indexpb.QueryJobsRequest
if ClusterID == req.ClusterID {
infos[buildID] = &taskInfo{
state: info.state,
indexFiles: info.indexFiles[:],
fileKeys: common.CloneStringList(info.fileKeys),
serializedSize: info.serializedSize,
failReason: info.failReason,
}
@ -125,12 +126,12 @@ func (i *IndexNode) QueryJobs(ctx context.Context, req *indexpb.QueryJobsRequest
ret.IndexInfos = append(ret.IndexInfos, &indexpb.IndexTaskInfo{
BuildID: buildID,
State: commonpb.IndexState_IndexStateNone,
IndexFiles: nil,
IndexFileKeys: nil,
SerializedSize: 0,
})
if info, ok := infos[buildID]; ok {
ret.IndexInfos[i].State = info.state
ret.IndexInfos[i].IndexFiles = info.indexFiles
ret.IndexInfos[i].IndexFileKeys = info.fileKeys
ret.IndexInfos[i].SerializedSize = info.serializedSize
ret.IndexInfos[i].FailReason = info.failReason
log.Ctx(ctx).Debug("querying index build task", zap.String("ClusterID", req.ClusterID),

View File

@ -8,6 +8,8 @@ import (
"testing"
"time"
"github.com/milvus-io/milvus/internal/util/metautil"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
@ -123,7 +125,9 @@ func TestIndexNodeSimple(t *testing.T) {
}
assert.NotNil(t, idxInfo)
for _, idxFile := range idxInfo.IndexFiles {
for _, idxFileID := range idxInfo.IndexFileKeys {
idxFile := metautil.BuildSegmentIndexFilePath(mockChunkMgr.RootPath(), buildID, 0,
partID, segID, idxFileID)
_, ok := mockChunkMgr.indexedData.Load(idxFile)
assert.True(t, ok)
t.Logf("indexed file: %s", idxFile)
@ -310,11 +314,13 @@ Loop:
for _, job := range jobresp.IndexInfos {
task := tasks[job.BuildID-buildID0]
if job.State == commonpb.IndexState_Finished {
for _, idxFile := range job.IndexFiles {
for _, idxFileID := range job.IndexFileKeys {
idxFile := metautil.BuildSegmentIndexFilePath(mockChunkMgr.RootPath(), task.buildID,
0, task.partID, task.segID, idxFileID)
_, ok := mockChunkMgr.indexedData.Load(idxFile)
assert.True(t, ok)
}
t.Logf("buildID: %d, indexFiles: %v", job.BuildID, job.IndexFiles)
t.Logf("buildID: %d, indexFiles: %v", job.BuildID, job.IndexFileKeys)
} else {
_, ok := mockChunkMgr.indexedData.Load(dataPath(task.collID, task.partID, task.segID))
assert.False(t, ok)

View File

@ -158,7 +158,7 @@ import (
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
// }
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFileKeys)
// defer func() {
// for k := range kvs {
// err = in.chunkManager.Remove(k)
@ -269,7 +269,7 @@ import (
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
// }
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFileKeys)
// defer func() {
// for k := range kvs {
// err = in.chunkManager.Remove(k)
@ -388,7 +388,7 @@ import (
// // err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// // assert.Nil(t, err)
// //}
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFileKeys)
// defer func() {
// for k := range kvs {
// err = in.chunkManager.Remove(k)

View File

@ -21,9 +21,9 @@ import (
"encoding/json"
"errors"
"fmt"
"path"
"runtime"
"strconv"
"strings"
"time"
"go.uber.org/zap"
@ -39,6 +39,7 @@ import (
"github.com/milvus-io/milvus/internal/util/indexparamcheck"
"github.com/milvus-io/milvus/internal/util/indexparams"
"github.com/milvus-io/milvus/internal/util/logutil"
"github.com/milvus-io/milvus/internal/util/metautil"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/timerecord"
)
@ -52,7 +53,7 @@ type Blob = storage.Blob
type taskInfo struct {
cancel context.CancelFunc
state commonpb.IndexState
indexFiles []string
fileKeys []string
serializedSize uint64
failReason string
@ -408,15 +409,13 @@ func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
}
blobCnt := len(it.indexBlobs)
getSavePathByKey := func(key string) string {
return path.Join(it.req.IndexFilePrefix, strconv.Itoa(int(it.req.BuildID)), strconv.Itoa(int(it.req.IndexVersion)),
strconv.Itoa(int(it.partitionID)), strconv.Itoa(int(it.segmentID)), key)
}
savePaths := make([]string, blobCnt)
saveFileKeys := make([]string, blobCnt)
saveIndexFile := func(idx int) error {
blob := it.indexBlobs[idx]
savePath := getSavePathByKey(blob.Key)
savePath := metautil.BuildSegmentIndexFilePath(it.cm.RootPath(), it.req.BuildID,
it.req.IndexVersion, it.partitionID, it.segmentID, blob.Key)
saveFn := func() error {
return it.cm.Write(ctx, savePath, blob.Value)
}
@ -425,6 +424,7 @@ func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
return err
}
savePaths[idx] = savePath
saveFileKeys[idx] = blob.Key
return nil
}
@ -435,7 +435,7 @@ func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
}
it.savePaths = savePaths
it.statistic.EndTime = time.Now().UnixMicro()
it.node.storeIndexFilesAndStatistic(it.ClusterID, it.BuildID, savePaths, it.serializedSize, &it.statistic)
it.node.storeIndexFilesAndStatistic(it.ClusterID, it.BuildID, saveFileKeys, it.serializedSize, &it.statistic)
log.Ctx(ctx).Debug("save index files done", zap.Strings("IndexFiles", savePaths))
saveIndexFileDur := it.tr.Record("index file save done")
metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
@ -447,9 +447,19 @@ func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
func (it *indexBuildTask) SaveDiskAnnIndexFiles(ctx context.Context) error {
savePaths := make([]string, len(it.indexBlobs))
saveFileKeys := make([]string, len(it.indexBlobs))
for i, blob := range it.indexBlobs {
savePath := blob.Key
savePaths[i] = savePath
// TODO: unify blob key to file key instead of full path
parts := strings.Split(blob.Key, "/")
if len(parts) == 0 {
return fmt.Errorf("invaild blob key: %s", blob.Key)
}
fileKey := parts[len(parts)-1]
saveFileKeys[i] = fileKey
}
// add indexparams file
@ -469,12 +479,9 @@ func (it *indexBuildTask) SaveDiskAnnIndexFiles(ctx context.Context) error {
return err
}
getSavePathByKey := func(key string) string {
return path.Join("files/index_files", strconv.Itoa(int(it.req.BuildID)), strconv.Itoa(int(it.req.IndexVersion)),
strconv.Itoa(int(it.partitionID)), strconv.Itoa(int(it.segmentID)), key)
}
indexParamPath := metautil.BuildSegmentIndexFilePath(it.cm.RootPath(), it.req.BuildID, it.req.IndexVersion,
it.partitionID, it.segmentID, indexParamBlob.Key)
indexParamPath := getSavePathByKey(indexParamBlob.Key)
saveFn := func() error {
return it.cm.Write(ctx, indexParamPath, indexParamBlob.Value)
}
@ -483,11 +490,12 @@ func (it *indexBuildTask) SaveDiskAnnIndexFiles(ctx context.Context) error {
return err
}
saveFileKeys = append(saveFileKeys, indexParamBlob.Key)
savePaths = append(savePaths, indexParamPath)
it.savePaths = savePaths
it.statistic.EndTime = time.Now().UnixMicro()
it.node.storeIndexFilesAndStatistic(it.ClusterID, it.BuildID, savePaths, it.serializedSize, &it.statistic)
it.node.storeIndexFilesAndStatistic(it.ClusterID, it.BuildID, saveFileKeys, it.serializedSize, &it.statistic)
log.Ctx(ctx).Debug("save index files done", zap.Strings("IndexFiles", savePaths))
saveIndexFileDur := it.tr.Record("index file save done")
metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))

View File

@ -2,6 +2,7 @@ package indexnode
import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/common"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
@ -52,12 +53,12 @@ func (i *IndexNode) foreachTaskInfo(fn func(ClusterID string, buildID UniqueID,
}
}
func (i *IndexNode) storeIndexFilesAndStatistic(ClusterID string, buildID UniqueID, files []string, serializedSize uint64, statistic *indexpb.JobInfo) {
func (i *IndexNode) storeIndexFilesAndStatistic(ClusterID string, buildID UniqueID, fileKeys []string, serializedSize uint64, statistic *indexpb.JobInfo) {
key := taskKey{ClusterID: ClusterID, BuildID: buildID}
i.stateLock.Lock()
defer i.stateLock.Unlock()
if info, ok := i.tasks[key]; ok {
info.indexFiles = files[:]
info.fileKeys = common.CloneStringList(fileKeys)
info.serializedSize = serializedSize
info.statistic = proto.Clone(statistic).(*indexpb.JobInfo)
return

View File

@ -31,7 +31,7 @@ func (s *segmentIndexDb) List(tenantID string) ([]*dbmodel.SegmentIndexResult, e
"segment_indexes.segment_id AS segment_id, segment_indexes.num_rows AS num_rows, segment_indexes.index_id AS index_id, "+
"segment_indexes.build_id AS build_id, segment_indexes.node_id AS node_id, segment_indexes.index_version AS index_version, "+
"segment_indexes.index_state AS index_state,segment_indexes.fail_reason AS fail_reason, segment_indexes.create_time AS create_time,"+
"segment_indexes.index_file_paths AS index_file_paths, segment_indexes.index_size AS index_size, segment_indexes.is_deleted AS is_deleted").
"segment_indexes.index_file_keys AS index_file_keys, segment_indexes.index_size AS index_size, segment_indexes.is_deleted AS is_deleted").
Where("indexes.tenant_id = ?", tenantID)
var rs []*dbmodel.SegmentIndexResult

View File

@ -13,30 +13,30 @@ import (
func TestSegmentIndex_Insert(t *testing.T) {
var segIndexes = []*dbmodel.SegmentIndex{
{
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFilePaths: "",
IndexSize: 1024,
IsDeleted: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFileKeys: "",
IndexSize: 1024,
IsDeleted: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
// expectation
mock.ExpectBegin()
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFilePaths, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_keys`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFileKeys, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
@ -48,30 +48,30 @@ func TestSegmentIndex_Insert(t *testing.T) {
func TestSegmentIndex_Insert_Error(t *testing.T) {
var segIndexes = []*dbmodel.SegmentIndex{
{
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFilePaths: "",
IndexSize: 1024,
IsDeleted: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFileKeys: "",
IndexSize: 1024,
IsDeleted: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
// expectation
mock.ExpectBegin()
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFilePaths, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_keys`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFileKeys, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
WillReturnError(errors.New("test error"))
mock.ExpectRollback()
@ -83,30 +83,30 @@ func TestSegmentIndex_Insert_Error(t *testing.T) {
func TestSegmentIndex_Update(t *testing.T) {
var segIndexes = []*dbmodel.SegmentIndex{
{
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFilePaths: "",
IndexSize: 1024,
IsDeleted: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFileKeys: "",
IndexSize: 1024,
IsDeleted: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
// expectation
mock.ExpectBegin()
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFilePaths, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_keys`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFileKeys, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
@ -118,30 +118,30 @@ func TestSegmentIndex_Update(t *testing.T) {
func TestSegmentIndex_Upsert_Error(t *testing.T) {
var segIndexes = []*dbmodel.SegmentIndex{
{
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFilePaths: "",
IndexSize: 1024,
IsDeleted: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
TenantID: tenantID,
CollectionID: collID1,
PartitionID: partitionID1,
SegmentID: segmentID1,
NumRows: NumRows,
IndexID: indexID1,
BuildID: 1002,
NodeID: 3,
IndexVersion: 0,
IndexState: 3,
FailReason: "",
CreateTime: uint64(1011),
IndexFileKeys: "",
IndexSize: 1024,
IsDeleted: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
// expectation
mock.ExpectBegin()
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFilePaths, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`num_rows`,`index_id`,`build_id`,`node_id`,`index_version`,`index_state`,`fail_reason`,`create_time`,`index_file_keys`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].NumRows, segIndexes[0].IndexID, segIndexes[0].BuildID, segIndexes[0].NodeID, segIndexes[0].IndexVersion, segIndexes[0].IndexState, segIndexes[0].FailReason, segIndexes[0].CreateTime, segIndexes[0].IndexFileKeys, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
WillReturnError(errors.New("test error"))
mock.ExpectRollback()

View File

@ -1,15 +1,8 @@
package dbmodel
import (
"encoding/json"
"time"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore/model"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -22,18 +15,18 @@ type SegmentIndex struct {
SegmentID int64 `gorm:"segment_id"`
NumRows int64 `gorm:"num_rows"`
// IndexInfo (IndexID & IndexName & IndexParams)
IndexID int64 `gorm:"index_id"`
BuildID int64 `gorm:"build_id"`
NodeID int64 `gorm:"node_id"`
IndexVersion int64 `gorm:"index_version"`
IndexState int32 `gorm:"index_state"`
FailReason string `gorm:"fail_reason"`
CreateTime uint64 `gorm:"create_time"`
IndexFilePaths string `gorm:"index_file_paths"`
IndexSize uint64 `gorm:"index_size"`
IsDeleted bool `gorm:"is_deleted"`
CreatedAt time.Time `gorm:"created_at"`
UpdatedAt time.Time `gorm:"updated_at"`
IndexID int64 `gorm:"index_id"`
BuildID int64 `gorm:"build_id"`
NodeID int64 `gorm:"node_id"`
IndexVersion int64 `gorm:"index_version"`
IndexState int32 `gorm:"index_state"`
FailReason string `gorm:"fail_reason"`
CreateTime uint64 `gorm:"create_time"`
IndexFileKeys string `gorm:"index_file_keys"`
IndexSize uint64 `gorm:"index_size"`
IsDeleted bool `gorm:"is_deleted"`
CreatedAt time.Time `gorm:"created_at"`
UpdatedAt time.Time `gorm:"updated_at"`
}
func (v SegmentIndex) TableName() string {
@ -41,20 +34,20 @@ func (v SegmentIndex) TableName() string {
}
type SegmentIndexResult struct {
CollectionID int64
PartitionID int64
SegmentID int64
NumRows int64
IndexID int64
BuildID int64
NodeID int64
IndexVersion int64
IndexState int32
FailReason string
IsDeleted bool
CreateTime uint64
IndexFilePaths string
IndexSize uint64
CollectionID int64
PartitionID int64
SegmentID int64
NumRows int64
IndexID int64
BuildID int64
NodeID int64
IndexVersion int64
IndexState int32
FailReason string
IsDeleted bool
CreateTime uint64
IndexFileKeys string
IndexSize uint64
}
//go:generate mockery --name=ISegmentIndexDb
@ -68,39 +61,39 @@ type ISegmentIndexDb interface {
MarkDeletedByBuildID(tenantID string, idxID typeutil.UniqueID) error
}
func UnmarshalSegmentIndexModel(inputs []*SegmentIndexResult) ([]*model.SegmentIndex, error) {
result := make([]*model.SegmentIndex, 0, len(inputs))
for _, ir := range inputs {
var indexFilePaths []string
if ir.IndexFilePaths != "" {
err := json.Unmarshal([]byte(ir.IndexFilePaths), &indexFilePaths)
if err != nil {
log.Error("unmarshal index file paths of segment index failed", zap.Int64("collID", ir.CollectionID),
zap.Int64("indexID", ir.IndexID), zap.Int64("segmentID", ir.SegmentID),
zap.Int64("buildID", ir.BuildID), zap.Error(err))
return nil, err
}
}
idx := &model.SegmentIndex{
SegmentID: ir.SegmentID,
CollectionID: ir.CollectionID,
PartitionID: ir.PartitionID,
NumRows: ir.NumRows,
IndexID: ir.IndexID,
BuildID: ir.BuildID,
NodeID: ir.NodeID,
IndexVersion: ir.IndexVersion,
IndexState: commonpb.IndexState(ir.IndexState),
FailReason: ir.FailReason,
IsDeleted: ir.IsDeleted,
CreateTime: ir.CreateTime,
IndexFilePaths: indexFilePaths,
IndexSize: ir.IndexSize,
}
result = append(result, idx)
}
return result, nil
}
//func UnmarshalSegmentIndexModel(inputs []*SegmentIndexResult) ([]*model.SegmentIndex, error) {
// result := make([]*model.SegmentIndex, 0, len(inputs))
// for _, ir := range inputs {
//
// var IndexFileKeys []string
// if ir.IndexFileKeys != "" {
// err := json.Unmarshal([]byte(ir.IndexFileKeys), &IndexFileKeys)
// if err != nil {
// log.Error("unmarshal index file paths of segment index failed", zap.Int64("collID", ir.CollectionID),
// zap.Int64("indexID", ir.IndexID), zap.Int64("segmentID", ir.SegmentID),
// zap.Int64("buildID", ir.BuildID), zap.Error(err))
// return nil, err
// }
// }
//
// idx := &model.SegmentIndex{
// SegmentID: ir.SegmentID,
// CollectionID: ir.CollectionID,
// PartitionID: ir.PartitionID,
// NumRows: ir.NumRows,
// IndexID: ir.IndexID,
// BuildID: ir.BuildID,
// NodeID: ir.NodeID,
// IndexVersion: ir.IndexVersion,
// IndexState: commonpb.IndexState(ir.IndexState),
// FailReason: ir.FailReason,
// IsDeleted: ir.IsDeleted,
// CreateTime: ir.CreateTime,
// IndexFileKeys: IndexFileKeys,
// IndexSize: ir.IndexSize,
// }
// result = append(result, idx)
// }
//
// return result, nil
//}

View File

@ -1,345 +1,346 @@
package indexcoord
import (
"context"
"encoding/json"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/util/contextutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
// Catalog persists index and segment-index metadata through a relational
// metastore. Plain statements go through metaDomain; multi-statement work
// runs inside txImpl transactions.
type Catalog struct {
	metaDomain dbmodel.IMetaDomain
	txImpl     dbmodel.ITransaction
}
// NewTableCatalog builds a Catalog backed by the given transaction
// implementation and meta-domain accessor.
func NewTableCatalog(txImpl dbmodel.ITransaction, metaDomain dbmodel.IMetaDomain) *Catalog {
	catalog := &Catalog{
		metaDomain: metaDomain,
		txImpl:     txImpl,
	}
	return catalog
}
// CreateIndex inserts a new index record for the tenant carried in ctx.
// TypeParams, IndexParams and UserIndexParams are serialized to JSON strings
// before being written to the index table.
func (tc *Catalog) CreateIndex(ctx context.Context, index *model.Index) error {
	tenantID := contextutil.TenantID(ctx)

	// marshalParams serializes one params field; on failure it emits the
	// caller-specific log message with the standard identifying fields.
	marshalParams := func(v interface{}, msg string) (string, error) {
		b, err := json.Marshal(v)
		if err != nil {
			log.Error(msg, zap.String("tenant", tenantID),
				zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
				zap.String("indexName", index.IndexName), zap.Error(err))
			return "", err
		}
		return string(b), nil
	}

	indexParams, err := marshalParams(index.IndexParams, "marshal IndexParams of index failed")
	if err != nil {
		return err
	}
	userIndexParams, err := marshalParams(index.UserIndexParams, "marshal userIndexParams of index failed")
	if err != nil {
		return err
	}
	typeParams, err := marshalParams(index.TypeParams, "marshal TypeParams of index failed")
	if err != nil {
		return err
	}

	row := &dbmodel.Index{
		TenantID:        tenantID,
		CollectionID:    index.CollectionID,
		FieldID:         index.FieldID,
		IndexID:         index.IndexID,
		IndexName:       index.IndexName,
		TypeParams:      typeParams,
		IndexParams:     indexParams,
		CreateTime:      index.CreateTime,
		IsDeleted:       index.IsDeleted,
		IsAutoIndex:     index.IsAutoIndex,
		UserIndexParams: userIndexParams,
	}

	if err := tc.metaDomain.IndexDb(ctx).Insert([]*dbmodel.Index{row}); err != nil {
		log.Error("insert indexes failed", zap.String("tenant", tenantID), zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName), zap.Error(err))
		return err
	}

	return nil
}
// ListIndexes loads every index row for the tenant carried in ctx and
// converts the raw rows into model.Index values.
func (tc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
	rows, err := tc.metaDomain.IndexDb(ctx).List(contextutil.TenantID(ctx))
	if err != nil {
		return nil, err
	}
	return dbmodel.UnmarshalIndexModel(rows)
}
// AlterIndex overwrites an existing index record with the state carried in
// index. The three params fields are JSON-serialized before the update; a
// marshal failure is logged and aborts the call.
func (tc *Catalog) AlterIndex(ctx context.Context, index *model.Index) error {
	tenantID := contextutil.TenantID(ctx)

	ipBytes, err := json.Marshal(index.IndexParams)
	if err != nil {
		log.Error("marshal IndexParams of index failed", zap.String("tenant", tenantID),
			zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
			zap.String("indexName", index.IndexName), zap.Error(err))
		return err
	}

	uipBytes, err := json.Marshal(index.UserIndexParams)
	if err != nil {
		log.Error("marshal userIndexParams of index failed", zap.String("tenant", tenantID),
			zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
			zap.String("indexName", index.IndexName), zap.Error(err))
		return err
	}

	tpBytes, err := json.Marshal(index.TypeParams)
	if err != nil {
		log.Error("marshal TypeParams of index failed", zap.String("tenant", tenantID),
			zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
			zap.String("indexName", index.IndexName), zap.Error(err))
		return err
	}

	record := &dbmodel.Index{
		TenantID:        tenantID,
		CollectionID:    index.CollectionID,
		FieldID:         index.FieldID,
		IndexID:         index.IndexID,
		IndexName:       index.IndexName,
		TypeParams:      string(tpBytes),
		IndexParams:     string(ipBytes),
		CreateTime:      index.CreateTime,
		IsDeleted:       index.IsDeleted,
		IsAutoIndex:     index.IsAutoIndex,
		UserIndexParams: string(uipBytes),
	}
	return tc.metaDomain.IndexDb(ctx).Update(record)
}
// AlterIndexes updates a batch of index records atomically: every update runs
// inside one metastore transaction and the first failure aborts (and rolls
// back) the whole batch.
//
// Fix: the per-row Update previously used the outer ctx instead of the
// transaction context txCtx, so updates did not participate in the
// surrounding transaction.
func (tc *Catalog) AlterIndexes(ctx context.Context, indexes []*model.Index) error {
	tenantID := contextutil.TenantID(ctx)

	return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
		for _, index := range indexes {
			indexParamsBytes, err := json.Marshal(index.IndexParams)
			if err != nil {
				log.Error("marshal IndexParams of index failed", zap.String("tenant", tenantID),
					zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
					zap.String("indexName", index.IndexName), zap.Error(err))
				return err
			}
			userIndexParamsBytes, err := json.Marshal(index.UserIndexParams)
			if err != nil {
				log.Error("marshal userIndexParams of index failed", zap.String("tenant", tenantID),
					zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
					zap.String("indexName", index.IndexName), zap.Error(err))
				return err
			}
			typeParamsBytes, err := json.Marshal(index.TypeParams)
			if err != nil {
				log.Error("marshal TypeParams of index failed", zap.String("tenant", tenantID),
					zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
					zap.String("indexName", index.IndexName), zap.Error(err))
				return err
			}

			idx := &dbmodel.Index{
				TenantID:        tenantID,
				CollectionID:    index.CollectionID,
				FieldID:         index.FieldID,
				IndexID:         index.IndexID,
				IndexName:       index.IndexName,
				TypeParams:      string(typeParamsBytes),
				IndexParams:     string(indexParamsBytes),
				CreateTime:      index.CreateTime,
				IsDeleted:       index.IsDeleted,
				IsAutoIndex:     index.IsAutoIndex,
				UserIndexParams: string(userIndexParamsBytes),
			}
			// Use txCtx (not ctx) so the update runs inside the transaction.
			if err := tc.metaDomain.IndexDb(txCtx).Update(idx); err != nil {
				return err
			}
		}
		return nil
	})
}
// DropIndex soft-deletes an index by marking it deleted inside a transaction.
// collID is currently unused; deletion is keyed by dropIdxID alone.
func (tc *Catalog) DropIndex(ctx context.Context, collID, dropIdxID typeutil.UniqueID) error {
	tenantID := contextutil.TenantID(ctx)

	// TODO: really delete.
	return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
		// mark deleted for index
		return tc.metaDomain.IndexDb(txCtx).MarkDeletedByIndexID(tenantID, dropIdxID)
	})
}
// CreateSegmentIndex inserts one segment-index record for the tenant carried
// in ctx. The list of index file keys (file names, no longer full paths —
// consistent with the renamed model.SegmentIndex.IndexFileKeys field) is
// JSON-encoded into a single column before insertion.
func (tc *Catalog) CreateSegmentIndex(ctx context.Context, segIdx *model.SegmentIndex) error {
	tenantID := contextutil.TenantID(ctx)

	indexFileKeysBytes, err := json.Marshal(segIdx.IndexFileKeys)
	if err != nil {
		log.Error("marshal IndexFiles of segment index failed", zap.String("tenant", tenantID),
			zap.Int64("collID", segIdx.CollectionID), zap.Int64("indexID", segIdx.IndexID),
			zap.Int64("segID", segIdx.SegmentID), zap.Int64("buildID", segIdx.BuildID), zap.Error(err))
		return err
	}

	idx := &dbmodel.SegmentIndex{
		TenantID:      tenantID,
		CollectionID:  segIdx.CollectionID,
		PartitionID:   segIdx.PartitionID,
		SegmentID:     segIdx.SegmentID,
		NumRows:       segIdx.NumRows,
		IndexID:       segIdx.IndexID,
		BuildID:       segIdx.BuildID,
		NodeID:        segIdx.NodeID,
		IndexVersion:  segIdx.IndexVersion,
		IndexState:    int32(segIdx.IndexState),
		FailReason:    segIdx.FailReason,
		CreateTime:    segIdx.CreateTime,
		IndexFileKeys: string(indexFileKeysBytes),
		IndexSize:     segIdx.IndexSize,
		IsDeleted:     segIdx.IsDeleted,
	}

	err = tc.metaDomain.SegmentIndexDb(ctx).Insert([]*dbmodel.SegmentIndex{idx})
	if err != nil {
		log.Error("insert segment index failed", zap.String("tenant", tenantID),
			zap.Int64("collID", segIdx.CollectionID), zap.Int64("indexID", segIdx.IndexID),
			zap.Int64("segID", segIdx.SegmentID), zap.Int64("buildID", segIdx.BuildID), zap.Error(err))
		return err
	}

	return nil
}
// ListSegmentIndexes loads every segment-index row for the tenant carried in
// ctx and converts the raw rows into model.SegmentIndex values.
func (tc *Catalog) ListSegmentIndexes(ctx context.Context) ([]*model.SegmentIndex, error) {
	rows, err := tc.metaDomain.SegmentIndexDb(ctx).List(contextutil.TenantID(ctx))
	if err != nil {
		return nil, err
	}
	return dbmodel.UnmarshalSegmentIndexModel(rows)
}
// AlterSegmentIndex overwrites an existing segment-index record. The index
// file keys (file names — consistent with the renamed
// model.SegmentIndex.IndexFileKeys field) are JSON-encoded before the update.
func (tc *Catalog) AlterSegmentIndex(ctx context.Context, segIndex *model.SegmentIndex) error {
	tenantID := contextutil.TenantID(ctx)

	indexFileKeysBytes, err := json.Marshal(segIndex.IndexFileKeys)
	if err != nil {
		log.Error("marshal index files of segment index failed", zap.String("tenant", tenantID),
			zap.Int64("collID", segIndex.CollectionID), zap.Int64("indexID", segIndex.IndexID),
			zap.Int64("segID", segIndex.SegmentID), zap.Int64("buildID", segIndex.BuildID), zap.Error(err))
		return err
	}

	idx := &dbmodel.SegmentIndex{
		TenantID:      tenantID,
		CollectionID:  segIndex.CollectionID,
		PartitionID:   segIndex.PartitionID,
		SegmentID:     segIndex.SegmentID,
		NumRows:       segIndex.NumRows,
		IndexID:       segIndex.IndexID,
		BuildID:       segIndex.BuildID,
		NodeID:        segIndex.NodeID,
		IndexVersion:  segIndex.IndexVersion,
		IndexState:    int32(segIndex.IndexState),
		FailReason:    segIndex.FailReason,
		CreateTime:    segIndex.CreateTime,
		IndexFileKeys: string(indexFileKeysBytes),
		IndexSize:     segIndex.IndexSize,
		IsDeleted:     segIndex.IsDeleted,
	}
	return tc.metaDomain.SegmentIndexDb(ctx).Update(idx)
}
// AlterSegmentIndexes updates a batch of segment-index records atomically:
// every update runs inside one metastore transaction and the first failure
// aborts the whole batch.
//
// Fixes: (1) the per-row Update previously used the outer ctx instead of the
// transaction context txCtx, so updates did not participate in the
// transaction; (2) the file-key list now reads the renamed
// model.SegmentIndex.IndexFileKeys field.
func (tc *Catalog) AlterSegmentIndexes(ctx context.Context, segIdxes []*model.SegmentIndex) error {
	tenantID := contextutil.TenantID(ctx)

	return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
		for _, segIndex := range segIdxes {
			indexFileKeysBytes, err := json.Marshal(segIndex.IndexFileKeys)
			if err != nil {
				log.Error("marshal index files of segment index failed", zap.String("tenant", tenantID),
					zap.Int64("collID", segIndex.CollectionID), zap.Int64("indexID", segIndex.IndexID),
					zap.Int64("segID", segIndex.SegmentID), zap.Int64("buildID", segIndex.BuildID), zap.Error(err))
				return err
			}

			idx := &dbmodel.SegmentIndex{
				TenantID:      tenantID,
				CollectionID:  segIndex.CollectionID,
				PartitionID:   segIndex.PartitionID,
				SegmentID:     segIndex.SegmentID,
				NumRows:       segIndex.NumRows,
				IndexID:       segIndex.IndexID,
				BuildID:       segIndex.BuildID,
				NodeID:        segIndex.NodeID,
				IndexVersion:  segIndex.IndexVersion,
				IndexState:    int32(segIndex.IndexState),
				FailReason:    segIndex.FailReason,
				CreateTime:    segIndex.CreateTime,
				IndexFileKeys: string(indexFileKeysBytes),
				IndexSize:     segIndex.IndexSize,
				IsDeleted:     segIndex.IsDeleted,
			}
			// Use txCtx (not ctx) so the update runs inside the transaction.
			if err := tc.metaDomain.SegmentIndexDb(txCtx).Update(idx); err != nil {
				return err
			}
		}
		return nil
	})
}
// DropSegmentIndex marks a segment-index record deleted. Note: collID,
// partID and segID are currently unused; deletion is keyed by buildID alone.
func (tc *Catalog) DropSegmentIndex(ctx context.Context, collID, partID, segID, buildID typeutil.UniqueID) error {
	if err := tc.metaDomain.SegmentIndexDb(ctx).MarkDeletedByBuildID(contextutil.TenantID(ctx), buildID); err != nil {
		return err
	}
	return nil
}
//
//import (
// "context"
// "encoding/json"
//
// "go.uber.org/zap"
//
// "github.com/milvus-io/milvus/internal/log"
// "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
// "github.com/milvus-io/milvus/internal/metastore/model"
// "github.com/milvus-io/milvus/internal/util/contextutil"
// "github.com/milvus-io/milvus/internal/util/typeutil"
//)
//
//type Catalog struct {
// metaDomain dbmodel.IMetaDomain
// txImpl dbmodel.ITransaction
//}
//
//func NewTableCatalog(txImpl dbmodel.ITransaction, metaDomain dbmodel.IMetaDomain) *Catalog {
// return &Catalog{
// txImpl: txImpl,
// metaDomain: metaDomain,
// }
//}
//
//func (tc *Catalog) CreateIndex(ctx context.Context, index *model.Index) error {
// tenantID := contextutil.TenantID(ctx)
//
// indexParamsBytes, err := json.Marshal(index.IndexParams)
// if err != nil {
// log.Error("marshal IndexParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// userIndexParamsBytes, err := json.Marshal(index.UserIndexParams)
// if err != nil {
// log.Error("marshal userIndexParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// typeParamsBytes, err := json.Marshal(index.TypeParams)
// if err != nil {
// log.Error("marshal TypeParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// idx := &dbmodel.Index{
// TenantID: tenantID,
// CollectionID: index.CollectionID,
// FieldID: index.FieldID,
// IndexID: index.IndexID,
// IndexName: index.IndexName,
// TypeParams: string(typeParamsBytes),
// IndexParams: string(indexParamsBytes),
// CreateTime: index.CreateTime,
// IsDeleted: index.IsDeleted,
// IsAutoIndex: index.IsAutoIndex,
// UserIndexParams: string(userIndexParamsBytes),
// }
//
// err = tc.metaDomain.IndexDb(ctx).Insert([]*dbmodel.Index{idx})
// if err != nil {
// log.Error("insert indexes failed", zap.String("tenant", tenantID), zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// return nil
//}
//
//func (tc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
// tenantID := contextutil.TenantID(ctx)
//
// rs, err := tc.metaDomain.IndexDb(ctx).List(tenantID)
// if err != nil {
// return nil, err
// }
//
// result, err := dbmodel.UnmarshalIndexModel(rs)
// if err != nil {
// return nil, err
// }
//
// return result, nil
//}
//
//func (tc *Catalog) AlterIndex(ctx context.Context, index *model.Index) error {
// tenantID := contextutil.TenantID(ctx)
//
// indexParamsBytes, err := json.Marshal(index.IndexParams)
// if err != nil {
// log.Error("marshal IndexParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// userIndexParamsBytes, err := json.Marshal(index.UserIndexParams)
// if err != nil {
// log.Error("marshal userIndexParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// typeParamsBytes, err := json.Marshal(index.TypeParams)
// if err != nil {
// log.Error("marshal TypeParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// idx := &dbmodel.Index{
// TenantID: tenantID,
// CollectionID: index.CollectionID,
// FieldID: index.FieldID,
// IndexID: index.IndexID,
// IndexName: index.IndexName,
// TypeParams: string(typeParamsBytes),
// IndexParams: string(indexParamsBytes),
// CreateTime: index.CreateTime,
// IsDeleted: index.IsDeleted,
// IsAutoIndex: index.IsAutoIndex,
// UserIndexParams: string(userIndexParamsBytes),
// }
// err = tc.metaDomain.IndexDb(ctx).Update(idx)
// if err != nil {
// return err
// }
// return nil
//}
//
//func (tc *Catalog) AlterIndexes(ctx context.Context, indexes []*model.Index) error {
// tenantID := contextutil.TenantID(ctx)
//
// return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
// for _, index := range indexes {
// indexParamsBytes, err := json.Marshal(index.IndexParams)
// if err != nil {
// log.Error("marshal IndexParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// userIndexParamsBytes, err := json.Marshal(index.UserIndexParams)
// if err != nil {
// log.Error("marshal userIndexParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
// typeParamsBytes, err := json.Marshal(index.TypeParams)
// if err != nil {
// log.Error("marshal TypeParams of index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID),
// zap.String("indexName", index.IndexName), zap.Error(err))
// return err
// }
//
// idx := &dbmodel.Index{
// TenantID: tenantID,
// CollectionID: index.CollectionID,
// FieldID: index.FieldID,
// IndexID: index.IndexID,
// IndexName: index.IndexName,
// TypeParams: string(typeParamsBytes),
// IndexParams: string(indexParamsBytes),
// CreateTime: index.CreateTime,
// IsDeleted: index.IsDeleted,
// IsAutoIndex: index.IsAutoIndex,
// UserIndexParams: string(userIndexParamsBytes),
// }
// err = tc.metaDomain.IndexDb(ctx).Update(idx)
// if err != nil {
// return err
// }
// }
// return nil
// })
//}
//
//func (tc *Catalog) DropIndex(ctx context.Context, collID, dropIdxID typeutil.UniqueID) error {
// tenantID := contextutil.TenantID(ctx)
//
// // TODO: really delete.
// return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
// // mark deleted for index
// err := tc.metaDomain.IndexDb(txCtx).MarkDeletedByIndexID(tenantID, dropIdxID)
// if err != nil {
// return err
// }
//
// return nil
// })
//}
//
//func (tc *Catalog) CreateSegmentIndex(ctx context.Context, segIdx *model.SegmentIndex) error {
// tenantID := contextutil.TenantID(ctx)
//
// indexFileKeysBytes, err := json.Marshal(segIdx.IndexFileKeys)
// if err != nil {
// log.Error("marshal IndexFiles of segment index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", segIdx.CollectionID), zap.Int64("indexID", segIdx.IndexID),
// zap.Int64("segID", segIdx.SegmentID), zap.Int64("buildID", segIdx.BuildID), zap.Error(err))
// return err
// }
//
// idx := &dbmodel.SegmentIndex{
// TenantID: tenantID,
// CollectionID: segIdx.CollectionID,
// PartitionID: segIdx.PartitionID,
// SegmentID: segIdx.SegmentID,
// NumRows: segIdx.NumRows,
// IndexID: segIdx.IndexID,
// BuildID: segIdx.BuildID,
// NodeID: segIdx.NodeID,
// IndexVersion: segIdx.IndexVersion,
// IndexState: int32(segIdx.IndexState),
// FailReason: segIdx.FailReason,
// CreateTime: segIdx.CreateTime,
// IndexFileKeys: string(indexFileKeysBytes),
// IndexSize: segIdx.IndexSize,
// IsDeleted: segIdx.IsDeleted,
// }
//
// err = tc.metaDomain.SegmentIndexDb(ctx).Insert([]*dbmodel.SegmentIndex{idx})
// if err != nil {
// log.Error("insert segment index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", segIdx.CollectionID), zap.Int64("indexID", segIdx.IndexID),
// zap.Int64("segID", segIdx.SegmentID), zap.Int64("buildID", segIdx.BuildID), zap.Error(err))
// return err
// }
//
// return nil
//}
//
//func (tc *Catalog) ListSegmentIndexes(ctx context.Context) ([]*model.SegmentIndex, error) {
// tenantID := contextutil.TenantID(ctx)
//
// rs, err := tc.metaDomain.SegmentIndexDb(ctx).List(tenantID)
// if err != nil {
// return nil, err
// }
//
// result, err := dbmodel.UnmarshalSegmentIndexModel(rs)
// if err != nil {
// return nil, err
// }
//
// return result, nil
//}
//
//func (tc *Catalog) AlterSegmentIndex(ctx context.Context, segIndex *model.SegmentIndex) error {
// tenantID := contextutil.TenantID(ctx)
//
// indexFileKeysBytes, err := json.Marshal(segIndex.IndexFileKeys)
// if err != nil {
// log.Error("marshal index files of segment index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", segIndex.CollectionID), zap.Int64("indexID", segIndex.IndexID),
// zap.Int64("segID", segIndex.SegmentID), zap.Int64("buildID", segIndex.BuildID), zap.Error(err))
// return err
// }
//
// idx := &dbmodel.SegmentIndex{
// TenantID: tenantID,
// CollectionID: segIndex.CollectionID,
// PartitionID: segIndex.PartitionID,
// SegmentID: segIndex.SegmentID,
// NumRows: segIndex.NumRows,
// IndexID: segIndex.IndexID,
// BuildID: segIndex.BuildID,
// NodeID: segIndex.NodeID,
// IndexVersion: segIndex.IndexVersion,
// IndexState: int32(segIndex.IndexState),
// FailReason: segIndex.FailReason,
// CreateTime: segIndex.CreateTime,
// IndexFileKeys: string(indexFileKeysBytes),
// IndexSize: segIndex.IndexSize,
// IsDeleted: segIndex.IsDeleted,
// }
// err = tc.metaDomain.SegmentIndexDb(ctx).Update(idx)
// if err != nil {
// return err
// }
// return nil
//}
//
//func (tc *Catalog) AlterSegmentIndexes(ctx context.Context, segIdxes []*model.SegmentIndex) error {
// tenantID := contextutil.TenantID(ctx)
//
// return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
// for _, segIndex := range segIdxes {
// indexFileKeysBytes, err := json.Marshal(segIndex.IndexFileKeys)
// if err != nil {
// log.Error("marshal index files of segment index failed", zap.String("tenant", tenantID),
// zap.Int64("collID", segIndex.CollectionID), zap.Int64("indexID", segIndex.IndexID),
// zap.Int64("segID", segIndex.SegmentID), zap.Int64("buildID", segIndex.BuildID), zap.Error(err))
// return err
// }
//
// idx := &dbmodel.SegmentIndex{
// TenantID: tenantID,
// CollectionID: segIndex.CollectionID,
// PartitionID: segIndex.PartitionID,
// SegmentID: segIndex.SegmentID,
// NumRows: segIndex.NumRows,
// IndexID: segIndex.IndexID,
// BuildID: segIndex.BuildID,
// NodeID: segIndex.NodeID,
// IndexVersion: segIndex.IndexVersion,
// IndexState: int32(segIndex.IndexState),
// FailReason: segIndex.FailReason,
// CreateTime: segIndex.CreateTime,
// IndexFileKeys: string(indexFileKeysBytes),
// IndexSize: segIndex.IndexSize,
// IsDeleted: segIndex.IsDeleted,
// }
// err = tc.metaDomain.SegmentIndexDb(ctx).Update(idx)
// if err != nil {
// return err
// }
// }
// return nil
// })
//}
//
//func (tc *Catalog) DropSegmentIndex(ctx context.Context, collID, partID, segID, buildID typeutil.UniqueID) error {
// tenantID := contextutil.TenantID(ctx)
//
// err := tc.metaDomain.SegmentIndexDb(ctx).MarkDeletedByBuildID(tenantID, buildID)
// if err != nil {
// return err
// }
//
// return nil
//}

View File

@ -166,7 +166,7 @@ package indexcoord
// },
// BuildID: indexBuildID1,
// CreateTime: 0,
// IndexFilePaths: []string{"a\xc5z"},
// IndexFileKeys: []string{"a\xc5z"},
// },
// },
// }
@ -339,7 +339,7 @@ package indexcoord
//// EnableIndex: false,
//// IndexBuildID: indexBuildID1,
//// IndexSize: 0,
//// IndexFilePaths: "[\"test_index_file_path_1\"]",
//// IndexFileKeys: "[\"test_index_file_path_1\"]",
//// },
//// }
//// out := []*model.Index{
@ -365,7 +365,7 @@ package indexcoord
//// BuildID: indexBuildID1,
//// //EnableIndex: false,
//// CreateTime: 0,
//// IndexFilePaths: []string{"test_index_file_path_1"},
//// IndexFileKeys: []string{"test_index_file_path_1"},
//// },
//// },
//// Extra: nil,

View File

@ -209,20 +209,20 @@ func TestCatalog_DropIndex(t *testing.T) {
func TestCatalog_CreateSegmentIndex(t *testing.T) {
segIdx := &model.SegmentIndex{
SegmentID: 1,
CollectionID: 2,
PartitionID: 3,
NumRows: 1024,
IndexID: 4,
BuildID: 5,
NodeID: 6,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: 1,
CollectionID: 2,
PartitionID: 3,
NumRows: 1024,
IndexID: 4,
BuildID: 5,
NodeID: 6,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("success", func(t *testing.T) {
@ -257,20 +257,20 @@ func TestCatalog_CreateSegmentIndex(t *testing.T) {
func TestCatalog_ListSegmentIndexes(t *testing.T) {
t.Run("success", func(t *testing.T) {
segIdx := &indexpb.SegmentIndex{
CollectionID: 0,
PartitionID: 0,
SegmentID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 0,
State: 0,
FailReason: "",
IndexFilesPaths: nil,
Deleted: false,
CreateTime: 0,
SerializeSize: 0,
CollectionID: 0,
PartitionID: 0,
SegmentID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 0,
State: 0,
FailReason: "",
IndexFileKeys: nil,
Deleted: false,
CreateTime: 0,
SerializeSize: 0,
}
v, err := proto.Marshal(segIdx)
assert.NoError(t, err)
@ -320,20 +320,20 @@ func TestCatalog_ListSegmentIndexes(t *testing.T) {
func TestCatalog_AlterSegmentIndex(t *testing.T) {
segIdx := &model.SegmentIndex{
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexState: 0,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexState: 0,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("add", func(t *testing.T) {
@ -353,20 +353,20 @@ func TestCatalog_AlterSegmentIndex(t *testing.T) {
func TestCatalog_AlterSegmentIndexes(t *testing.T) {
segIdx := &model.SegmentIndex{
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexState: 0,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFilePaths: nil,
IndexSize: 0,
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexState: 0,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("add", func(t *testing.T) {

View File

@ -2,25 +2,26 @@ package model
import (
"github.com/milvus-io/milvus-proto/go-api/commonpb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/proto/indexpb"
)
// SegmentIndex is the in-memory metadata for one built index on one segment.
//
// NOTE: the original declaration carried every scalar field twice (merged
// old/new diff lines), which is not valid Go; each field is declared once here.
type SegmentIndex struct {
	SegmentID    int64
	CollectionID int64
	PartitionID  int64
	NumRows      int64
	IndexID      int64
	BuildID      int64
	NodeID       int64
	IndexVersion int64
	IndexState   commonpb.IndexState
	FailReason   string
	IsDeleted    bool
	CreateTime   uint64
	// IndexFilePaths is the legacy list of full index file paths.
	// Deprecated: superseded by IndexFileKeys (file names only).
	IndexFilePaths []string
	// IndexFileKeys holds index file names relative to the index path prefix.
	IndexFileKeys []string
	IndexSize     uint64
	WriteHandoff  bool
}
func UnmarshalSegmentIndexModel(segIndex *indexpb.SegmentIndex) *SegmentIndex {
@ -29,21 +30,21 @@ func UnmarshalSegmentIndexModel(segIndex *indexpb.SegmentIndex) *SegmentIndex {
}
return &SegmentIndex{
SegmentID: segIndex.SegmentID,
CollectionID: segIndex.CollectionID,
PartitionID: segIndex.PartitionID,
NumRows: segIndex.NumRows,
IndexID: segIndex.IndexID,
BuildID: segIndex.BuildID,
NodeID: segIndex.NodeID,
IndexState: segIndex.State,
FailReason: segIndex.FailReason,
IndexVersion: segIndex.IndexVersion,
IsDeleted: segIndex.Deleted,
CreateTime: segIndex.CreateTime,
IndexFilePaths: segIndex.IndexFilesPaths,
IndexSize: segIndex.SerializeSize,
WriteHandoff: segIndex.WriteHandoff,
SegmentID: segIndex.SegmentID,
CollectionID: segIndex.CollectionID,
PartitionID: segIndex.PartitionID,
NumRows: segIndex.NumRows,
IndexID: segIndex.IndexID,
BuildID: segIndex.BuildID,
NodeID: segIndex.NodeID,
IndexState: segIndex.State,
FailReason: segIndex.FailReason,
IndexVersion: segIndex.IndexVersion,
IsDeleted: segIndex.Deleted,
CreateTime: segIndex.CreateTime,
IndexFileKeys: common.CloneStringList(segIndex.IndexFileKeys),
IndexSize: segIndex.SerializeSize,
WriteHandoff: segIndex.WriteHandoff,
}
}
@ -53,40 +54,40 @@ func MarshalSegmentIndexModel(segIdx *SegmentIndex) *indexpb.SegmentIndex {
}
return &indexpb.SegmentIndex{
CollectionID: segIdx.CollectionID,
PartitionID: segIdx.PartitionID,
SegmentID: segIdx.SegmentID,
NumRows: segIdx.NumRows,
IndexID: segIdx.IndexID,
BuildID: segIdx.BuildID,
NodeID: segIdx.NodeID,
State: segIdx.IndexState,
FailReason: segIdx.FailReason,
IndexVersion: segIdx.IndexVersion,
IndexFilesPaths: segIdx.IndexFilePaths,
Deleted: segIdx.IsDeleted,
CreateTime: segIdx.CreateTime,
SerializeSize: segIdx.IndexSize,
WriteHandoff: segIdx.WriteHandoff,
CollectionID: segIdx.CollectionID,
PartitionID: segIdx.PartitionID,
SegmentID: segIdx.SegmentID,
NumRows: segIdx.NumRows,
IndexID: segIdx.IndexID,
BuildID: segIdx.BuildID,
NodeID: segIdx.NodeID,
State: segIdx.IndexState,
FailReason: segIdx.FailReason,
IndexVersion: segIdx.IndexVersion,
IndexFileKeys: common.CloneStringList(segIdx.IndexFileKeys),
Deleted: segIdx.IsDeleted,
CreateTime: segIdx.CreateTime,
SerializeSize: segIdx.IndexSize,
WriteHandoff: segIdx.WriteHandoff,
}
}
// CloneSegmentIndex returns a copy of segIndex. IndexFileKeys is deep-copied
// via common.CloneStringList so the clone does not share the slice with the
// original; scalar fields are copied by value.
//
// NOTE: the original body listed every field initializer twice (merged
// old/new diff lines), which is not valid Go; each field is set once here.
func CloneSegmentIndex(segIndex *SegmentIndex) *SegmentIndex {
	return &SegmentIndex{
		SegmentID:      segIndex.SegmentID,
		CollectionID:   segIndex.CollectionID,
		PartitionID:    segIndex.PartitionID,
		NumRows:        segIndex.NumRows,
		IndexID:        segIndex.IndexID,
		BuildID:        segIndex.BuildID,
		NodeID:         segIndex.NodeID,
		IndexState:     segIndex.IndexState,
		FailReason:     segIndex.FailReason,
		IndexVersion:   segIndex.IndexVersion,
		IsDeleted:      segIndex.IsDeleted,
		CreateTime:     segIndex.CreateTime,
		IndexFilePaths: segIndex.IndexFilePaths, // legacy field carried over as-is
		IndexFileKeys:  common.CloneStringList(segIndex.IndexFileKeys),
		IndexSize:      segIndex.IndexSize,
		WriteHandoff:   segIndex.WriteHandoff,
	}
}

View File

@ -14,37 +14,37 @@ var (
buildID = int64(1)
segmentIdxPb = &indexpb.SegmentIndex{
CollectionID: colID,
PartitionID: partID,
SegmentID: segmentID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
State: commonpb.IndexState_Finished,
FailReason: "",
IndexFilesPaths: nil,
Deleted: false,
CreateTime: 1,
SerializeSize: 0,
CollectionID: colID,
PartitionID: partID,
SegmentID: segmentID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
State: commonpb.IndexState_Finished,
FailReason: "",
IndexFileKeys: nil,
Deleted: false,
CreateTime: 1,
SerializeSize: 0,
}
indexModel2 = &SegmentIndex{
CollectionID: colID,
PartitionID: partID,
SegmentID: segmentID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 1,
IndexFilePaths: nil,
IndexSize: 0,
CollectionID: colID,
PartitionID: partID,
SegmentID: segmentID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 0,
}
)

View File

@ -75,7 +75,7 @@ message SegmentIndex {
int64 index_version = 8;
common.IndexState state = 9;
string fail_reason = 10;
repeated string index_files_paths = 11;
repeated string index_file_keys = 11;
bool deleted = 12;
uint64 create_time = 13;
uint64 serialize_size = 14;
@ -224,7 +224,7 @@ message QueryJobsRequest {
message IndexTaskInfo {
int64 buildID = 1;
common.IndexState state = 2;
repeated string index_files = 3;
repeated string index_file_keys = 3;
uint64 serialized_size = 4;
string fail_reason = 5;
}

View File

@ -222,7 +222,7 @@ type SegmentIndex struct {
IndexVersion int64 `protobuf:"varint,8,opt,name=index_version,json=indexVersion,proto3" json:"index_version,omitempty"`
State commonpb.IndexState `protobuf:"varint,9,opt,name=state,proto3,enum=milvus.proto.common.IndexState" json:"state,omitempty"`
FailReason string `protobuf:"bytes,10,opt,name=fail_reason,json=failReason,proto3" json:"fail_reason,omitempty"`
IndexFilesPaths []string `protobuf:"bytes,11,rep,name=index_files_paths,json=indexFilesPaths,proto3" json:"index_files_paths,omitempty"`
IndexFileKeys []string `protobuf:"bytes,11,rep,name=index_file_keys,json=indexFileKeys,proto3" json:"index_file_keys,omitempty"`
Deleted bool `protobuf:"varint,12,opt,name=deleted,proto3" json:"deleted,omitempty"`
CreateTime uint64 `protobuf:"varint,13,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
SerializeSize uint64 `protobuf:"varint,14,opt,name=serialize_size,json=serializeSize,proto3" json:"serialize_size,omitempty"`
@ -327,9 +327,9 @@ func (m *SegmentIndex) GetFailReason() string {
return ""
}
func (m *SegmentIndex) GetIndexFilesPaths() []string {
func (m *SegmentIndex) GetIndexFileKeys() []string {
if m != nil {
return m.IndexFilesPaths
return m.IndexFileKeys
}
return nil
}
@ -1617,7 +1617,7 @@ func (m *QueryJobsRequest) GetBuildIDs() []int64 {
type IndexTaskInfo struct {
BuildID int64 `protobuf:"varint,1,opt,name=buildID,proto3" json:"buildID,omitempty"`
State commonpb.IndexState `protobuf:"varint,2,opt,name=state,proto3,enum=milvus.proto.common.IndexState" json:"state,omitempty"`
IndexFiles []string `protobuf:"bytes,3,rep,name=index_files,json=indexFiles,proto3" json:"index_files,omitempty"`
IndexFileKeys []string `protobuf:"bytes,3,rep,name=index_file_keys,json=indexFileKeys,proto3" json:"index_file_keys,omitempty"`
SerializedSize uint64 `protobuf:"varint,4,opt,name=serialized_size,json=serializedSize,proto3" json:"serialized_size,omitempty"`
FailReason string `protobuf:"bytes,5,opt,name=fail_reason,json=failReason,proto3" json:"fail_reason,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -1664,9 +1664,9 @@ func (m *IndexTaskInfo) GetState() commonpb.IndexState {
return commonpb.IndexState_IndexStateNone
}
func (m *IndexTaskInfo) GetIndexFiles() []string {
func (m *IndexTaskInfo) GetIndexFileKeys() []string {
if m != nil {
return m.IndexFiles
return m.IndexFileKeys
}
return nil
}
@ -2022,141 +2022,141 @@ func init() { proto.RegisterFile("index_coord.proto", fileDescriptor_f9e019eb3fd
var fileDescriptor_f9e019eb3fda53c2 = []byte{
// 2165 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0xcd, 0x6e, 0x1b, 0xc9,
0xf1, 0xf7, 0x90, 0x94, 0xc4, 0xa9, 0x21, 0x25, 0xb9, 0xad, 0xfd, 0xff, 0xb9, 0xb4, 0x1d, 0xcb,
0xe3, 0xf5, 0x9a, 0x09, 0xb0, 0x92, 0xa3, 0xcd, 0x06, 0x9b, 0x20, 0x09, 0xa0, 0x8f, 0xb5, 0x4d,
0x79, 0x25, 0x28, 0x43, 0x63, 0x81, 0x5d, 0x04, 0x98, 0x0c, 0x39, 0x4d, 0xa9, 0x57, 0xe4, 0x34,
0x3d, 0xdd, 0x63, 0x5b, 0x0e, 0x10, 0xe4, 0x92, 0xc3, 0x2e, 0x16, 0x08, 0x90, 0x43, 0xf2, 0x02,
0x39, 0x6d, 0x0e, 0xb9, 0xe7, 0x92, 0x07, 0x48, 0x9e, 0x22, 0x2f, 0x91, 0x6b, 0xd0, 0x1f, 0x33,
0x9c, 0x19, 0x0e, 0x45, 0xea, 0x23, 0x97, 0xe4, 0xc6, 0xae, 0xa9, 0xfe, 0xaa, 0xfa, 0x55, 0xfd,
0xaa, 0x9a, 0x70, 0x93, 0x04, 0x3e, 0x7e, 0xe3, 0xf6, 0x28, 0x0d, 0xfd, 0x8d, 0x51, 0x48, 0x39,
0x45, 0x68, 0x48, 0x06, 0xaf, 0x22, 0xa6, 0x46, 0x1b, 0xf2, 0x7b, 0xb3, 0xd6, 0xa3, 0xc3, 0x21,
0x0d, 0x94, 0xac, 0xb9, 0x4c, 0x02, 0x8e, 0xc3, 0xc0, 0x1b, 0xe8, 0x71, 0x2d, 0x3d, 0xc3, 0xfe,
0x4b, 0x05, 0xcc, 0xb6, 0x98, 0xd5, 0x0e, 0xfa, 0x14, 0xd9, 0x50, 0xeb, 0xd1, 0xc1, 0x00, 0xf7,
0x38, 0xa1, 0x41, 0x7b, 0xaf, 0x61, 0xac, 0x1b, 0xad, 0xb2, 0x93, 0x91, 0xa1, 0x06, 0x2c, 0xf5,
0x09, 0x1e, 0xf8, 0xed, 0xbd, 0x46, 0x49, 0x7e, 0x8e, 0x87, 0xe8, 0x2e, 0x80, 0x3a, 0x60, 0xe0,
0x0d, 0x71, 0xa3, 0xbc, 0x6e, 0xb4, 0x4c, 0xc7, 0x94, 0x92, 0x43, 0x6f, 0x88, 0xc5, 0x44, 0x39,
0x68, 0xef, 0x35, 0x2a, 0x6a, 0xa2, 0x1e, 0xa2, 0x1d, 0xb0, 0xf8, 0xd9, 0x08, 0xbb, 0x23, 0x2f,
0xf4, 0x86, 0xac, 0xb1, 0xb0, 0x5e, 0x6e, 0x59, 0x5b, 0xf7, 0x37, 0x32, 0x57, 0xd3, 0x77, 0x7a,
0x8e, 0xcf, 0x3e, 0xf3, 0x06, 0x11, 0x3e, 0xf2, 0x48, 0xe8, 0x80, 0x98, 0x75, 0x24, 0x27, 0xa1,
0x3d, 0xa8, 0xa9, 0xcd, 0xf5, 0x22, 0x8b, 0xf3, 0x2e, 0x62, 0xc9, 0x69, 0x7a, 0x95, 0xfb, 0x7a,
0x15, 0xec, 0xbb, 0x21, 0x7d, 0xcd, 0x1a, 0x4b, 0xf2, 0xa0, 0x96, 0x96, 0x39, 0xf4, 0x35, 0x13,
0xb7, 0xe4, 0x94, 0x7b, 0x03, 0xa5, 0x50, 0x95, 0x0a, 0xa6, 0x94, 0xc8, 0xcf, 0x1f, 0xc1, 0x02,
0xe3, 0x1e, 0xc7, 0x0d, 0x73, 0xdd, 0x68, 0x2d, 0x6f, 0xdd, 0x2b, 0x3c, 0x80, 0xb4, 0x78, 0x47,
0xa8, 0x39, 0x4a, 0x1b, 0x7d, 0x04, 0xff, 0xaf, 0x8e, 0x2f, 0x87, 0x6e, 0xdf, 0x23, 0x03, 0x37,
0xc4, 0x1e, 0xa3, 0x41, 0x03, 0xa4, 0x21, 0xd7, 0x48, 0x32, 0xe7, 0x89, 0x47, 0x06, 0x8e, 0xfc,
0x86, 0x6c, 0xa8, 0x13, 0xe6, 0x7a, 0x11, 0xa7, 0xae, 0xfc, 0xde, 0xb0, 0xd6, 0x8d, 0x56, 0xd5,
0xb1, 0x08, 0xdb, 0x8e, 0x38, 0x95, 0xdb, 0xa0, 0x03, 0xb8, 0x19, 0x31, 0x1c, 0xba, 0x19, 0xf3,
0xd4, 0xe6, 0x35, 0xcf, 0x8a, 0x98, 0xdb, 0x1e, 0x9b, 0xc8, 0xfe, 0xad, 0x01, 0xf0, 0x44, 0x7a,
0x5c, 0xae, 0xfe, 0x93, 0xd8, 0xe9, 0x24, 0xe8, 0x53, 0x09, 0x18, 0x6b, 0xeb, 0xee, 0xc6, 0x24,
0x2a, 0x37, 0x12, 0x94, 0x69, 0x4c, 0x48, 0xc0, 0x35, 0x60, 0xc9, 0xc7, 0x03, 0xcc, 0xb1, 0x2f,
0xc1, 0x54, 0x75, 0xe2, 0x21, 0xba, 0x07, 0x56, 0x2f, 0xc4, 0xc2, 0x16, 0x9c, 0x68, 0x34, 0x55,
0x1c, 0x50, 0xa2, 0x17, 0x64, 0x88, 0xed, 0xaf, 0x2a, 0x50, 0xeb, 0xe0, 0xe3, 0x21, 0x0e, 0xb8,
0x3a, 0xc9, 0x3c, 0xe0, 0x5d, 0x07, 0x6b, 0xe4, 0x85, 0x9c, 0x68, 0x15, 0x05, 0xe0, 0xb4, 0x08,
0xdd, 0x01, 0x93, 0xe9, 0x55, 0xf7, 0xe4, 0xae, 0x65, 0x67, 0x2c, 0x40, 0xef, 0x42, 0x35, 0x88,
0x86, 0xca, 0xf5, 0x1a, 0xc4, 0x41, 0x34, 0x94, 0x8e, 0x4f, 0xc1, 0x7b, 0x21, 0x0b, 0xef, 0x06,
0x2c, 0x75, 0x23, 0x22, 0x23, 0x66, 0x51, 0x7d, 0xd1, 0x43, 0xf4, 0x7f, 0xb0, 0x18, 0x50, 0x1f,
0xb7, 0xf7, 0x34, 0xd0, 0xf4, 0x08, 0x3d, 0x80, 0xba, 0x32, 0xea, 0x2b, 0x1c, 0x32, 0x42, 0x03,
0x0d, 0x33, 0x85, 0xcd, 0xcf, 0x94, 0xec, 0xb2, 0x48, 0xbb, 0x07, 0xd6, 0x24, 0xba, 0xa0, 0x3f,
0xc6, 0xd4, 0xf7, 0xe2, 0x3c, 0xd3, 0x27, 0x03, 0xcc, 0xdc, 0x91, 0xc7, 0x4f, 0x58, 0xc3, 0x5a,
0x2f, 0xb7, 0x4c, 0x67, 0x45, 0x7e, 0x78, 0x22, 0xe4, 0x47, 0x42, 0x9c, 0xf6, 0x5f, 0xed, 0x5c,
0xff, 0xd5, 0xf3, 0xfe, 0x43, 0x0f, 0x61, 0x99, 0xe1, 0x90, 0x78, 0x03, 0xf2, 0x16, 0xbb, 0x8c,
0xbc, 0xc5, 0x8d, 0x65, 0xa9, 0x53, 0x4f, 0xa4, 0x1d, 0xf2, 0x16, 0x0b, 0x53, 0xbc, 0x0e, 0x09,
0xc7, 0xee, 0x89, 0x17, 0xf8, 0xb4, 0xdf, 0x6f, 0xac, 0xc8, 0x7d, 0x6a, 0x52, 0xf8, 0x4c, 0xc9,
0xec, 0x3f, 0x1a, 0x70, 0xcb, 0xc1, 0xc7, 0x84, 0x71, 0x1c, 0x1e, 0x52, 0x1f, 0x3b, 0xf8, 0x65,
0x84, 0x19, 0x47, 0x8f, 0xa1, 0xd2, 0xf5, 0x18, 0xd6, 0xb0, 0xbc, 0x53, 0x68, 0xa1, 0x03, 0x76,
0xbc, 0xe3, 0x31, 0xec, 0x48, 0x4d, 0xf4, 0x43, 0x58, 0xf2, 0x7c, 0x3f, 0xc4, 0x8c, 0x49, 0x70,
0x4c, 0x9b, 0xb4, 0xad, 0x74, 0x9c, 0x58, 0x39, 0xe5, 0xc9, 0x72, 0xda, 0x93, 0xf6, 0xef, 0x0c,
0x58, 0xcb, 0x9e, 0x8c, 0x8d, 0x68, 0xc0, 0x30, 0xfa, 0x10, 0x16, 0x85, 0x3f, 0x22, 0xa6, 0x0f,
0x77, 0xbb, 0x70, 0x9f, 0x8e, 0x54, 0x71, 0xb4, 0xaa, 0x48, 0x94, 0x24, 0x20, 0x3c, 0x0e, 0x62,
0x75, 0xc2, 0xfb, 0xf9, 0x68, 0xd3, 0xe9, 0xbe, 0x1d, 0x10, 0xae, 0x62, 0xd6, 0x01, 0x92, 0xfc,
0xb6, 0x3f, 0x87, 0xb5, 0xa7, 0x98, 0xa7, 0x70, 0xa1, 0x6d, 0x35, 0x4f, 0xf8, 0x64, 0x33, 0x7c,
0x29, 0x97, 0xe1, 0xed, 0x3f, 0x19, 0xf0, 0x4e, 0x6e, 0xed, 0xab, 0xdc, 0x36, 0x01, 0x78, 0xe9,
0x2a, 0x00, 0x2f, 0xe7, 0x01, 0x6e, 0xff, 0xc6, 0x80, 0xdb, 0x4f, 0x31, 0x4f, 0x27, 0x8f, 0x6b,
0xb6, 0x04, 0xfa, 0x0e, 0x40, 0x92, 0x34, 0x58, 0xa3, 0xbc, 0x5e, 0x6e, 0x95, 0x9d, 0x94, 0xc4,
0xfe, 0xca, 0x80, 0x9b, 0x13, 0xfb, 0x67, 0x73, 0x8f, 0x91, 0xcf, 0x3d, 0xff, 0x29, 0x73, 0xfc,
0xde, 0x80, 0x3b, 0xc5, 0xe6, 0xb8, 0x8a, 0xf3, 0x7e, 0xaa, 0x26, 0x61, 0x81, 0x52, 0x41, 0x35,
0x0f, 0x8b, 0x38, 0x61, 0x72, 0x4f, 0x3d, 0xc9, 0xfe, 0xa6, 0x0c, 0x68, 0x57, 0x26, 0x0b, 0xf9,
0xf1, 0x22, 0xae, 0xb9, 0x74, 0x81, 0x92, 0x2b, 0x43, 0x2a, 0xd7, 0x51, 0x86, 0x2c, 0x5c, 0xaa,
0x0c, 0xb9, 0x03, 0xa6, 0xc8, 0x9a, 0x8c, 0x7b, 0xc3, 0x91, 0xe4, 0x8c, 0x8a, 0x33, 0x16, 0x4c,
0x92, 0xfe, 0xd2, 0x9c, 0xa4, 0x5f, 0xbd, 0x34, 0xe9, 0xbf, 0x81, 0x5b, 0x71, 0x60, 0x4b, 0x0a,
0xbf, 0x80, 0x3b, 0xb2, 0xa1, 0x50, 0xca, 0x87, 0xc2, 0x0c, 0xa7, 0xd8, 0xff, 0x2a, 0xc1, 0xcd,
0x76, 0xcc, 0x3a, 0x82, 0x74, 0x64, 0xdd, 0x70, 0x7e, 0xa4, 0x4c, 0x47, 0x40, 0x8a, 0xa4, 0xcb,
0x53, 0x49, 0xba, 0x92, 0x25, 0xe9, 0xec, 0x01, 0x17, 0xf2, 0xa8, 0xb9, 0x9e, 0xc2, 0xb3, 0x05,
0xab, 0x63, 0xd2, 0xd5, 0x9c, 0xbb, 0x24, 0x39, 0x77, 0x99, 0xa4, 0x6f, 0xcf, 0xd0, 0x23, 0x58,
0x49, 0x18, 0xd2, 0x57, 0xc4, 0x59, 0x95, 0x08, 0x19, 0xd3, 0xa9, 0x1f, 0x33, 0x67, 0xb6, 0x88,
0x30, 0x0b, 0x8a, 0x88, 0x74, 0x41, 0x03, 0x99, 0x82, 0xc6, 0xfe, 0xab, 0x01, 0x56, 0x12, 0xa0,
0x73, 0x36, 0x07, 0x19, 0xbf, 0x94, 0xf2, 0x7e, 0xb9, 0x0f, 0x35, 0x1c, 0x78, 0xdd, 0x01, 0xd6,
0xb8, 0x2d, 0x2b, 0xdc, 0x2a, 0x99, 0xc2, 0xed, 0x13, 0xb0, 0xc6, 0xe5, 0x64, 0x1c, 0x83, 0x0f,
0xa7, 0xd6, 0x93, 0x69, 0x50, 0x38, 0x90, 0xd4, 0x95, 0xcc, 0xfe, 0xba, 0x34, 0xa6, 0x39, 0x85,
0xd8, 0xab, 0x24, 0xb3, 0x5f, 0x40, 0x4d, 0xdf, 0x42, 0x95, 0xb9, 0x2a, 0xa5, 0xfd, 0xa8, 0xe8,
0x58, 0x45, 0x9b, 0x6e, 0xa4, 0xcc, 0xf8, 0x49, 0xc0, 0xc3, 0x33, 0xc7, 0x62, 0x63, 0x49, 0xd3,
0x85, 0xd5, 0xbc, 0x02, 0x5a, 0x85, 0xf2, 0x29, 0x3e, 0xd3, 0x36, 0x16, 0x3f, 0x45, 0xfa, 0x7f,
0x25, 0xb0, 0xa3, 0x59, 0xff, 0xde, 0xb9, 0xf9, 0xb4, 0x4f, 0x1d, 0xa5, 0xfd, 0xe3, 0xd2, 0xc7,
0x86, 0x7d, 0x06, 0xab, 0x7b, 0x21, 0x1d, 0x5d, 0x38, 0x93, 0xda, 0x50, 0x4b, 0x95, 0xc6, 0x71,
0xf0, 0x66, 0x64, 0xb3, 0xc2, 0xf7, 0x73, 0x58, 0xdb, 0xc3, 0xac, 0x17, 0x92, 0xee, 0xc5, 0x13,
0xf9, 0x8c, 0x6a, 0xe3, 0x1b, 0x03, 0xde, 0xc9, 0xad, 0x7d, 0x15, 0x1f, 0xff, 0x2c, 0x8b, 0x3c,
0xe5, 0xe2, 0x19, 0x9d, 0x4c, 0x1a, 0x71, 0x9e, 0x64, 0x51, 0xf9, 0x6d, 0x47, 0x64, 0x8e, 0xa3,
0x90, 0x1e, 0xcb, 0x1a, 0xf1, 0xfa, 0x6e, 0xfc, 0x07, 0x03, 0xee, 0x4e, 0xd9, 0xe3, 0x2a, 0x37,
0xcf, 0x37, 0xbd, 0xa5, 0x59, 0x4d, 0x6f, 0x39, 0xd7, 0xf4, 0xda, 0x7f, 0x2e, 0x41, 0xbd, 0xc3,
0x69, 0xe8, 0x1d, 0xe3, 0x5d, 0x1a, 0xf4, 0xc9, 0xb1, 0x48, 0xa7, 0x71, 0x1d, 0x6d, 0xc8, 0x6b,
0x24, 0x95, 0xf2, 0x7d, 0xa8, 0x79, 0xbd, 0x1e, 0x66, 0xcc, 0x3d, 0xc5, 0x67, 0x3a, 0x4b, 0x98,
0x8e, 0xa5, 0x64, 0xcf, 0x85, 0x48, 0x74, 0x20, 0x0c, 0xf7, 0x42, 0xcc, 0xdd, 0xb1, 0xa6, 0x86,
0xd6, 0x8a, 0xfa, 0xb0, 0x1d, 0x6b, 0x8b, 0xc2, 0x3b, 0x62, 0xb8, 0xd3, 0xf9, 0x54, 0xa6, 0xed,
0xaa, 0xa3, 0x47, 0xa2, 0xec, 0xe9, 0x46, 0xbd, 0x53, 0xcc, 0xd3, 0x69, 0x1b, 0x94, 0x48, 0xe6,
0xed, 0xdb, 0x60, 0x86, 0x94, 0x72, 0x99, 0x6b, 0x25, 0xc7, 0x9a, 0x4e, 0x55, 0x08, 0x44, 0x3a,
0xd1, 0xab, 0xb6, 0xb7, 0x0f, 0x34, 0xb7, 0xea, 0x91, 0xe8, 0x1f, 0xdb, 0xdb, 0x07, 0x9f, 0x04,
0xfe, 0x88, 0x92, 0x80, 0xcb, 0xc4, 0x6b, 0x3a, 0x69, 0x91, 0xb8, 0x1e, 0x53, 0x96, 0x70, 0x45,
0x59, 0x20, 0x93, 0xae, 0xe9, 0x58, 0x5a, 0xf6, 0xe2, 0x6c, 0x84, 0xed, 0x7f, 0x96, 0x61, 0x55,
0xd5, 0x36, 0xfb, 0xb4, 0x1b, 0xc3, 0xe3, 0x0e, 0x98, 0xbd, 0x41, 0x24, 0xda, 0x04, 0x8d, 0x0d,
0xd3, 0x19, 0x0b, 0xb2, 0x3d, 0x99, 0x3b, 0x0a, 0x71, 0x9f, 0xbc, 0xd1, 0x96, 0x1b, 0xf7, 0x64,
0x47, 0x52, 0x9c, 0x66, 0xb2, 0xf2, 0x04, 0x93, 0xf9, 0x1e, 0xf7, 0x34, 0xbd, 0x54, 0x24, 0xbd,
0x98, 0x42, 0xa2, 0x98, 0x65, 0x82, 0x30, 0x16, 0x0a, 0x08, 0x23, 0xc5, 0xa0, 0x8b, 0x59, 0x06,
0xcd, 0x82, 0x77, 0x29, 0xcf, 0x93, 0xcf, 0x60, 0x39, 0x36, 0x4c, 0x4f, 0x62, 0x44, 0x5a, 0xaf,
0xa0, 0x7d, 0x91, 0x89, 0x2c, 0x0d, 0x26, 0xa7, 0xce, 0x32, 0xd8, 0xca, 0x33, 0xae, 0x79, 0x29,
0xc6, 0xcd, 0x55, 0x7b, 0x70, 0x99, 0x6a, 0x2f, 0xcd, 0x9e, 0x56, 0x96, 0x3d, 0x3f, 0x85, 0xd5,
0x9f, 0x47, 0x38, 0x3c, 0xdb, 0xa7, 0x5d, 0x36, 0x9f, 0x8f, 0x9b, 0x50, 0xd5, 0x8e, 0x8a, 0x33,
0x6d, 0x32, 0xb6, 0xff, 0x6e, 0x40, 0x5d, 0x86, 0xfd, 0x0b, 0x8f, 0x9d, 0xc6, 0x2f, 0x27, 0xb1,
0x97, 0x8d, 0xac, 0x97, 0x2f, 0xdf, 0x27, 0xa4, 0xda, 0x7e, 0xd9, 0xb3, 0x98, 0x3a, 0xc1, 0xc9,
0x86, 0xbf, 0xa8, 0xf0, 0xa8, 0x14, 0x16, 0x1e, 0xb9, 0x8e, 0x63, 0x61, 0xa2, 0xe3, 0xf8, 0xd6,
0x80, 0x9b, 0x29, 0xe3, 0x5c, 0x25, 0x77, 0x65, 0x4c, 0x5a, 0xca, 0x9b, 0x74, 0x27, 0x9b, 0xd3,
0xcb, 0x45, 0x3e, 0x4e, 0xe5, 0xf4, 0xd8, 0xb8, 0x99, 0xbc, 0xfe, 0x1c, 0x56, 0x04, 0x79, 0x5e,
0x8f, 0x1f, 0xff, 0x61, 0xc0, 0xd2, 0x3e, 0xed, 0x4a, 0x0f, 0xa6, 0xc1, 0x63, 0x64, 0xdf, 0x92,
0x56, 0xa1, 0xec, 0x93, 0xa1, 0x4e, 0xc4, 0xe2, 0xa7, 0x08, 0x2e, 0xc6, 0xbd, 0x90, 0x8f, 0x5f,
0xc3, 0x44, 0x65, 0x25, 0x24, 0xf2, 0x31, 0xe5, 0x5d, 0xa8, 0xe2, 0xc0, 0x57, 0x1f, 0x75, 0xf9,
0x8a, 0x03, 0x5f, 0x7e, 0xba, 0x9e, 0x8e, 0x64, 0x0d, 0x16, 0x46, 0x74, 0xfc, 0x82, 0xa5, 0x06,
0xf6, 0x1a, 0xa0, 0xa7, 0x98, 0xef, 0xd3, 0xae, 0xf0, 0x4a, 0x6c, 0x1e, 0xfb, 0x6f, 0x25, 0xd9,
0x2d, 0x8c, 0xc5, 0x57, 0x71, 0xb0, 0x0d, 0x75, 0xc5, 0x3c, 0x5f, 0xd2, 0xae, 0x1b, 0x44, 0xb1,
0x51, 0x2c, 0x29, 0xdc, 0xa7, 0xdd, 0xc3, 0x68, 0x88, 0x3e, 0x80, 0x5b, 0x24, 0x70, 0x47, 0x9a,
0x0c, 0x13, 0x4d, 0x65, 0xa5, 0x55, 0x12, 0xc4, 0x34, 0xa9, 0xd5, 0xdf, 0x87, 0x15, 0x1c, 0xbc,
0x8c, 0x70, 0x84, 0x13, 0x55, 0x65, 0xb3, 0xba, 0x16, 0x6b, 0x3d, 0x41, 0x7a, 0x1e, 0x3b, 0x75,
0xd9, 0x80, 0x72, 0xa6, 0x93, 0xa1, 0x29, 0x24, 0x1d, 0x21, 0x40, 0x1f, 0x83, 0x29, 0xa6, 0x2b,
0x68, 0xa9, 0xaa, 0xff, 0x76, 0x11, 0xb4, 0xb4, 0xbf, 0x9d, 0xea, 0x97, 0xea, 0x07, 0x13, 0x01,
0xa2, 0xeb, 0x60, 0x9f, 0xb0, 0x53, 0x4d, 0x31, 0xa0, 0x44, 0x7b, 0x84, 0x9d, 0x6e, 0x7d, 0x0d,
0x00, 0x12, 0x91, 0xbb, 0x94, 0x86, 0x3e, 0x1a, 0x48, 0x33, 0xef, 0xd2, 0xe1, 0x88, 0x06, 0x38,
0xe0, 0x32, 0x6c, 0x19, 0xda, 0xc8, 0x6e, 0xa6, 0x07, 0x93, 0x8a, 0xda, 0x2d, 0xcd, 0xf7, 0x0a,
0xf5, 0x73, 0xca, 0xf6, 0x0d, 0xf4, 0x52, 0x56, 0xce, 0x62, 0x48, 0x18, 0x27, 0x3d, 0xb6, 0x7b,
0xe2, 0x05, 0x01, 0x1e, 0xa0, 0xad, 0x29, 0xef, 0x4c, 0x45, 0xca, 0xf1, 0x9e, 0x0f, 0x0a, 0xf7,
0xec, 0xf0, 0x90, 0x04, 0xc7, 0x31, 0x2e, 0xec, 0x1b, 0xe8, 0x05, 0x58, 0xa9, 0x66, 0x1f, 0xbd,
0x5f, 0x64, 0xc6, 0xc9, 0xd7, 0x80, 0xe6, 0x79, 0x00, 0xb2, 0x6f, 0xa0, 0x3e, 0xd4, 0x33, 0xaf,
0x51, 0xa8, 0x75, 0x5e, 0xc1, 0x9e, 0x7e, 0x02, 0x6a, 0x7e, 0x77, 0x0e, 0xcd, 0xe4, 0xf4, 0xbf,
0x52, 0x06, 0x9b, 0x78, 0xce, 0xd9, 0x9c, 0xb2, 0xc8, 0xb4, 0x87, 0xa7, 0xe6, 0xe3, 0xf9, 0x27,
0x24, 0x9b, 0xfb, 0xe3, 0x4b, 0x2a, 0x70, 0x3d, 0x9a, 0xdd, 0x95, 0xa8, 0xdd, 0x5a, 0xf3, 0xb6,
0x2f, 0xf6, 0x0d, 0x74, 0x04, 0x66, 0xd2, 0x41, 0xa0, 0xf7, 0x8a, 0x26, 0xe6, 0x1b, 0x8c, 0x39,
0x9c, 0x93, 0x29, 0xde, 0x8b, 0x9d, 0x53, 0xd4, 0x3b, 0x14, 0x3b, 0xa7, 0xb0, 0x13, 0xb0, 0x6f,
0xa0, 0x5f, 0x8f, 0x9f, 0x24, 0x33, 0x25, 0x33, 0x7a, 0x7c, 0xde, 0xf5, 0x8b, 0x2a, 0xf8, 0xe6,
0xf7, 0x2f, 0x30, 0x23, 0x05, 0x0e, 0xd4, 0x39, 0xa1, 0xaf, 0x55, 0xe9, 0x12, 0x85, 0x9e, 0xa8,
0xf4, 0x0b, 0x36, 0xd7, 0xb1, 0x34, 0xa9, 0x3a, 0x75, 0xf3, 0x73, 0x66, 0x24, 0x9b, 0xbb, 0x00,
0x4f, 0x31, 0x3f, 0xc0, 0x3c, 0x24, 0x3d, 0x96, 0x0f, 0xab, 0x71, 0xc2, 0xd0, 0x0a, 0xf1, 0x56,
0x8f, 0x66, 0xea, 0x25, 0x1b, 0x74, 0xc1, 0xda, 0x3d, 0xc1, 0xbd, 0xd3, 0x67, 0xd8, 0x1b, 0xf0,
0x13, 0x54, 0x3c, 0x33, 0xa5, 0x31, 0x05, 0x7b, 0x45, 0x8a, 0xf1, 0x1e, 0x5b, 0xdf, 0x2e, 0xea,
0xbf, 0x28, 0x0f, 0xa9, 0x8f, 0xff, 0xfb, 0x73, 0xe1, 0x11, 0x98, 0x49, 0x73, 0x50, 0x1c, 0x6a,
0xf9, 0xde, 0x61, 0x56, 0xa8, 0x7d, 0x01, 0x66, 0x52, 0x6d, 0x15, 0xaf, 0x98, 0xaf, 0x54, 0x9b,
0x0f, 0x67, 0x68, 0x25, 0xa7, 0x3d, 0x84, 0x6a, 0x5c, 0x1d, 0xa1, 0x07, 0xd3, 0xf2, 0x42, 0x7a,
0xe5, 0x19, 0x67, 0xfd, 0x25, 0x58, 0xa9, 0xd2, 0xa1, 0x98, 0x09, 0x26, 0x4b, 0x8e, 0xe6, 0xa3,
0x99, 0x7a, 0xff, 0x1b, 0x01, 0xb9, 0xf3, 0x83, 0x2f, 0xb6, 0x8e, 0x09, 0x3f, 0x89, 0xba, 0xc2,
0xb2, 0x9b, 0x4a, 0xf3, 0x03, 0x42, 0xf5, 0xaf, 0xcd, 0xf8, 0x94, 0x9b, 0x72, 0xa5, 0x4d, 0x69,
0xa7, 0x51, 0xb7, 0xbb, 0x28, 0x87, 0x1f, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x56, 0x61, 0x38,
0x0e, 0x61, 0x20, 0x00, 0x00,
0x11, 0xf6, 0x70, 0x28, 0x89, 0x53, 0x43, 0xea, 0xa7, 0xad, 0x4d, 0xb8, 0xb4, 0x1d, 0xcb, 0xe3,
0xb5, 0xcd, 0x04, 0x58, 0xd9, 0xd1, 0x66, 0x83, 0x4d, 0x90, 0x04, 0xb0, 0xa4, 0xb5, 0x4d, 0x79,
0x25, 0x28, 0x43, 0x63, 0x81, 0x5d, 0x04, 0x60, 0x86, 0x9c, 0xa6, 0xd4, 0x2b, 0x72, 0x9a, 0x9e,
0xee, 0xb1, 0x2d, 0x07, 0x08, 0x72, 0xd9, 0x43, 0x16, 0x0b, 0x04, 0xc8, 0x21, 0x79, 0x81, 0x9c,
0x36, 0x87, 0xdc, 0x73, 0xc9, 0x0b, 0xe4, 0x94, 0x47, 0xc8, 0x4b, 0xe4, 0x1a, 0xf4, 0xcf, 0x0c,
0x67, 0x86, 0x43, 0x91, 0xfa, 0xc9, 0x25, 0x7b, 0x63, 0xd7, 0x54, 0xff, 0x55, 0x7d, 0x55, 0x5f,
0x55, 0x13, 0xd6, 0x48, 0xe0, 0xe3, 0x37, 0x9d, 0x1e, 0xa5, 0xa1, 0xbf, 0x39, 0x0a, 0x29, 0xa7,
0x08, 0x0d, 0xc9, 0xe0, 0x55, 0xc4, 0xd4, 0x68, 0x53, 0x7e, 0x6f, 0x54, 0x7b, 0x74, 0x38, 0xa4,
0x81, 0x92, 0x35, 0x96, 0x49, 0xc0, 0x71, 0x18, 0x78, 0x03, 0x3d, 0xae, 0xa6, 0x67, 0x38, 0x7f,
0x2b, 0x83, 0xd5, 0x12, 0xb3, 0x5a, 0x41, 0x9f, 0x22, 0x07, 0xaa, 0x3d, 0x3a, 0x18, 0xe0, 0x1e,
0x27, 0x34, 0x68, 0xed, 0xd6, 0x8d, 0x0d, 0xa3, 0x69, 0xba, 0x19, 0x19, 0xaa, 0xc3, 0x52, 0x9f,
0xe0, 0x81, 0xdf, 0xda, 0xad, 0x97, 0xe4, 0xe7, 0x78, 0x88, 0x6e, 0x01, 0xa8, 0x03, 0x06, 0xde,
0x10, 0xd7, 0xcd, 0x0d, 0xa3, 0x69, 0xb9, 0x96, 0x94, 0x1c, 0x78, 0x43, 0x2c, 0x26, 0xca, 0x41,
0x6b, 0xb7, 0x5e, 0x56, 0x13, 0xf5, 0x10, 0x6d, 0x83, 0xcd, 0x4f, 0x47, 0xb8, 0x33, 0xf2, 0x42,
0x6f, 0xc8, 0xea, 0x0b, 0x1b, 0x66, 0xd3, 0xde, 0xba, 0xb3, 0x99, 0xb9, 0x9a, 0xbe, 0xd3, 0x73,
0x7c, 0xfa, 0xa9, 0x37, 0x88, 0xf0, 0xa1, 0x47, 0x42, 0x17, 0xc4, 0xac, 0x43, 0x39, 0x09, 0xed,
0x42, 0x55, 0x6d, 0xae, 0x17, 0x59, 0x9c, 0x77, 0x11, 0x5b, 0x4e, 0xd3, 0xab, 0xdc, 0xd1, 0xab,
0x60, 0xbf, 0x13, 0xd2, 0xd7, 0xac, 0xbe, 0x24, 0x0f, 0x6a, 0x6b, 0x99, 0x4b, 0x5f, 0x33, 0x71,
0x4b, 0x4e, 0xb9, 0x37, 0x50, 0x0a, 0x15, 0xa9, 0x60, 0x49, 0x89, 0xfc, 0xfc, 0x21, 0x2c, 0x30,
0xee, 0x71, 0x5c, 0xb7, 0x36, 0x8c, 0xe6, 0xf2, 0xd6, 0xed, 0xc2, 0x03, 0x48, 0x8b, 0xb7, 0x85,
0x9a, 0xab, 0xb4, 0xd1, 0x87, 0xf0, 0x5d, 0x75, 0x7c, 0x39, 0xec, 0xf4, 0x3d, 0x32, 0xe8, 0x84,
0xd8, 0x63, 0x34, 0xa8, 0x83, 0x34, 0xe4, 0x3a, 0x49, 0xe6, 0x3c, 0xf1, 0xc8, 0xc0, 0x95, 0xdf,
0x90, 0x03, 0x35, 0xc2, 0x3a, 0x5e, 0xc4, 0x69, 0x47, 0x7e, 0xaf, 0xdb, 0x1b, 0x46, 0xb3, 0xe2,
0xda, 0x84, 0x3d, 0x8e, 0x38, 0x95, 0xdb, 0xa0, 0x7d, 0x58, 0x8b, 0x18, 0x0e, 0x3b, 0x19, 0xf3,
0x54, 0xe7, 0x35, 0xcf, 0x8a, 0x98, 0xdb, 0x1a, 0x9b, 0xc8, 0xf9, 0xd2, 0x00, 0x78, 0x22, 0x3d,
0x2e, 0x57, 0xff, 0x59, 0xec, 0x74, 0x12, 0xf4, 0xa9, 0x04, 0x8c, 0xbd, 0x75, 0x6b, 0x73, 0x12,
0x95, 0x9b, 0x09, 0xca, 0x34, 0x26, 0x24, 0xe0, 0xea, 0xb0, 0xe4, 0xe3, 0x01, 0xe6, 0xd8, 0x97,
0x60, 0xaa, 0xb8, 0xf1, 0x10, 0xdd, 0x06, 0xbb, 0x17, 0x62, 0x61, 0x0b, 0x4e, 0x34, 0x9a, 0xca,
0x2e, 0x28, 0xd1, 0x0b, 0x32, 0xc4, 0xce, 0x97, 0x65, 0xa8, 0xb6, 0xf1, 0xd1, 0x10, 0x07, 0x5c,
0x9d, 0x64, 0x1e, 0xf0, 0x6e, 0x80, 0x3d, 0xf2, 0x42, 0x4e, 0xb4, 0x8a, 0x02, 0x70, 0x5a, 0x84,
0x6e, 0x82, 0xc5, 0xf4, 0xaa, 0xbb, 0x72, 0x57, 0xd3, 0x1d, 0x0b, 0xd0, 0xbb, 0x50, 0x09, 0xa2,
0xa1, 0x72, 0xbd, 0x06, 0x71, 0x10, 0x0d, 0xa5, 0xe3, 0x53, 0xf0, 0x5e, 0xc8, 0xc2, 0xbb, 0x0e,
0x4b, 0xdd, 0x88, 0xc8, 0x88, 0x59, 0x54, 0x5f, 0xf4, 0x10, 0x7d, 0x07, 0x16, 0x03, 0xea, 0xe3,
0xd6, 0xae, 0x06, 0x9a, 0x1e, 0xa1, 0xbb, 0x50, 0x53, 0x46, 0x7d, 0x85, 0x43, 0x46, 0x68, 0xa0,
0x61, 0xa6, 0xb0, 0xf9, 0xa9, 0x92, 0x5d, 0x14, 0x69, 0xb7, 0xc1, 0x9e, 0x44, 0x17, 0xf4, 0xc7,
0x98, 0xba, 0x0f, 0x2b, 0x6a, 0xf3, 0x3e, 0x19, 0xe0, 0xce, 0x09, 0x3e, 0x65, 0x75, 0x7b, 0xc3,
0x6c, 0x5a, 0xae, 0x3a, 0xd3, 0x13, 0x32, 0xc0, 0xcf, 0xf1, 0x29, 0x4b, 0xfb, 0xae, 0x7a, 0xa6,
0xef, 0x6a, 0x79, 0xdf, 0xa1, 0x7b, 0xb0, 0xcc, 0x70, 0x48, 0xbc, 0x01, 0x79, 0x8b, 0x3b, 0x8c,
0xbc, 0xc5, 0xf5, 0x65, 0xa9, 0x53, 0x4b, 0xa4, 0x6d, 0xf2, 0x16, 0x0b, 0x33, 0xbc, 0x0e, 0x09,
0xc7, 0x9d, 0x63, 0x2f, 0xf0, 0x69, 0xbf, 0x5f, 0x5f, 0x91, 0xfb, 0x54, 0xa5, 0xf0, 0x99, 0x92,
0x39, 0x7f, 0x36, 0xe0, 0xba, 0x8b, 0x8f, 0x08, 0xe3, 0x38, 0x3c, 0xa0, 0x3e, 0x76, 0xf1, 0xcb,
0x08, 0x33, 0x8e, 0x1e, 0x41, 0xb9, 0xeb, 0x31, 0xac, 0x21, 0x79, 0xb3, 0xd0, 0x3a, 0xfb, 0xec,
0x68, 0xdb, 0x63, 0xd8, 0x95, 0x9a, 0xe8, 0xc7, 0xb0, 0xe4, 0xf9, 0x7e, 0x88, 0x19, 0x93, 0xc0,
0x98, 0x36, 0xe9, 0xb1, 0xd2, 0x71, 0x63, 0xe5, 0x94, 0x17, 0xcd, 0xb4, 0x17, 0x9d, 0x3f, 0x18,
0xb0, 0x9e, 0x3d, 0x19, 0x1b, 0xd1, 0x80, 0x61, 0xf4, 0x01, 0x2c, 0x0a, 0x5f, 0x44, 0x4c, 0x1f,
0xee, 0x46, 0xe1, 0x3e, 0x6d, 0xa9, 0xe2, 0x6a, 0x55, 0x91, 0x24, 0x49, 0x40, 0x78, 0x1c, 0xc0,
0xea, 0x84, 0x77, 0xf2, 0x91, 0xa6, 0x53, 0x7d, 0x2b, 0x20, 0x5c, 0xc5, 0xab, 0x0b, 0x24, 0xf9,
0xed, 0x7c, 0x06, 0xeb, 0x4f, 0x31, 0x4f, 0x61, 0x42, 0xdb, 0x6a, 0x9e, 0xd0, 0xc9, 0x66, 0xf7,
0x52, 0x2e, 0xbb, 0x3b, 0x7f, 0x31, 0xe0, 0x9d, 0xdc, 0xda, 0x97, 0xb9, 0x6d, 0x02, 0xee, 0xd2,
0x65, 0xc0, 0x6d, 0xe6, 0xc1, 0xed, 0xfc, 0xce, 0x80, 0x1b, 0x4f, 0x31, 0x4f, 0x27, 0x8e, 0x2b,
0xb6, 0x04, 0xfa, 0x1e, 0x40, 0x92, 0x30, 0x58, 0xdd, 0xdc, 0x30, 0x9b, 0xa6, 0x9b, 0x92, 0x38,
0xbf, 0x37, 0x60, 0x6d, 0x62, 0xff, 0x6c, 0xde, 0x31, 0xf2, 0x79, 0xe7, 0x7f, 0x65, 0x8e, 0x3f,
0x1a, 0x70, 0xb3, 0xd8, 0x1c, 0x97, 0x71, 0xde, 0xcf, 0xd5, 0x24, 0x2c, 0x50, 0x2a, 0x68, 0xe6,
0x5e, 0x11, 0x1f, 0x4c, 0xee, 0xa9, 0x27, 0x39, 0x5f, 0x9b, 0x80, 0x76, 0x64, 0xb2, 0x90, 0x1f,
0xcf, 0xe3, 0x9a, 0x0b, 0x17, 0x27, 0xb9, 0x12, 0xa4, 0x7c, 0x15, 0x25, 0xc8, 0xc2, 0x85, 0x4a,
0x90, 0x9b, 0x60, 0x89, 0xac, 0xc9, 0xb8, 0x37, 0x1c, 0x49, 0xbe, 0x28, 0xbb, 0x63, 0xc1, 0x24,
0xe1, 0x2f, 0xcd, 0x49, 0xf8, 0x95, 0x0b, 0x13, 0xfe, 0x1b, 0xb8, 0x1e, 0x07, 0xb6, 0xa4, 0xef,
0x73, 0xb8, 0x23, 0x1b, 0x0a, 0xa5, 0x7c, 0x28, 0xcc, 0x70, 0x8a, 0xf3, 0x9f, 0x12, 0xac, 0xb5,
0x62, 0xce, 0x39, 0xf4, 0xf8, 0xb1, 0xac, 0x19, 0xce, 0x8e, 0x94, 0xe9, 0x08, 0x48, 0x11, 0xb4,
0x39, 0x95, 0xa0, 0xcb, 0x59, 0x82, 0xce, 0x1e, 0x70, 0x21, 0x8f, 0x9a, 0xab, 0x29, 0x3a, 0x9b,
0xb0, 0x9a, 0x22, 0xdc, 0x91, 0xc7, 0x8f, 0x45, 0xe1, 0x29, 0x18, 0x77, 0x99, 0xa4, 0x6f, 0xcf,
0xd0, 0x03, 0x58, 0x49, 0x18, 0xd2, 0x57, 0xc4, 0x59, 0x91, 0x08, 0x19, 0xd3, 0xa9, 0x1f, 0x33,
0x67, 0xb6, 0x80, 0xb0, 0x0a, 0x0a, 0x88, 0x74, 0x31, 0x03, 0x99, 0x62, 0xc6, 0xf9, 0xbb, 0x01,
0x76, 0x12, 0xa0, 0x73, 0x36, 0x06, 0x19, 0xbf, 0x94, 0xf2, 0x7e, 0xb9, 0x03, 0x55, 0x1c, 0x78,
0xdd, 0x01, 0xd6, 0xb8, 0x35, 0x15, 0x6e, 0x95, 0x4c, 0xe1, 0xf6, 0x09, 0xd8, 0xe3, 0x52, 0x32,
0x8e, 0xc1, 0x7b, 0x53, 0x6b, 0xc9, 0x34, 0x28, 0x5c, 0x48, 0x6a, 0x4a, 0xe6, 0x7c, 0x55, 0x1a,
0xd3, 0x9c, 0x42, 0xec, 0x65, 0x92, 0xd9, 0xaf, 0xa0, 0xaa, 0x6f, 0xa1, 0x4a, 0x5c, 0x95, 0xd2,
0x7e, 0x52, 0x74, 0xac, 0xa2, 0x4d, 0x37, 0x53, 0x66, 0xfc, 0x38, 0xe0, 0xe1, 0xa9, 0x6b, 0xb3,
0xb1, 0xa4, 0xd1, 0x81, 0xd5, 0xbc, 0x02, 0x5a, 0x05, 0xf3, 0x04, 0x9f, 0x6a, 0x1b, 0x8b, 0x9f,
0x22, 0xfd, 0xbf, 0x12, 0xd8, 0xd1, 0xac, 0x7f, 0xfb, 0xcc, 0x7c, 0xda, 0xa7, 0xae, 0xd2, 0xfe,
0x69, 0xe9, 0x23, 0xc3, 0x39, 0x85, 0xd5, 0xdd, 0x90, 0x8e, 0xce, 0x9d, 0x49, 0x1d, 0xa8, 0xa6,
0xca, 0xe2, 0x38, 0x78, 0x33, 0xb2, 0x59, 0xe1, 0xfb, 0x19, 0xac, 0xef, 0x62, 0xd6, 0x0b, 0x49,
0xf7, 0xfc, 0x89, 0x7c, 0x46, 0xb5, 0xf1, 0xb5, 0x01, 0xef, 0xe4, 0xd6, 0xbe, 0x8c, 0x8f, 0x7f,
0x91, 0x45, 0x9e, 0x72, 0xf1, 0x8c, 0x2e, 0x26, 0x8d, 0x38, 0x4f, 0xb2, 0xa8, 0xfc, 0xb6, 0x2d,
0x32, 0xc7, 0x61, 0x48, 0x8f, 0x64, 0x8d, 0x78, 0x75, 0x37, 0xfe, 0x93, 0x01, 0xb7, 0xa6, 0xec,
0x71, 0x99, 0x9b, 0xe7, 0x1b, 0xde, 0xd2, 0xac, 0x86, 0xd7, 0xcc, 0x35, 0xbc, 0xce, 0x5f, 0x4b,
0x50, 0x6b, 0x73, 0x1a, 0x7a, 0x47, 0x78, 0x87, 0x06, 0x7d, 0x72, 0x24, 0xd2, 0x69, 0x5c, 0x47,
0x1b, 0xf2, 0x1a, 0x49, 0xa5, 0x7c, 0x07, 0xaa, 0x5e, 0xaf, 0x87, 0x19, 0x13, 0x6d, 0x85, 0xce,
0x12, 0x96, 0x6b, 0x2b, 0xd9, 0x73, 0x21, 0x42, 0x3f, 0x80, 0x35, 0x86, 0x7b, 0x21, 0xe6, 0x9d,
0xb1, 0xa6, 0x86, 0xd6, 0x8a, 0xfa, 0xf0, 0x38, 0xd6, 0x16, 0x85, 0x77, 0xc4, 0x70, 0xbb, 0xfd,
0x89, 0x4c, 0xdb, 0x15, 0x57, 0x8f, 0x44, 0xd9, 0xd3, 0x8d, 0x7a, 0x27, 0x98, 0xa7, 0xd3, 0x36,
0x28, 0x91, 0xcc, 0xdb, 0x37, 0xc0, 0x0a, 0x29, 0xe5, 0x32, 0xd7, 0x4a, 0x8e, 0xb5, 0xdc, 0x8a,
0x10, 0x88, 0x74, 0xa2, 0x57, 0x6d, 0x3d, 0xde, 0xd7, 0xdc, 0xaa, 0x47, 0xa2, 0x77, 0x6c, 0x3d,
0xde, 0xff, 0x38, 0xf0, 0x47, 0x94, 0x04, 0x5c, 0x26, 0x5e, 0xcb, 0x4d, 0x8b, 0xc4, 0xf5, 0x98,
0xb2, 0x44, 0x47, 0x94, 0x05, 0x32, 0xe9, 0x5a, 0xae, 0xad, 0x65, 0x2f, 0x4e, 0x47, 0xd8, 0xf9,
0xb7, 0x09, 0xab, 0xaa, 0xb6, 0xd9, 0xa3, 0xdd, 0x18, 0x1e, 0x37, 0xc1, 0xea, 0x0d, 0x22, 0xd1,
0x26, 0x68, 0x6c, 0x58, 0xee, 0x58, 0x20, 0x2c, 0x92, 0xa6, 0x87, 0x10, 0xf7, 0xc9, 0x1b, 0x6d,
0xb9, 0x95, 0x31, 0x3f, 0x48, 0x71, 0x9a, 0xc9, 0xcc, 0x09, 0x26, 0xf3, 0x3d, 0xee, 0x69, 0x7a,
0x29, 0x4b, 0x7a, 0xb1, 0x84, 0x44, 0x31, 0xcb, 0x04, 0x61, 0x2c, 0x14, 0x10, 0x46, 0x8a, 0x41,
0x17, 0xb3, 0x0c, 0x9a, 0x05, 0xef, 0x52, 0x9e, 0x27, 0x9f, 0xc1, 0x72, 0x6c, 0x98, 0x9e, 0xc4,
0x88, 0xb4, 0x5e, 0x41, 0xfb, 0x22, 0x13, 0x59, 0x1a, 0x4c, 0x6e, 0x8d, 0x65, 0xb0, 0x95, 0x67,
0x5c, 0xeb, 0x42, 0x8c, 0x9b, 0xab, 0xf6, 0xe0, 0x22, 0xd5, 0x5e, 0x9a, 0x3d, 0xed, 0x2c, 0x7b,
0x7e, 0x02, 0xab, 0xbf, 0x8c, 0x70, 0x78, 0xba, 0x47, 0xbb, 0x6c, 0x3e, 0x1f, 0x37, 0xa0, 0xa2,
0x1d, 0x15, 0x67, 0xda, 0x64, 0xec, 0xfc, 0xcb, 0x80, 0x9a, 0x0c, 0xfb, 0x17, 0x1e, 0x3b, 0x89,
0x5f, 0x4d, 0x62, 0x2f, 0x1b, 0x59, 0x2f, 0x5f, 0xb0, 0x4f, 0x28, 0x68, 0xf9, 0xcd, 0xa2, 0x96,
0xbf, 0xa0, 0xfe, 0x28, 0x17, 0xd6, 0x1f, 0xb9, 0xc6, 0x63, 0x61, 0xa2, 0xf1, 0xf8, 0xc6, 0x80,
0xb5, 0x94, 0x8d, 0x2e, 0x93, 0xc2, 0x32, 0x96, 0x2d, 0xe5, 0x2d, 0xbb, 0x9d, 0x4d, 0xed, 0x66,
0x91, 0xab, 0x53, 0xa9, 0x3d, 0xb6, 0x71, 0x26, 0xbd, 0x3f, 0x87, 0x15, 0xc1, 0xa1, 0x57, 0xe3,
0xce, 0x7f, 0x1a, 0xb0, 0xb4, 0x47, 0xbb, 0xd2, 0x91, 0x69, 0x0c, 0x19, 0xd9, 0xe7, 0xa4, 0x55,
0x30, 0x7d, 0x32, 0xd4, 0xf9, 0x58, 0xfc, 0x14, 0x31, 0xc6, 0xb8, 0x17, 0xf2, 0xf1, 0x83, 0x98,
0x28, 0xb0, 0x84, 0x44, 0xbe, 0xa9, 0xbc, 0x0b, 0x15, 0x1c, 0xf8, 0xea, 0xa3, 0xae, 0x62, 0x71,
0xe0, 0xcb, 0x4f, 0x57, 0xd3, 0x98, 0xac, 0xc3, 0xc2, 0x88, 0x8e, 0x1f, 0xb1, 0xd4, 0xc0, 0x59,
0x07, 0xf4, 0x14, 0xf3, 0x3d, 0xda, 0x15, 0x5e, 0x89, 0xcd, 0xe3, 0xfc, 0xa3, 0x24, 0x9b, 0x86,
0xb1, 0xf8, 0x32, 0x0e, 0x76, 0xa0, 0xa6, 0x08, 0xe8, 0x0b, 0xda, 0xed, 0x04, 0x51, 0x6c, 0x14,
0x5b, 0x0a, 0xf7, 0x68, 0xf7, 0x20, 0x1a, 0xa2, 0xf7, 0xe1, 0x3a, 0x09, 0x3a, 0x23, 0xcd, 0x89,
0x89, 0xa6, 0xb2, 0xd2, 0x2a, 0x09, 0x62, 0xb6, 0xd4, 0xea, 0xf7, 0x61, 0x05, 0x07, 0x2f, 0x23,
0x1c, 0xe1, 0x44, 0x55, 0xd9, 0xac, 0xa6, 0xc5, 0x5a, 0x4f, 0x70, 0x9f, 0xc7, 0x4e, 0x3a, 0x6c,
0x40, 0x39, 0xd3, 0x39, 0xd1, 0x12, 0x92, 0xb6, 0x10, 0xa0, 0x8f, 0xc0, 0x12, 0xd3, 0x15, 0xb4,
0x54, 0xf1, 0x7f, 0xa3, 0x08, 0x5a, 0xda, 0xdf, 0x6e, 0xe5, 0x0b, 0xf5, 0x83, 0x89, 0x00, 0xd1,
0xe5, 0xb0, 0x4f, 0xd8, 0x89, 0x66, 0x1a, 0x50, 0xa2, 0x5d, 0xc2, 0x4e, 0xb6, 0xbe, 0x02, 0x00,
0x89, 0xc8, 0x1d, 0x4a, 0x43, 0x1f, 0x0d, 0xa4, 0x99, 0x77, 0xe8, 0x70, 0x44, 0x03, 0x1c, 0x70,
0x19, 0xbd, 0x0c, 0x6d, 0x66, 0x37, 0xd3, 0x83, 0x49, 0x45, 0xed, 0x96, 0xc6, 0x7b, 0x85, 0xfa,
0x39, 0x65, 0xe7, 0x1a, 0x7a, 0x29, 0x0b, 0x68, 0x31, 0x24, 0x8c, 0x93, 0x1e, 0xdb, 0x39, 0xf6,
0x82, 0x00, 0x0f, 0xd0, 0xd6, 0x94, 0xe7, 0xa6, 0x22, 0xe5, 0x78, 0xcf, 0xbb, 0x85, 0x7b, 0xb6,
0x79, 0x48, 0x82, 0xa3, 0x18, 0x17, 0xce, 0x35, 0xf4, 0x02, 0xec, 0x54, 0xcf, 0x8f, 0xee, 0x17,
0x99, 0x71, 0xf2, 0x51, 0xa0, 0x71, 0x16, 0x80, 0x9c, 0x6b, 0xa8, 0x0f, 0xb5, 0xcc, 0xa3, 0x14,
0x6a, 0x9e, 0x55, 0xb7, 0xa7, 0x5f, 0x82, 0x1a, 0xdf, 0x9f, 0x43, 0x33, 0x39, 0xfd, 0x6f, 0x94,
0xc1, 0x26, 0x5e, 0x75, 0x1e, 0x4e, 0x59, 0x64, 0xda, 0xfb, 0x53, 0xe3, 0xd1, 0xfc, 0x13, 0x92,
0xcd, 0xfd, 0xf1, 0x25, 0x15, 0xb8, 0x1e, 0xcc, 0x6e, 0x4e, 0xd4, 0x6e, 0xcd, 0x79, 0xbb, 0x18,
0xe7, 0x1a, 0x3a, 0x04, 0x2b, 0x69, 0x24, 0xd0, 0x7b, 0x45, 0x13, 0xf3, 0x7d, 0xc6, 0x1c, 0xce,
0xc9, 0xd4, 0xf0, 0xc5, 0xce, 0x29, 0x6a, 0x21, 0x8a, 0x9d, 0x53, 0xd8, 0x10, 0x38, 0xd7, 0xd0,
0x6f, 0xc7, 0x2f, 0x93, 0x99, 0xca, 0x19, 0x3d, 0x3a, 0xeb, 0xfa, 0x45, 0x85, 0x7c, 0xe3, 0x87,
0xe7, 0x98, 0x91, 0x02, 0x07, 0x6a, 0x1f, 0xd3, 0xd7, 0xaa, 0x82, 0x89, 0x42, 0x4f, 0x14, 0xfc,
0x05, 0x9b, 0xeb, 0x58, 0x9a, 0x54, 0x9d, 0xba, 0xf9, 0x19, 0x33, 0x92, 0xcd, 0x3b, 0x00, 0x4f,
0x31, 0xdf, 0xc7, 0x3c, 0x24, 0x3d, 0x96, 0x0f, 0xab, 0x71, 0xc2, 0xd0, 0x0a, 0xf1, 0x56, 0x0f,
0x66, 0xea, 0x25, 0x1b, 0x74, 0xc1, 0xde, 0x39, 0xc6, 0xbd, 0x93, 0x67, 0xd8, 0x1b, 0xf0, 0x63,
0x54, 0x3c, 0x33, 0xa5, 0x31, 0x05, 0x7b, 0x45, 0x8a, 0xf1, 0x1e, 0x5b, 0xdf, 0x2c, 0xea, 0x7f,
0x29, 0x0f, 0xa8, 0x8f, 0xff, 0xff, 0x73, 0xe1, 0x21, 0x58, 0x49, 0x8f, 0x50, 0x1c, 0x6a, 0xf9,
0x16, 0x62, 0x56, 0xa8, 0x7d, 0x0e, 0x56, 0x52, 0x6d, 0x15, 0xaf, 0x98, 0x2f, 0x58, 0x1b, 0xf7,
0x66, 0x68, 0x25, 0xa7, 0x3d, 0x80, 0x4a, 0x5c, 0x1d, 0xa1, 0xbb, 0xd3, 0xf2, 0x42, 0x7a, 0xe5,
0x19, 0x67, 0xfd, 0x35, 0xd8, 0xa9, 0xd2, 0xa1, 0x98, 0x09, 0x26, 0x4b, 0x8e, 0xc6, 0x83, 0x99,
0x7a, 0xdf, 0x8e, 0x80, 0xdc, 0xfe, 0xd1, 0xe7, 0x5b, 0x47, 0x84, 0x1f, 0x47, 0x5d, 0x61, 0xd9,
0x87, 0x4a, 0xf3, 0x7d, 0x42, 0xf5, 0xaf, 0x87, 0xf1, 0x29, 0x1f, 0xca, 0x95, 0x1e, 0x4a, 0x3b,
0x8d, 0xba, 0xdd, 0x45, 0x39, 0xfc, 0xe0, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x77, 0xe5,
0xae, 0x64, 0x20, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -0,0 +1,21 @@
package metautil
import (
"path"
"github.com/milvus-io/milvus/internal/common"
)
func BuildSegmentIndexFilePath(rootPath string, buildID, indexVersion, partID, segID int64, fileKey string) string {
k := JoinIDPath(buildID, indexVersion, partID, segID)
return path.Join(rootPath, common.SegmentIndexPath, k, fileKey)
}
// BuildSegmentIndexFilePaths maps each file key of a segment index build to
// its full object-storage path, delegating per-key assembly to
// BuildSegmentIndexFilePath. The result preserves the order of fileKeys.
func BuildSegmentIndexFilePaths(rootPath string, buildID, indexVersion, partID, segID int64, fileKeys []string) []string {
	paths := make([]string, 0, len(fileKeys))
	for _, fileKey := range fileKeys {
		// Note: the loop body must not name its local "path" — that would
		// shadow the imported stdlib "path" package (the original did this).
		filePath := BuildSegmentIndexFilePath(rootPath, buildID, indexVersion, partID, segID, fileKey)
		paths = append(paths, filePath)
	}
	return paths
}

View File

@ -170,7 +170,7 @@ CREATE TABLE if not exists milvus_meta.segment_indexes (
index_build_id BIGINT,
enable_index BOOL NOT NULL,
create_time bigint unsigned,
index_file_paths VARCHAR(4096),
index_file_keys VARCHAR(4096),
index_size BIGINT UNSIGNED,
`version` INT UNSIGNED,
is_deleted BOOL DEFAULT FALSE COMMENT 'as mark_deleted',