Use timestamp to decide which record wins when the pks are the same (#20166)

Signed-off-by: lixinguo <xinguo.li@zilliz.com>

Signed-off-by: lixinguo <xinguo.li@zilliz.com>
Co-authored-by: lixinguo <xinguo.li@zilliz.com>
pull/20738/head
smellthemoon 2022-11-21 10:55:10 +08:00 committed by GitHub
parent d44d50e735
commit f5ab719f21
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 316 additions and 37 deletions

View File

@ -35,7 +35,7 @@ struct ExtractedPlanInfo {
// Record that `field_id` participates in this plan by setting its bit in
// involved_fields_. Only user fields are valid; system fields have ids
// below START_USER_FIELDID and are rejected by the assertion.
void
add_involved_field(FieldId field_id) {
    // A negative position means the id belongs to a system field (or is
    // otherwise out of the user-field range).
    auto pos = field_id.get() - START_USER_FIELDID;
    AssertInfo(pos >= 0, "invalid field id");
    involved_fields_.set(pos);
}

View File

@ -300,7 +300,8 @@ SegmentGrowingImpl::bulk_subscript(SystemFieldType system_type,
void* output) const {
switch (system_type) {
case SystemFieldType::Timestamp:
PanicInfo("timestamp unsupported");
bulk_subscript_impl<Timestamp>(this->insert_record_.timestamps_, seg_offsets, count, output);
break;
case SystemFieldType::RowId:
bulk_subscript_impl<int64_t>(this->insert_record_.row_ids_, seg_offsets, count, output);
break;

View File

@ -10,6 +10,9 @@
// or implied. See the License for the specific language governing permissions and limitations under the License
#include "SegmentInterface.h"
#include <cstdint>
#include "common/SystemProperty.h"
#include "common/Types.h"
#include "query/generated/ExecPlanNodeVisitor.h"
#include "Utils.h"
@ -76,14 +79,34 @@ SegmentInternalInterface::Retrieve(const query::RetrievePlan* plan, Timestamp ti
auto ids = results->mutable_ids();
auto pk_field_id = plan->schema_.get_primary_field_id();
for (auto field_id : plan->field_ids_) {
auto& field_mata = plan->schema_[field_id];
if (SystemProperty::Instance().IsSystem(field_id)) {
auto system_type = SystemProperty::Instance().GetSystemFieldType(field_id);
auto size = retrieve_results.result_offsets_.size();
FixedVector<int64_t> output(size);
bulk_subscript(system_type, retrieve_results.result_offsets_.data(), size, output.data());
auto data_array = std::make_unique<DataArray>();
data_array->set_field_id(field_id.get());
data_array->set_type(milvus::proto::schema::DataType::Int64);
auto scalar_array = data_array->mutable_scalars();
auto data = reinterpret_cast<const int64_t*>(output.data());
auto obj = scalar_array->mutable_long_data();
obj->mutable_data()->Add(data, data + size);
fields_data->AddAllocated(data_array.release());
continue;
}
auto& field_meta = plan->schema_[field_id];
auto col =
bulk_subscript(field_id, retrieve_results.result_offsets_.data(), retrieve_results.result_offsets_.size());
auto col_data = col.release();
fields_data->AddAllocated(col_data);
if (pk_field_id.has_value() && pk_field_id.value() == field_id) {
switch (field_mata.get_data_type()) {
switch (field_meta.get_data_type()) {
case DataType::INT64: {
auto int_ids = ids->mutable_int_id();
auto src_data = col_data->scalars().long_data();

View File

@ -181,7 +181,6 @@ class SegmentInternalInterface : public SegmentInterface {
virtual const index::IndexBase*
chunk_index_impl(FieldId field_id, int64_t chunk_id) const = 0;
// TODO remove system fields
// calculate output[i] = Vec[seg_offsets[i]], where Vec binds to system_type
virtual void
bulk_subscript(SystemFieldType system_type, const int64_t* seg_offsets, int64_t count, void* output) const = 0;

View File

@ -29,6 +29,7 @@ static inline bool
get_bit(const BitsetType& bitset, FieldId field_id) {
auto pos = field_id.get() - START_USER_FIELDID;
AssertInfo(pos >= 0, "invalid field id");
return bitset[pos];
}
@ -424,10 +425,21 @@ SegmentSealedImpl::bulk_subscript(SystemFieldType system_type,
int64_t count,
void* output) const {
AssertInfo(is_system_field_ready(), "System field isn't ready when do bulk_insert");
AssertInfo(system_type == SystemFieldType::RowId, "System field type of id column is not RowId");
AssertInfo(insert_record_.row_ids_.num_chunk() == 1, "num chunk not equal to 1 for sealed segment");
auto field_data = insert_record_.row_ids_.get_chunk_data(0);
bulk_subscript_impl<int64_t>(field_data, seg_offsets, count, output);
switch (system_type) {
case SystemFieldType::Timestamp:
AssertInfo(insert_record_.timestamps_.num_chunk() == 1,
"num chunk of timestamp not equal to 1 for sealed segment");
bulk_subscript_impl<Timestamp>(this->insert_record_.timestamps_.get_chunk_data(0), seg_offsets, count,
output);
break;
case SystemFieldType::RowId:
AssertInfo(insert_record_.row_ids_.num_chunk() == 1,
"num chunk of rowID not equal to 1 for sealed segment");
bulk_subscript_impl<int64_t>(this->insert_record_.row_ids_.get_chunk_data(0), seg_offsets, count, output);
break;
default:
PanicInfo("unknown subscript fields");
}
}
template <typename T>
@ -592,8 +604,6 @@ SegmentSealedImpl::bulk_subscript(FieldId field_id, const int64_t* seg_offsets,
// Return whether an index has been loaded for `field_id`.
// Thread-safe: takes a shared (reader) lock on mutex_.
bool
SegmentSealedImpl::HasIndex(FieldId field_id) const {
    std::shared_lock lck(mutex_);
    // The assertion fires when the field IS a system field; the previous
    // message had inverted polarity and mentioned "drop index" (a
    // copy-paste from another method).
    AssertInfo(!SystemProperty::Instance().IsSystem(field_id),
               "Field id:" + std::to_string(field_id.get()) + " is a system field, which cannot have an index");
    return get_bit(index_ready_bitset_, field_id);
}

View File

@ -279,6 +279,8 @@ TEST(Retrieve, Delete) {
auto fid_vec = schema->AddDebugField("vector_64", DataType::VECTOR_FLOAT, DIM, knowhere::metric::L2);
schema->set_primary_field_id(fid_64);
auto fid_ts = schema->AddDebugField("Timestamp", DataType::INT64);
int64_t N = 10;
int64_t req_size = 10;
auto choose = [=](int i) { return i; };
@ -287,8 +289,13 @@ TEST(Retrieve, Delete) {
auto segment = CreateSealedSegment(schema);
SealedLoadFieldData(dataset, *segment);
auto i64_col = dataset.get_col<int64_t>(fid_64);
auto ts_col = dataset.get_col<int64_t>(fid_ts);
auto plan = std::make_unique<query::RetrievePlan>(*schema);
std::vector<int64_t> timestamps;
for (int i = 0; i < req_size; ++i) {
timestamps.emplace_back(ts_col[choose(i)]);
}
std::vector<int64_t> values;
for (int i = 0; i < req_size; ++i) {
values.emplace_back(i64_col[choose(i)]);
@ -296,12 +303,12 @@ TEST(Retrieve, Delete) {
auto term_expr = std::make_unique<query::TermExprImpl<int64_t>>(fid_64, DataType::INT64, values);
plan->plan_node_ = std::make_unique<query::RetrievePlanNode>();
plan->plan_node_->predicate_ = std::move(term_expr);
std::vector<FieldId> target_offsets{fid_64, fid_vec};
std::vector<FieldId> target_offsets{fid_ts, fid_64, fid_vec};
plan->field_ids_ = target_offsets;
{
auto retrieve_results = segment->Retrieve(plan.get(), 100);
Assert(retrieve_results->fields_data_size() == target_offsets.size());
ASSERT_EQ(retrieve_results->fields_data_size(), target_offsets.size());
auto field0 = retrieve_results->fields_data(0);
Assert(field0.has_scalars());
auto field0_data = field0.scalars().long_data();
@ -309,18 +316,24 @@ TEST(Retrieve, Delete) {
for (int i = 0; i < req_size; ++i) {
auto index = choose(i);
auto data = field0_data.data(i);
}
for (int i = 0; i < req_size; ++i) {
auto index = choose(i);
auto data = field0_data.data(i);
ASSERT_EQ(data, i64_col[index]);
ASSERT_EQ(data, ts_col[index]);
}
auto field1 = retrieve_results->fields_data(1);
Assert(field1.has_vectors());
auto field1_data = field1.vectors().float_vector();
ASSERT_EQ(field1_data.data_size(), DIM * req_size);
Assert(field1.has_scalars());
auto field1_data = field1.scalars().long_data();
for (int i = 0; i < req_size; ++i) {
auto index = choose(i);
auto data = field1_data.data(i);
ASSERT_EQ(data, i64_col[index]);
}
auto field2 = retrieve_results->fields_data(2);
Assert(field2.has_vectors());
auto field2_data = field2.vectors().float_vector();
ASSERT_EQ(field2_data.data_size(), DIM * req_size);
}
int64_t row_count = 0;
@ -350,19 +363,19 @@ TEST(Retrieve, Delete) {
{
auto retrieve_results = segment->Retrieve(plan.get(), 100);
Assert(retrieve_results->fields_data_size() == target_offsets.size());
auto field0 = retrieve_results->fields_data(0);
Assert(field0.has_scalars());
auto field0_data = field0.scalars().long_data();
auto field1 = retrieve_results->fields_data(1);
Assert(field1.has_scalars());
auto field1_data = field1.scalars().long_data();
auto size = req_size - new_count;
for (int i = 0; i < size; ++i) {
auto index = choose(i);
auto data = field0_data.data(i);
auto data = field1_data.data(i);
ASSERT_EQ(data, i64_col[index + new_count]);
}
auto field1 = retrieve_results->fields_data(1);
Assert(field1.has_vectors());
auto field1_data = field1.vectors().float_vector();
ASSERT_EQ(field1_data.data_size(), DIM * size);
auto field2 = retrieve_results->fields_data(2);
Assert(field2.has_vectors());
auto field2_data = field2.vectors().float_vector();
ASSERT_EQ(field2_data.data_size(), DIM * size);
}
}

View File

@ -32,4 +32,29 @@ TEST(TimestampIndex, Naive) {
for (auto i = 0; i < lengths.size(); ++i) {
ASSERT_EQ(guessed_slice[i], lengths[i]);
}
Timestamp query_ts = 0;
auto range = index.get_active_range(query_ts);
ASSERT_EQ(range.first, 0);
ASSERT_EQ(range.second, 0);
query_ts = 1;
range = index.get_active_range(query_ts);
ASSERT_EQ(range.first, 0);
ASSERT_EQ(range.second, 2);
query_ts = 11;
range = index.get_active_range(query_ts);
ASSERT_EQ(range.first, 2);
ASSERT_EQ(range.second, 5);
query_ts = 21;
range = index.get_active_range(query_ts);
ASSERT_EQ(range.first, 5);
ASSERT_EQ(range.second, 8);
query_ts = 22;
range = index.get_active_range(query_ts);
ASSERT_EQ(range.first, 8);
ASSERT_EQ(range.second, 8);
}

View File

@ -263,6 +263,7 @@ func (t *queryTask) PreExecute(ctx context.Context) error {
if err != nil {
return err
}
outputFieldIDs = append(outputFieldIDs, common.TimeStampField)
t.RetrieveRequest.OutputFieldsId = outputFieldIDs
plan.OutputFieldIds = outputFieldIDs
log.Ctx(ctx).Debug("translate output fields to field ids",
@ -384,6 +385,11 @@ func (t *queryTask) PostExecute(ctx context.Context) error {
return err
}
for i := 0; i < len(t.result.FieldsData); i++ {
if t.OutputFieldsId[i] == common.TimeStampField {
t.result.FieldsData = append(t.result.FieldsData[:i], t.result.FieldsData[(i+1):]...)
i--
continue
}
for _, field := range schema.Fields {
if field.FieldID == t.OutputFieldsId[i] {
t.result.FieldsData[i].FieldName = field.Name

View File

@ -123,6 +123,7 @@ func TestQueryTask_all(t *testing.T) {
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
FieldsData: []*schemapb.FieldData{},
},
request: &milvuspb.QueryRequest{
Base: &commonpb.MsgBase{
@ -173,7 +174,8 @@ func TestQueryTask_all(t *testing.T) {
for fieldName, dataType := range fieldName2Types {
result1.FieldsData = append(result1.FieldsData, generateFieldData(dataType, fieldName, hitNum))
}
result1.FieldsData = append(result1.FieldsData, generateFieldData(schemapb.DataType_Int64, common.TimeStampFieldName, hitNum))
task.RetrieveRequest.OutputFieldsId = append(task.RetrieveRequest.OutputFieldsId, common.TimeStampField)
task.ctx = ctx
qn.queryError = fmt.Errorf("mock error")
assert.Error(t, task.Execute(ctx))
@ -199,6 +201,10 @@ func TestQueryTask_all(t *testing.T) {
assert.NoError(t, task.Execute(ctx))
assert.NoError(t, task.PostExecute(ctx))
for i := 0; i < len(task.result.FieldsData); i++ {
assert.NotEqual(t, task.result.FieldsData[i].FieldId, common.TimeStampField)
}
}
func Test_translateToOutputFieldIDs(t *testing.T) {

View File

@ -233,8 +233,7 @@ func getOutputFieldIDs(schema *schemapb.CollectionSchema, outputFields []string)
}
}
if !hitField {
errMsg := "Field " + name + " not exist"
return nil, errors.New(errMsg)
return nil, fmt.Errorf("Field %s not exist", name)
}
}
return outputFieldIDs, nil

View File

@ -261,7 +261,6 @@ func mergeInternalRetrieveResult(ctx context.Context, retrieveResults []*interna
ret = &internalpb.RetrieveResults{
Ids: &schemapb.IDs{},
}
skipDupCnt int64
loopEnd int
)
@ -285,7 +284,7 @@ func mergeInternalRetrieveResult(ctx context.Context, retrieveResults []*interna
}
ret.FieldsData = make([]*schemapb.FieldData, len(validRetrieveResults[0].GetFieldsData()))
idSet := make(map[interface{}]struct{})
idTsMap := make(map[interface{}]uint64)
cursors := make([]int64, len(validRetrieveResults))
for j := 0; j < loopEnd; j++ {
sel := typeutil.SelectMinPK(validRetrieveResults, cursors)
@ -294,13 +293,19 @@ func mergeInternalRetrieveResult(ctx context.Context, retrieveResults []*interna
}
pk := typeutil.GetPK(validRetrieveResults[sel].GetIds(), cursors[sel])
if _, ok := idSet[pk]; !ok {
ts := typeutil.GetTS(validRetrieveResults[sel], cursors[sel])
if _, ok := idTsMap[pk]; !ok {
typeutil.AppendPKs(ret.Ids, pk)
typeutil.AppendFieldData(ret.FieldsData, validRetrieveResults[sel].GetFieldsData(), cursors[sel])
idSet[pk] = struct{}{}
idTsMap[pk] = ts
} else {
// primary keys duplicate
skipDupCnt++
if ts != 0 && ts > idTsMap[pk] {
idTsMap[pk] = ts
typeutil.DeleteFieldData(ret.FieldsData)
typeutil.AppendFieldData(ret.FieldsData, validRetrieveResults[sel].GetFieldsData(), cursors[sel])
}
}
cursors[sel]++
}

View File

@ -250,6 +250,42 @@ func TestResult_mergeInternalRetrieveResults(t *testing.T) {
assert.Empty(t, ret.GetFieldsData())
})
t.Run("test timestamp decided", func(t *testing.T) {
ret1 := &internalpb.RetrieveResults{
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: []int64{0, 1},
}},
},
FieldsData: []*schemapb.FieldData{
genFieldData(common.TimeStampFieldName, common.TimeStampField, schemapb.DataType_Int64,
[]int64{1, 2}, 1),
genFieldData(Int64FieldName, Int64FieldID, schemapb.DataType_Int64,
[]int64{3, 4}, 1),
},
}
ret2 := &internalpb.RetrieveResults{
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: []int64{0, 1},
}},
},
FieldsData: []*schemapb.FieldData{
genFieldData(common.TimeStampFieldName, common.TimeStampField, schemapb.DataType_Int64,
[]int64{5, 6}, 1),
genFieldData(Int64FieldName, Int64FieldID, schemapb.DataType_Int64,
[]int64{7, 8}, 1),
},
}
result, err := mergeInternalRetrieveResult(context.Background(), []*internalpb.RetrieveResults{ret1, ret2}, typeutil.Unlimited)
assert.NoError(t, err)
assert.Equal(t, 2, len(result.GetFieldsData()))
assert.Equal(t, []int64{0, 1}, result.GetIds().GetIntId().GetData())
assert.Equal(t, []int64{7, 8}, result.GetFieldsData()[1].GetScalars().GetLongData().Data)
})
t.Run("test merge", func(t *testing.T) {
r1 := &internalpb.RetrieveResults{
Ids: &schemapb.IDs{

View File

@ -23,7 +23,9 @@ import (
"strconv"
"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"go.uber.org/zap"
)
@ -395,6 +397,52 @@ func AppendFieldData(dst []*schemapb.FieldData, src []*schemapb.FieldData, idx i
}
}
// DeleteFieldData removes, from every field in dst, the entry that was
// appended last time — the inverse of one AppendFieldData call. Scalar
// columns drop their last element; float vectors drop the last `dim`
// values and binary vectors the last `dim/8` bytes.
func DeleteFieldData(dst []*schemapb.FieldData) {
	for i, fieldData := range dst {
		// Guard before the type switch: a nil FieldData (or nil Field)
		// would otherwise panic on fieldData.Field below.
		if fieldData == nil || fieldData.Field == nil {
			log.Info("empty field data can't be deleted")
			return
		}
		switch fieldType := fieldData.Field.(type) {
		case *schemapb.FieldData_Scalars:
			if dst[i].GetScalars() == nil {
				log.Info("empty field data can't be deleted")
				return
			}
			dstScalar := dst[i].GetScalars()
			switch fieldType.Scalars.Data.(type) {
			case *schemapb.ScalarField_BoolData:
				dstScalar.GetBoolData().Data = dstScalar.GetBoolData().Data[:len(dstScalar.GetBoolData().Data)-1]
			case *schemapb.ScalarField_IntData:
				dstScalar.GetIntData().Data = dstScalar.GetIntData().Data[:len(dstScalar.GetIntData().Data)-1]
			case *schemapb.ScalarField_LongData:
				dstScalar.GetLongData().Data = dstScalar.GetLongData().Data[:len(dstScalar.GetLongData().Data)-1]
			case *schemapb.ScalarField_FloatData:
				dstScalar.GetFloatData().Data = dstScalar.GetFloatData().Data[:len(dstScalar.GetFloatData().Data)-1]
			case *schemapb.ScalarField_DoubleData:
				dstScalar.GetDoubleData().Data = dstScalar.GetDoubleData().Data[:len(dstScalar.GetDoubleData().Data)-1]
			case *schemapb.ScalarField_StringData:
				dstScalar.GetStringData().Data = dstScalar.GetStringData().Data[:len(dstScalar.GetStringData().Data)-1]
			default:
				// Message fixed: this path deletes, it does not add.
				log.Error("wrong field type deleted", zap.String("field type", fieldData.Type.String()))
			}
		case *schemapb.FieldData_Vectors:
			if dst[i].GetVectors() == nil {
				log.Info("empty field data can't be deleted")
				return
			}
			dim := fieldType.Vectors.Dim
			dstVector := dst[i].GetVectors()
			switch fieldType.Vectors.Data.(type) {
			case *schemapb.VectorField_BinaryVector:
				dstBinaryVector := dstVector.Data.(*schemapb.VectorField_BinaryVector)
				dstBinaryVector.BinaryVector = dstBinaryVector.BinaryVector[:len(dstBinaryVector.BinaryVector)-int(dim/8)]
			case *schemapb.VectorField_FloatVector:
				dstVector.GetFloatVector().Data = dstVector.GetFloatVector().Data[:len(dstVector.GetFloatVector().Data)-int(dim)]
			default:
				log.Error("wrong field type deleted", zap.String("field type", fieldData.Type.String()))
			}
		}
	}
}
// MergeFieldData appends fields data to dst
func MergeFieldData(dst []*schemapb.FieldData, src []*schemapb.FieldData) {
fieldID2Data := make(map[int64]*schemapb.FieldData)
@ -637,6 +685,24 @@ func GetPK(data *schemapb.IDs, idx int64) interface{} {
return nil
}
// GetTS returns the timestamp stored at row idx of the system timestamp
// field (common.TimeStampField) in the retrieve result, or 0 when the
// result carries no timestamp column.
func GetTS(result *internalpb.RetrieveResults, idx int64) uint64 {
	if result.FieldsData == nil {
		return 0
	}
	for _, fieldData := range result.FieldsData {
		if fieldData.FieldId == common.TimeStampField {
			// Convert only the requested element; the original copied the
			// whole column into a fresh []uint64 just to index one value.
			data := fieldData.GetScalars().GetLongData().Data
			return uint64(data[idx])
		}
	}
	return 0
}
func AppendPKs(pks *schemapb.IDs, pk interface{}) {
switch realPK := pk.(type) {
case int64:

View File

@ -26,6 +26,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -514,6 +515,75 @@ func TestAppendFieldData(t *testing.T) {
assert.Equal(t, FloatVector, result[6].GetVectors().GetFloatVector().Data)
}
// TestDeleteFieldData checks that DeleteFieldData undoes exactly the most
// recent AppendFieldData call: after appending two rows and deleting once,
// each column must hold only the first-appended row, regardless of which
// row was appended first.
func TestDeleteFieldData(t *testing.T) {
	const (
		Dim                   = 8
		BoolFieldName         = "BoolField"
		Int32FieldName        = "Int32Field"
		Int64FieldName        = "Int64Field"
		FloatFieldName        = "FloatField"
		DoubleFieldName       = "DoubleField"
		BinaryVectorFieldName = "BinaryVectorField"
		FloatVectorFieldName  = "FloatVectorField"
		BoolFieldID           = common.StartOfUserFieldID + 1
		Int32FieldID          = common.StartOfUserFieldID + 2
		Int64FieldID          = common.StartOfUserFieldID + 3
		FloatFieldID          = common.StartOfUserFieldID + 4
		DoubleFieldID         = common.StartOfUserFieldID + 5
		BinaryVectorFieldID   = common.StartOfUserFieldID + 6
		FloatVectorFieldID    = common.StartOfUserFieldID + 7
	)
	// Two rows of data per type; index 0 goes into fieldDataArray1 and
	// index 1 into fieldDataArray2 (vectors are sliced by Dim / Dim/8).
	BoolArray := []bool{true, false}
	Int32Array := []int32{1, 2}
	Int64Array := []int64{11, 22}
	FloatArray := []float32{1.0, 2.0}
	DoubleArray := []float64{11.0, 22.0}
	BinaryVector := []byte{0x12, 0x34}
	FloatVector := []float32{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 11.0, 22.0, 33.0, 44.0, 55.0, 66.0, 77.0, 88.0}

	// Destination slices: one FieldData slot per field.
	result1 := make([]*schemapb.FieldData, 7)
	result2 := make([]*schemapb.FieldData, 7)

	// First row of every field.
	var fieldDataArray1 []*schemapb.FieldData
	fieldDataArray1 = append(fieldDataArray1, genFieldData(BoolFieldName, BoolFieldID, schemapb.DataType_Bool, BoolArray[0:1], 1))
	fieldDataArray1 = append(fieldDataArray1, genFieldData(Int32FieldName, Int32FieldID, schemapb.DataType_Int32, Int32Array[0:1], 1))
	fieldDataArray1 = append(fieldDataArray1, genFieldData(Int64FieldName, Int64FieldID, schemapb.DataType_Int64, Int64Array[0:1], 1))
	fieldDataArray1 = append(fieldDataArray1, genFieldData(FloatFieldName, FloatFieldID, schemapb.DataType_Float, FloatArray[0:1], 1))
	fieldDataArray1 = append(fieldDataArray1, genFieldData(DoubleFieldName, DoubleFieldID, schemapb.DataType_Double, DoubleArray[0:1], 1))
	fieldDataArray1 = append(fieldDataArray1, genFieldData(BinaryVectorFieldName, BinaryVectorFieldID, schemapb.DataType_BinaryVector, BinaryVector[0:Dim/8], Dim))
	fieldDataArray1 = append(fieldDataArray1, genFieldData(FloatVectorFieldName, FloatVectorFieldID, schemapb.DataType_FloatVector, FloatVector[0:Dim], Dim))

	// Second row of every field.
	var fieldDataArray2 []*schemapb.FieldData
	fieldDataArray2 = append(fieldDataArray2, genFieldData(BoolFieldName, BoolFieldID, schemapb.DataType_Bool, BoolArray[1:2], 1))
	fieldDataArray2 = append(fieldDataArray2, genFieldData(Int32FieldName, Int32FieldID, schemapb.DataType_Int32, Int32Array[1:2], 1))
	fieldDataArray2 = append(fieldDataArray2, genFieldData(Int64FieldName, Int64FieldID, schemapb.DataType_Int64, Int64Array[1:2], 1))
	fieldDataArray2 = append(fieldDataArray2, genFieldData(FloatFieldName, FloatFieldID, schemapb.DataType_Float, FloatArray[1:2], 1))
	fieldDataArray2 = append(fieldDataArray2, genFieldData(DoubleFieldName, DoubleFieldID, schemapb.DataType_Double, DoubleArray[1:2], 1))
	fieldDataArray2 = append(fieldDataArray2, genFieldData(BinaryVectorFieldName, BinaryVectorFieldID, schemapb.DataType_BinaryVector, BinaryVector[Dim/8:2*Dim/8], Dim))
	fieldDataArray2 = append(fieldDataArray2, genFieldData(FloatVectorFieldName, FloatVectorFieldID, schemapb.DataType_FloatVector, FloatVector[Dim:2*Dim], Dim))

	// Append row1 then row2, delete once: only row1 must remain.
	AppendFieldData(result1, fieldDataArray1, 0)
	AppendFieldData(result1, fieldDataArray2, 0)
	DeleteFieldData(result1)
	assert.Equal(t, BoolArray[0:1], result1[0].GetScalars().GetBoolData().Data)
	assert.Equal(t, Int32Array[0:1], result1[1].GetScalars().GetIntData().Data)
	assert.Equal(t, Int64Array[0:1], result1[2].GetScalars().GetLongData().Data)
	assert.Equal(t, FloatArray[0:1], result1[3].GetScalars().GetFloatData().Data)
	assert.Equal(t, DoubleArray[0:1], result1[4].GetScalars().GetDoubleData().Data)
	assert.Equal(t, BinaryVector[0:Dim/8], result1[5].GetVectors().Data.(*schemapb.VectorField_BinaryVector).BinaryVector)
	assert.Equal(t, FloatVector[0:Dim], result1[6].GetVectors().GetFloatVector().Data)

	// Reverse append order, delete once: only row2 must remain.
	AppendFieldData(result2, fieldDataArray2, 0)
	AppendFieldData(result2, fieldDataArray1, 0)
	DeleteFieldData(result2)
	assert.Equal(t, BoolArray[1:2], result2[0].GetScalars().GetBoolData().Data)
	assert.Equal(t, Int32Array[1:2], result2[1].GetScalars().GetIntData().Data)
	assert.Equal(t, Int64Array[1:2], result2[2].GetScalars().GetLongData().Data)
	assert.Equal(t, FloatArray[1:2], result2[3].GetScalars().GetFloatData().Data)
	assert.Equal(t, DoubleArray[1:2], result2[4].GetScalars().GetDoubleData().Data)
	assert.Equal(t, BinaryVector[Dim/8:2*Dim/8], result2[5].GetVectors().Data.(*schemapb.VectorField_BinaryVector).BinaryVector)
	assert.Equal(t, FloatVector[Dim:2*Dim], result2[6].GetVectors().GetFloatVector().Data)
}
func TestGetPrimaryFieldSchema(t *testing.T) {
int64Field := &schemapb.FieldSchema{
FieldID: 1,
@ -600,6 +670,26 @@ func TestGetPK(t *testing.T) {
}
}
// TestGetTS verifies that GetTS returns, for every row index, the value
// stored in the system timestamp column converted to a Timestamp.
func TestGetTS(t *testing.T) {
	var expected = [5]Timestamp{0, 1, 2, 3, 4}
	result := &internalpb.RetrieveResults{
		FieldsData: []*schemapb.FieldData{
			genFieldData(common.TimeStampFieldName, common.TimeStampField, schemapb.DataType_Int64,
				[]int64{0, 1, 2, 3, 4}, 1),
		},
	}
	// Same assertions as before, expressed as a loop over all indices.
	for idx := int64(0); idx < int64(len(expected)); idx++ {
		assert.Equal(t, expected[idx], GetTS(result, idx))
	}
}
func TestAppendPKs(t *testing.T) {
intPks := &schemapb.IDs{}
AppendPKs(intPks, int64(1))