Extend datatype for milvus (#19597)

Signed-off-by: qwert170 <2501159222@qq.com>
summer2022
qwert170 2022-10-31 14:22:43 +08:00 committed by GitHub
parent c346f29a4d
commit 95a81e0662
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
92 changed files with 6719 additions and 243 deletions

View File

@ -32,6 +32,10 @@ const (
DataType_Int16 DataType = 3
DataType_Int32 DataType = 4
DataType_Int64 DataType = 5
DataType_UInt8 DataType = 6
DataType_UInt16 DataType = 7
DataType_UInt32 DataType = 8
DataType_UInt64 DataType = 9
DataType_Float DataType = 10
DataType_Double DataType = 11
DataType_String DataType = 20
@ -47,6 +51,10 @@ var DataType_name = map[int32]string{
3: "Int16",
4: "Int32",
5: "Int64",
6: "UInt8",
7: "UInt16",
8: "UInt32",
9: "UInt64",
10: "Float",
11: "Double",
20: "String",
@ -62,6 +70,10 @@ var DataType_value = map[string]int32{
"Int16": 3,
"Int32": 4,
"Int64": 5,
"UInt8": 6,
"UInt16": 7,
"UInt32": 8,
"UInt64": 9,
"Float": 10,
"Double": 11,
"String": 20,
@ -396,6 +408,84 @@ func (m *LongArray) GetData() []int64 {
return nil
}
type UIntArray struct {
Data []uint32 `protobuf:"varint,1,rep,packed,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UIntArray) Reset() { *m = UIntArray{} }
func (m *UIntArray) String() string { return proto.CompactTextString(m) }
func (*UIntArray) ProtoMessage() {}
func (*UIntArray) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{5}
}
func (m *UIntArray) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UIntArray.Unmarshal(m, b)
}
func (m *UIntArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UIntArray.Marshal(b, m, deterministic)
}
func (m *UIntArray) XXX_Merge(src proto.Message) {
xxx_messageInfo_UIntArray.Merge(m, src)
}
func (m *UIntArray) XXX_Size() int {
return xxx_messageInfo_UIntArray.Size(m)
}
func (m *UIntArray) XXX_DiscardUnknown() {
xxx_messageInfo_UIntArray.DiscardUnknown(m)
}
var xxx_messageInfo_UIntArray proto.InternalMessageInfo
func (m *UIntArray) GetData() []uint32 {
if m != nil {
return m.Data
}
return nil
}
type ULongArray struct {
Data []uint64 `protobuf:"varint,1,rep,packed,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ULongArray) Reset() { *m = ULongArray{} }
func (m *ULongArray) String() string { return proto.CompactTextString(m) }
func (*ULongArray) ProtoMessage() {}
func (*ULongArray) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{6}
}
func (m *ULongArray) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ULongArray.Unmarshal(m, b)
}
func (m *ULongArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ULongArray.Marshal(b, m, deterministic)
}
func (m *ULongArray) XXX_Merge(src proto.Message) {
xxx_messageInfo_ULongArray.Merge(m, src)
}
func (m *ULongArray) XXX_Size() int {
return xxx_messageInfo_ULongArray.Size(m)
}
func (m *ULongArray) XXX_DiscardUnknown() {
xxx_messageInfo_ULongArray.DiscardUnknown(m)
}
var xxx_messageInfo_ULongArray proto.InternalMessageInfo
func (m *ULongArray) GetData() []uint64 {
if m != nil {
return m.Data
}
return nil
}
type FloatArray struct {
Data []float32 `protobuf:"fixed32,1,rep,packed,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -407,7 +497,7 @@ func (m *FloatArray) Reset() { *m = FloatArray{} }
func (m *FloatArray) String() string { return proto.CompactTextString(m) }
func (*FloatArray) ProtoMessage() {}
func (*FloatArray) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{5}
return fileDescriptor_1c5fb4d8cc22d66a, []int{7}
}
func (m *FloatArray) XXX_Unmarshal(b []byte) error {
@ -446,7 +536,7 @@ func (m *DoubleArray) Reset() { *m = DoubleArray{} }
func (m *DoubleArray) String() string { return proto.CompactTextString(m) }
func (*DoubleArray) ProtoMessage() {}
func (*DoubleArray) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{6}
return fileDescriptor_1c5fb4d8cc22d66a, []int{8}
}
func (m *DoubleArray) XXX_Unmarshal(b []byte) error {
@ -486,7 +576,7 @@ func (m *BytesArray) Reset() { *m = BytesArray{} }
func (m *BytesArray) String() string { return proto.CompactTextString(m) }
func (*BytesArray) ProtoMessage() {}
func (*BytesArray) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{7}
return fileDescriptor_1c5fb4d8cc22d66a, []int{9}
}
func (m *BytesArray) XXX_Unmarshal(b []byte) error {
@ -525,7 +615,7 @@ func (m *StringArray) Reset() { *m = StringArray{} }
func (m *StringArray) String() string { return proto.CompactTextString(m) }
func (*StringArray) ProtoMessage() {}
func (*StringArray) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{8}
return fileDescriptor_1c5fb4d8cc22d66a, []int{10}
}
func (m *StringArray) XXX_Unmarshal(b []byte) error {
@ -562,6 +652,8 @@ type ScalarField struct {
// *ScalarField_DoubleData
// *ScalarField_StringData
// *ScalarField_BytesData
// *ScalarField_UintData
// *ScalarField_UlongData
Data isScalarField_Data `protobuf_oneof:"data"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -572,7 +664,7 @@ func (m *ScalarField) Reset() { *m = ScalarField{} }
func (m *ScalarField) String() string { return proto.CompactTextString(m) }
func (*ScalarField) ProtoMessage() {}
func (*ScalarField) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{9}
return fileDescriptor_1c5fb4d8cc22d66a, []int{11}
}
func (m *ScalarField) XXX_Unmarshal(b []byte) error {
@ -625,6 +717,14 @@ type ScalarField_BytesData struct {
BytesData *BytesArray `protobuf:"bytes,7,opt,name=bytes_data,json=bytesData,proto3,oneof"`
}
type ScalarField_UintData struct {
UintData *UIntArray `protobuf:"bytes,8,opt,name=uint_data,json=uintData,proto3,oneof"`
}
type ScalarField_UlongData struct {
UlongData *ULongArray `protobuf:"bytes,9,opt,name=ulong_data,json=ulongData,proto3,oneof"`
}
func (*ScalarField_BoolData) isScalarField_Data() {}
func (*ScalarField_IntData) isScalarField_Data() {}
@ -639,6 +739,10 @@ func (*ScalarField_StringData) isScalarField_Data() {}
func (*ScalarField_BytesData) isScalarField_Data() {}
func (*ScalarField_UintData) isScalarField_Data() {}
func (*ScalarField_UlongData) isScalarField_Data() {}
func (m *ScalarField) GetData() isScalarField_Data {
if m != nil {
return m.Data
@ -695,6 +799,20 @@ func (m *ScalarField) GetBytesData() *BytesArray {
return nil
}
func (m *ScalarField) GetUintData() *UIntArray {
if x, ok := m.GetData().(*ScalarField_UintData); ok {
return x.UintData
}
return nil
}
func (m *ScalarField) GetUlongData() *ULongArray {
if x, ok := m.GetData().(*ScalarField_UlongData); ok {
return x.UlongData
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*ScalarField) XXX_OneofWrappers() []interface{} {
return []interface{}{
@ -705,6 +823,8 @@ func (*ScalarField) XXX_OneofWrappers() []interface{} {
(*ScalarField_DoubleData)(nil),
(*ScalarField_StringData)(nil),
(*ScalarField_BytesData)(nil),
(*ScalarField_UintData)(nil),
(*ScalarField_UlongData)(nil),
}
}
@ -723,7 +843,7 @@ func (m *VectorField) Reset() { *m = VectorField{} }
func (m *VectorField) String() string { return proto.CompactTextString(m) }
func (*VectorField) ProtoMessage() {}
func (*VectorField) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{10}
return fileDescriptor_1c5fb4d8cc22d66a, []int{12}
}
func (m *VectorField) XXX_Unmarshal(b []byte) error {
@ -813,7 +933,7 @@ func (m *FieldData) Reset() { *m = FieldData{} }
func (m *FieldData) String() string { return proto.CompactTextString(m) }
func (*FieldData) ProtoMessage() {}
func (*FieldData) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{11}
return fileDescriptor_1c5fb4d8cc22d66a, []int{13}
}
func (m *FieldData) XXX_Unmarshal(b []byte) error {
@ -914,7 +1034,7 @@ func (m *IDs) Reset() { *m = IDs{} }
func (m *IDs) String() string { return proto.CompactTextString(m) }
func (*IDs) ProtoMessage() {}
func (*IDs) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{12}
return fileDescriptor_1c5fb4d8cc22d66a, []int{14}
}
func (m *IDs) XXX_Unmarshal(b []byte) error {
@ -996,7 +1116,7 @@ func (m *SearchResultData) Reset() { *m = SearchResultData{} }
func (m *SearchResultData) String() string { return proto.CompactTextString(m) }
func (*SearchResultData) ProtoMessage() {}
func (*SearchResultData) Descriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{13}
return fileDescriptor_1c5fb4d8cc22d66a, []int{15}
}
func (m *SearchResultData) XXX_Unmarshal(b []byte) error {
@ -1067,6 +1187,8 @@ func init() {
proto.RegisterType((*BoolArray)(nil), "milvus.proto.schema.BoolArray")
proto.RegisterType((*IntArray)(nil), "milvus.proto.schema.IntArray")
proto.RegisterType((*LongArray)(nil), "milvus.proto.schema.LongArray")
proto.RegisterType((*UIntArray)(nil), "milvus.proto.schema.UIntArray")
proto.RegisterType((*ULongArray)(nil), "milvus.proto.schema.ULongArray")
proto.RegisterType((*FloatArray)(nil), "milvus.proto.schema.FloatArray")
proto.RegisterType((*DoubleArray)(nil), "milvus.proto.schema.DoubleArray")
proto.RegisterType((*BytesArray)(nil), "milvus.proto.schema.BytesArray")
@ -1081,71 +1203,75 @@ func init() {
func init() { proto.RegisterFile("schema.proto", fileDescriptor_1c5fb4d8cc22d66a) }
var fileDescriptor_1c5fb4d8cc22d66a = []byte{
// 1044 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x5f, 0x6f, 0x1b, 0x45,
0x10, 0xf7, 0xfa, 0xfc, 0xe7, 0x6e, 0xce, 0x0d, 0xc7, 0xb6, 0xa0, 0x03, 0xa9, 0x8d, 0x6b, 0x81,
0x64, 0x45, 0x22, 0x51, 0x53, 0x28, 0xa5, 0xa2, 0x02, 0x1c, 0x2b, 0x8a, 0x95, 0xaa, 0x0a, 0x17,
0x94, 0x07, 0x5e, 0xac, 0xb5, 0x6f, 0x9b, 0xac, 0x72, 0x77, 0x7b, 0xdc, 0xae, 0x2b, 0xfc, 0x01,
0xf8, 0x06, 0x3c, 0x20, 0xc4, 0x03, 0x5f, 0x8c, 0x27, 0x3e, 0x07, 0x12, 0xda, 0xd9, 0xf5, 0x9f,
0x36, 0x8e, 0x95, 0xb7, 0xd9, 0xb9, 0xf9, 0xfd, 0x6e, 0xe6, 0x37, 0xb3, 0xb3, 0xd0, 0x51, 0xd3,
0x2b, 0x9e, 0xb3, 0xfd, 0xb2, 0x92, 0x5a, 0xd2, 0xfb, 0xb9, 0xc8, 0xde, 0xce, 0x94, 0x3d, 0xed,
0xdb, 0x4f, 0x9f, 0x76, 0xa6, 0x32, 0xcf, 0x65, 0x61, 0x9d, 0xbd, 0x3f, 0x3c, 0x08, 0x8f, 0x05,
0xcf, 0xd2, 0x73, 0xfc, 0x4a, 0x63, 0x68, 0xbf, 0x31, 0xc7, 0xd1, 0x30, 0x26, 0x5d, 0xd2, 0xf7,
0x92, 0xc5, 0x91, 0x52, 0x68, 0x14, 0x2c, 0xe7, 0x71, 0xbd, 0x4b, 0xfa, 0x41, 0x82, 0x36, 0xfd,
0x0c, 0x76, 0x84, 0x1a, 0x97, 0x95, 0xc8, 0x59, 0x35, 0x1f, 0x5f, 0xf3, 0x79, 0xec, 0x75, 0x49,
0xdf, 0x4f, 0x3a, 0x42, 0x9d, 0x59, 0xe7, 0x29, 0x9f, 0xd3, 0x2e, 0x84, 0x29, 0x57, 0xd3, 0x4a,
0x94, 0x5a, 0xc8, 0x22, 0x6e, 0x20, 0xc1, 0xba, 0x8b, 0xbe, 0x80, 0x20, 0x65, 0x9a, 0x8d, 0xf5,
0xbc, 0xe4, 0x71, 0xb3, 0x4b, 0xfa, 0x3b, 0x87, 0x0f, 0xf7, 0x37, 0x24, 0xbf, 0x3f, 0x64, 0x9a,
0xfd, 0x34, 0x2f, 0x79, 0xe2, 0xa7, 0xce, 0xa2, 0x03, 0x08, 0x0d, 0x6c, 0x5c, 0xb2, 0x8a, 0xe5,
0x2a, 0x6e, 0x75, 0xbd, 0x7e, 0x78, 0xf8, 0xf8, 0x5d, 0xb4, 0x2b, 0xf9, 0x94, 0xcf, 0x2f, 0x58,
0x36, 0xe3, 0x67, 0x4c, 0x54, 0x09, 0x18, 0xd4, 0x19, 0x82, 0xe8, 0x10, 0x3a, 0xa2, 0x48, 0xf9,
0xaf, 0x0b, 0x92, 0xf6, 0x5d, 0x49, 0x42, 0x84, 0x39, 0x96, 0x8f, 0xa1, 0xc5, 0x66, 0x5a, 0x8e,
0x86, 0xb1, 0x8f, 0x2a, 0xb8, 0x13, 0xfd, 0x0a, 0x9a, 0x4a, 0x33, 0xcd, 0xe3, 0x00, 0x2b, 0xdb,
0xdd, 0x58, 0x99, 0x6d, 0x82, 0x09, 0x4b, 0x6c, 0x74, 0xef, 0x4f, 0x02, 0xd1, 0x91, 0xcc, 0x32,
0x3e, 0x35, 0x1a, 0xb9, 0xfe, 0x2c, 0xba, 0x40, 0xd6, 0xba, 0xf0, 0x9e, 0xbe, 0xf5, 0x9b, 0xfa,
0xae, 0x32, 0xf3, 0xde, 0xc9, 0xec, 0x39, 0xb4, 0xb0, 0xbd, 0x2a, 0x6e, 0x60, 0xc5, 0xdd, 0x2d,
0xa9, 0xa1, 0x9d, 0xb8, 0xf8, 0xde, 0x2e, 0x04, 0x03, 0x29, 0xb3, 0x1f, 0xaa, 0x8a, 0xcd, 0x4d,
0x52, 0xa6, 0x1d, 0x31, 0xe9, 0x7a, 0x7d, 0x3f, 0x41, 0xbb, 0xf7, 0x08, 0xfc, 0x51, 0xa1, 0x6f,
0x7e, 0x6f, 0xba, 0xef, 0xbb, 0x10, 0xbc, 0x92, 0xc5, 0xe5, 0xcd, 0x00, 0xcf, 0x05, 0x74, 0x01,
0x8e, 0x33, 0xc9, 0x36, 0x50, 0xd4, 0x5d, 0xc4, 0x63, 0x08, 0x87, 0x72, 0x36, 0xc9, 0xf8, 0xcd,
0x10, 0xb2, 0x22, 0x19, 0xcc, 0x35, 0x57, 0x37, 0x23, 0x3a, 0x2b, 0x92, 0x73, 0x5d, 0x89, 0x4d,
0x99, 0x04, 0x2e, 0xe4, 0x1f, 0x0f, 0xc2, 0xf3, 0x29, 0xcb, 0x58, 0x85, 0x4a, 0xd0, 0x97, 0x10,
0x4c, 0xa4, 0xcc, 0xc6, 0x2e, 0x90, 0xf4, 0xc3, 0xc3, 0x47, 0x1b, 0x85, 0x5b, 0x2a, 0x74, 0x52,
0x4b, 0x7c, 0x03, 0x31, 0xe3, 0x4b, 0x5f, 0x80, 0x2f, 0x0a, 0x6d, 0xd1, 0x75, 0x44, 0x6f, 0x9e,
0xf5, 0x85, 0x7c, 0x27, 0xb5, 0xa4, 0x2d, 0x0a, 0x8d, 0xd8, 0x97, 0x10, 0x64, 0xb2, 0xb8, 0xb4,
0x60, 0x6f, 0xcb, 0xaf, 0x97, 0xda, 0x9a, 0x5f, 0x1b, 0x08, 0xc2, 0xbf, 0x07, 0x78, 0x63, 0x34,
0xb5, 0xf8, 0x06, 0xe2, 0x6f, 0x19, 0xc7, 0xa5, 0xf4, 0x27, 0xb5, 0x24, 0x40, 0x10, 0x32, 0x1c,
0x41, 0x98, 0xa2, 0xe6, 0x96, 0xa2, 0x89, 0x14, 0x9b, 0xc7, 0x66, 0xad, 0x37, 0x27, 0xb5, 0x04,
0x2c, 0x6c, 0x41, 0xa2, 0x50, 0x73, 0x4b, 0xd2, 0xda, 0x42, 0xb2, 0xd6, 0x1b, 0x43, 0x62, 0x61,
0x8b, 0x5a, 0x26, 0xa6, 0xb5, 0x96, 0xa3, 0xbd, 0xa5, 0x96, 0xd5, 0x04, 0x98, 0x5a, 0x10, 0x64,
0x18, 0x06, 0x2d, 0xdb, 0xeb, 0xde, 0xef, 0x04, 0xc2, 0x0b, 0x3e, 0xd5, 0xd2, 0xf5, 0x37, 0x02,
0x2f, 0x15, 0xb9, 0xdb, 0x7f, 0xc6, 0x34, 0xfb, 0xc1, 0xea, 0xf6, 0x16, 0xc3, 0x5c, 0xdb, 0xee,
0xa0, 0x5c, 0x88, 0x30, 0x4b, 0x4e, 0x3f, 0x87, 0x7b, 0x13, 0x51, 0x98, 0x4d, 0xe9, 0x68, 0x4c,
0x03, 0x3b, 0x27, 0xb5, 0xa4, 0x63, 0xdd, 0x36, 0x6c, 0x99, 0xd6, 0x7f, 0x04, 0x02, 0x4c, 0x08,
0xcb, 0x7d, 0x02, 0x0d, 0xdc, 0x8e, 0xe4, 0x2e, 0xdb, 0x11, 0x43, 0xe9, 0x43, 0x00, 0xbc, 0xad,
0xe3, 0xb5, 0xbd, 0x1d, 0xa0, 0xe7, 0xb5, 0x59, 0x1b, 0xdf, 0x42, 0x5b, 0xe1, 0x54, 0x2b, 0x37,
0x49, 0xb7, 0x74, 0x60, 0x35, 0xf9, 0x66, 0x12, 0x1d, 0xc4, 0xa0, 0x6d, 0x15, 0xca, 0xcd, 0xd1,
0x66, 0xf4, 0x9a, 0xae, 0x06, 0xed, 0x20, 0xf4, 0x13, 0xf0, 0x6d, 0x6a, 0x22, 0xc5, 0x19, 0x5a,
0xbe, 0x33, 0xe9, 0xa0, 0x0d, 0x4d, 0x34, 0x7b, 0xbf, 0x11, 0xf0, 0x46, 0x43, 0x45, 0xbf, 0x86,
0x96, 0xb9, 0x2f, 0x22, 0xdd, 0x7a, 0xd7, 0xd6, 0x07, 0xbe, 0x29, 0x0a, 0x3d, 0x4a, 0xe9, 0x37,
0xd0, 0x52, 0xba, 0x32, 0xc0, 0xfa, 0x9d, 0x27, 0xac, 0xa9, 0x74, 0x35, 0x4a, 0x07, 0x00, 0xbe,
0x48, 0xc7, 0x36, 0x8f, 0x7f, 0x09, 0x44, 0xe7, 0x9c, 0x55, 0xd3, 0xab, 0x84, 0xab, 0x59, 0x66,
0xef, 0xc1, 0x2e, 0x84, 0xc5, 0x2c, 0x1f, 0xff, 0x32, 0xe3, 0x95, 0xe0, 0xca, 0xcd, 0x0a, 0x14,
0xb3, 0xfc, 0x47, 0xeb, 0xa1, 0xf7, 0xa1, 0xa9, 0x65, 0x39, 0xbe, 0xc6, 0x7f, 0x7b, 0x49, 0x43,
0xcb, 0xf2, 0x94, 0x7e, 0x07, 0xa1, 0xdd, 0x9f, 0x8b, 0x0b, 0xec, 0xdd, 0x5a, 0xcf, 0xb2, 0xf3,
0x89, 0x6d, 0x22, 0x8e, 0xac, 0x59, 0xe4, 0x6a, 0x2a, 0x2b, 0x6e, 0x17, 0x76, 0x3d, 0x71, 0x27,
0xba, 0x07, 0x9e, 0x48, 0x95, 0xbb, 0x8e, 0xf1, 0xe6, 0x75, 0x32, 0x54, 0x89, 0x09, 0xa2, 0x0f,
0x30, 0xb3, 0x6b, 0xfb, 0x54, 0x7a, 0x89, 0x3d, 0xec, 0xfd, 0x45, 0xc0, 0x5f, 0xcc, 0x0f, 0xf5,
0xa1, 0xf1, 0x5a, 0x16, 0x3c, 0xaa, 0x19, 0xcb, 0x6c, 0xb1, 0x88, 0x18, 0x6b, 0x54, 0xe8, 0xe7,
0x51, 0x9d, 0x06, 0xd0, 0x1c, 0x15, 0xfa, 0xc9, 0xb3, 0xc8, 0x73, 0xe6, 0xd3, 0xc3, 0xa8, 0xe1,
0xcc, 0x67, 0x5f, 0x46, 0x4d, 0x63, 0xe2, 0x2d, 0x88, 0x80, 0x02, 0xb4, 0xec, 0x1e, 0x88, 0x42,
0x63, 0x5b, 0xb1, 0xa3, 0x07, 0x34, 0x84, 0xf6, 0x05, 0xab, 0x8e, 0xae, 0x58, 0x15, 0x7d, 0x44,
0x23, 0xe8, 0x0c, 0xd6, 0x6e, 0x40, 0x94, 0xd2, 0x0f, 0x20, 0x3c, 0x5e, 0xdd, 0x9c, 0x88, 0xef,
0x5d, 0x00, 0xac, 0x5e, 0x48, 0x03, 0xc0, 0xd3, 0x51, 0xc5, 0x99, 0xe6, 0x69, 0x54, 0xa3, 0x1f,
0xc2, 0xbd, 0x95, 0xc7, 0xfc, 0x82, 0x2c, 0x5d, 0xc3, 0x4a, 0x96, 0xa5, 0x71, 0xd5, 0x97, 0x38,
0x74, 0xf1, 0x34, 0xf2, 0x06, 0xaf, 0x60, 0x47, 0xc8, 0x85, 0x5e, 0x97, 0x55, 0x39, 0x1d, 0x84,
0xf6, 0xa5, 0x3b, 0x33, 0xda, 0x9d, 0x91, 0x9f, 0xfb, 0x97, 0x42, 0x5f, 0xcd, 0x26, 0xe6, 0xf5,
0x3f, 0xb0, 0x61, 0x5f, 0x08, 0xe9, 0xac, 0x03, 0x56, 0x8a, 0x03, 0x2b, 0x6f, 0x39, 0xf9, 0x9b,
0x90, 0x49, 0x0b, 0x15, 0x7f, 0xfa, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0xa3, 0xa5, 0x38,
0x87, 0x09, 0x00, 0x00,
// 1109 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcd, 0x6e, 0xdb, 0x46,
0x10, 0x16, 0x45, 0x89, 0x22, 0x87, 0xb2, 0xcb, 0x6e, 0xd2, 0x80, 0x2d, 0x90, 0x58, 0x11, 0x5a,
0x40, 0x30, 0x50, 0x1b, 0xb1, 0x53, 0x37, 0x0d, 0x1a, 0x34, 0x95, 0x05, 0xc3, 0x82, 0x83, 0xc0,
0xa5, 0x2b, 0x1f, 0x7a, 0x11, 0x56, 0xe2, 0xc6, 0x5e, 0x98, 0xe2, 0xb2, 0xdc, 0x65, 0x50, 0x3d,
0x40, 0xdf, 0xa0, 0x87, 0xa2, 0xa7, 0xbe, 0x4f, 0xdf, 0xa1, 0x97, 0x3e, 0x47, 0x81, 0x62, 0x7f,
0x28, 0x2a, 0x35, 0x2d, 0xf8, 0x36, 0x3b, 0xfa, 0xbe, 0xe1, 0xec, 0x7c, 0x33, 0xb3, 0x82, 0x2e,
0x9f, 0x5f, 0x93, 0x05, 0xde, 0xcb, 0x72, 0x26, 0x18, 0x7a, 0xb0, 0xa0, 0xc9, 0xfb, 0x82, 0xeb,
0xd3, 0x9e, 0xfe, 0xe9, 0xb3, 0xee, 0x9c, 0x2d, 0x16, 0x2c, 0xd5, 0xce, 0xfe, 0xef, 0x36, 0xf8,
0x27, 0x94, 0x24, 0xf1, 0x85, 0xfa, 0x15, 0x85, 0xd0, 0x79, 0x27, 0x8f, 0xe3, 0x51, 0x68, 0xf5,
0xac, 0x81, 0x1d, 0x95, 0x47, 0x84, 0xa0, 0x95, 0xe2, 0x05, 0x09, 0x9b, 0x3d, 0x6b, 0xe0, 0x45,
0xca, 0x46, 0x9f, 0xc3, 0x36, 0xe5, 0xd3, 0x2c, 0xa7, 0x0b, 0x9c, 0x2f, 0xa7, 0x37, 0x64, 0x19,
0xda, 0x3d, 0x6b, 0xe0, 0x46, 0x5d, 0xca, 0xcf, 0xb5, 0xf3, 0x8c, 0x2c, 0x51, 0x0f, 0xfc, 0x98,
0xf0, 0x79, 0x4e, 0x33, 0x41, 0x59, 0x1a, 0xb6, 0x54, 0x80, 0x75, 0x17, 0x7a, 0x09, 0x5e, 0x8c,
0x05, 0x9e, 0x8a, 0x65, 0x46, 0xc2, 0x76, 0xcf, 0x1a, 0x6c, 0x1f, 0x3c, 0xde, 0xab, 0x49, 0x7e,
0x6f, 0x84, 0x05, 0xfe, 0x71, 0x99, 0x91, 0xc8, 0x8d, 0x8d, 0x85, 0x86, 0xe0, 0x4b, 0xda, 0x34,
0xc3, 0x39, 0x5e, 0xf0, 0xd0, 0xe9, 0xd9, 0x03, 0xff, 0xe0, 0xe9, 0x87, 0x6c, 0x73, 0xe5, 0x33,
0xb2, 0xbc, 0xc4, 0x49, 0x41, 0xce, 0x31, 0xcd, 0x23, 0x90, 0xac, 0x73, 0x45, 0x42, 0x23, 0xe8,
0xd2, 0x34, 0x26, 0xbf, 0x94, 0x41, 0x3a, 0xf7, 0x0d, 0xe2, 0x2b, 0x9a, 0x89, 0xf2, 0x08, 0x1c,
0x5c, 0x08, 0x36, 0x1e, 0x85, 0xae, 0xaa, 0x82, 0x39, 0xa1, 0xaf, 0xa0, 0xcd, 0x05, 0x16, 0x24,
0xf4, 0xd4, 0xcd, 0x76, 0x6a, 0x6f, 0xa6, 0x45, 0x90, 0xb0, 0x48, 0xa3, 0xfb, 0x7f, 0x58, 0x10,
0x1c, 0xb3, 0x24, 0x21, 0x73, 0x59, 0x23, 0xa3, 0x4f, 0xa9, 0x82, 0xb5, 0xa6, 0xc2, 0xff, 0xea,
0xdb, 0xbc, 0x5d, 0xdf, 0x2a, 0x33, 0xfb, 0x83, 0xcc, 0x5e, 0x80, 0xa3, 0xe4, 0xe5, 0x61, 0x4b,
0xdd, 0xb8, 0xb7, 0x21, 0x35, 0x65, 0x47, 0x06, 0xdf, 0xdf, 0x01, 0x6f, 0xc8, 0x58, 0xf2, 0x7d,
0x9e, 0xe3, 0xa5, 0x4c, 0x4a, 0xca, 0x11, 0x5a, 0x3d, 0x7b, 0xe0, 0x46, 0xca, 0xee, 0x3f, 0x01,
0x77, 0x9c, 0x8a, 0xdb, 0xbf, 0xb7, 0xcd, 0xef, 0x3b, 0xe0, 0xbd, 0x61, 0xe9, 0xd5, 0x6d, 0x80,
0x5d, 0x01, 0x26, 0xb5, 0x11, 0xb6, 0x0c, 0xa0, 0x07, 0x30, 0xa9, 0x0f, 0xd1, 0xaa, 0x10, 0x27,
0x09, 0xc3, 0x35, 0x31, 0x9a, 0x06, 0xf1, 0x14, 0xfc, 0x11, 0x2b, 0x66, 0x09, 0xb9, 0x0d, 0xb1,
0xaa, 0x20, 0xc3, 0xa5, 0x20, 0xfc, 0x36, 0xa2, 0x5b, 0x05, 0xb9, 0x10, 0x39, 0xad, 0xcb, 0xc4,
0x33, 0x90, 0xbf, 0x5b, 0xe0, 0x5f, 0xcc, 0x71, 0x82, 0x73, 0x55, 0x4c, 0xf4, 0x0a, 0xbc, 0x19,
0x63, 0xc9, 0xd4, 0x00, 0xad, 0x81, 0x7f, 0xf0, 0xa4, 0xb6, 0xf6, 0xab, 0x22, 0x9f, 0x36, 0x22,
0x57, 0x52, 0xe4, 0x04, 0xa0, 0x97, 0xe0, 0xd2, 0x54, 0x68, 0x76, 0x53, 0xb1, 0xeb, 0xc7, 0xa5,
0xac, 0xdf, 0x69, 0x23, 0xea, 0xd0, 0x54, 0x28, 0xee, 0x2b, 0xf0, 0x12, 0x96, 0x5e, 0x69, 0xb2,
0xbd, 0xe1, 0xd3, 0xab, 0xda, 0xca, 0x4f, 0x4b, 0x8a, 0xa2, 0xbf, 0x06, 0x78, 0x27, 0x6b, 0xaa,
0xf9, 0x2d, 0xc5, 0xbf, 0xa3, 0xa3, 0x57, 0xa5, 0x3f, 0x6d, 0x44, 0x9e, 0x22, 0xa9, 0x08, 0xc7,
0xe0, 0xc7, 0xaa, 0xe6, 0x3a, 0x44, 0x5b, 0x85, 0xa8, 0xef, 0xbc, 0x35, 0x6d, 0x4e, 0x1b, 0x11,
0x68, 0x5a, 0x19, 0x84, 0xab, 0x9a, 0xeb, 0x20, 0xce, 0x86, 0x20, 0x6b, 0xda, 0xc8, 0x20, 0x9a,
0x56, 0xde, 0x65, 0x26, 0xa5, 0xd5, 0x31, 0x3a, 0x1b, 0xee, 0x52, 0x75, 0x80, 0xbc, 0x8b, 0x22,
0x95, 0xc5, 0x2c, 0x56, 0x4a, 0xb8, 0x1b, 0x8a, 0x39, 0x59, 0x93, 0xc2, 0x2d, 0x4a, 0x2d, 0x5e,
0x03, 0x14, 0x95, 0x18, 0xde, 0x86, 0x04, 0x26, 0xeb, 0x6a, 0x78, 0x45, 0x29, 0xc7, 0xd0, 0xd1,
0xcd, 0xd6, 0xff, 0xcd, 0x02, 0xff, 0x92, 0xcc, 0x05, 0x33, 0x0d, 0x16, 0x80, 0x1d, 0xd3, 0x85,
0xd9, 0xe1, 0xd2, 0x94, 0x3b, 0x4e, 0x0b, 0xf7, 0x5e, 0xc1, 0x4c, 0xdf, 0xdc, 0x43, 0x3a, 0x5f,
0xd1, 0x74, 0x70, 0xf4, 0x05, 0x6c, 0xcd, 0x68, 0x2a, 0xb7, 0xbd, 0x09, 0x23, 0x3b, 0xa8, 0x7b,
0xda, 0x88, 0xba, 0xda, 0xad, 0x61, 0xab, 0xb4, 0xfe, 0xb5, 0xc0, 0x53, 0x09, 0xa9, 0xeb, 0x3e,
0x83, 0x96, 0xda, 0xf0, 0xd6, 0x7d, 0x36, 0xbc, 0x82, 0xa2, 0xc7, 0x00, 0x6a, 0xe3, 0x4c, 0xd7,
0xde, 0x1e, 0x4f, 0x79, 0xde, 0xca, 0xd5, 0xf7, 0x2d, 0x74, 0xb8, 0x1a, 0x2b, 0x6e, 0x5a, 0xf9,
0x8e, 0x16, 0xa8, 0x46, 0x4f, 0x8e, 0x82, 0xa1, 0x48, 0xb6, 0xbe, 0x05, 0x37, 0x8d, 0x5c, 0xcf,
0x5e, 0xab, 0xab, 0x64, 0x1b, 0x0a, 0xfa, 0x14, 0x5c, 0x9d, 0x1a, 0x8d, 0x55, 0x13, 0xaf, 0xde,
0xca, 0x78, 0xd8, 0x81, 0xb6, 0x32, 0xfb, 0xbf, 0x5a, 0x60, 0x8f, 0x47, 0x1c, 0x7d, 0x0d, 0x8e,
0x6c, 0x13, 0x1a, 0x6f, 0x1c, 0xf6, 0x75, 0x8d, 0xdb, 0x34, 0x15, 0xe3, 0x18, 0x7d, 0x03, 0x0e,
0x17, 0xb9, 0x24, 0x36, 0xef, 0xdd, 0xe2, 0x6d, 0x2e, 0xf2, 0x71, 0x3c, 0x04, 0x70, 0x69, 0x3c,
0xd5, 0x79, 0xfc, 0x63, 0x41, 0x70, 0x41, 0x70, 0x3e, 0xbf, 0x8e, 0x08, 0x2f, 0x12, 0xdd, 0x7d,
0x3b, 0xe0, 0xa7, 0xc5, 0x62, 0xfa, 0x73, 0x41, 0x72, 0x4a, 0xb8, 0xe9, 0x15, 0x48, 0x8b, 0xc5,
0x0f, 0xda, 0x83, 0x1e, 0x40, 0x5b, 0xb0, 0x6c, 0x7a, 0xa3, 0xbe, 0x6d, 0x47, 0x2d, 0xc1, 0xb2,
0x33, 0xf4, 0x1d, 0xf8, 0xfa, 0x0d, 0x28, 0x37, 0x88, 0x7d, 0xe7, 0x7d, 0x56, 0xca, 0x47, 0x5a,
0x44, 0x3d, 0x33, 0x8f, 0xc0, 0xe1, 0x73, 0x96, 0x13, 0xfd, 0xe8, 0x34, 0x23, 0x73, 0x42, 0xbb,
0x60, 0xd3, 0x98, 0x9b, 0x7d, 0x10, 0xd6, 0xef, 0xb3, 0x11, 0x8f, 0x24, 0x08, 0x3d, 0x54, 0x99,
0xdd, 0xe8, 0xe7, 0xde, 0x8e, 0xf4, 0x61, 0xf7, 0x2f, 0x0b, 0xdc, 0xb2, 0x7f, 0x90, 0x0b, 0xad,
0xb7, 0x2c, 0x25, 0x41, 0x43, 0x5a, 0x72, 0x8d, 0x06, 0x96, 0xb4, 0xc6, 0xa9, 0x78, 0x11, 0x34,
0x91, 0x07, 0xed, 0x71, 0x2a, 0x9e, 0x1d, 0x05, 0xb6, 0x31, 0x0f, 0x0f, 0x82, 0x96, 0x31, 0x8f,
0x9e, 0x07, 0x6d, 0x69, 0x4e, 0x14, 0xd6, 0x41, 0x00, 0xce, 0x44, 0x83, 0x3b, 0xa5, 0x7d, 0x78,
0x10, 0xb8, 0xa5, 0x7d, 0xf4, 0x3c, 0xf0, 0x24, 0x5c, 0x0d, 0x4d, 0x00, 0xd2, 0xad, 0xf7, 0x56,
0xe0, 0x4b, 0x5b, 0x6b, 0x13, 0x3c, 0x44, 0x3e, 0x74, 0x2e, 0x71, 0x7e, 0x7c, 0x8d, 0xf3, 0xe0,
0x13, 0x14, 0x40, 0x77, 0xb8, 0x36, 0x30, 0x41, 0x8c, 0x3e, 0x02, 0xff, 0xa4, 0x1a, 0xb4, 0x80,
0xec, 0x5e, 0x02, 0x54, 0x7f, 0x0a, 0x24, 0x41, 0x9d, 0x8e, 0x73, 0x82, 0x05, 0x89, 0x83, 0x06,
0xfa, 0x18, 0xb6, 0x2a, 0x8f, 0xfc, 0x84, 0xb5, 0x72, 0x8d, 0x72, 0x96, 0x65, 0xd2, 0xd5, 0x5c,
0xf1, 0x94, 0x8b, 0xc4, 0x81, 0x3d, 0x7c, 0x03, 0xdb, 0x94, 0x95, 0xe5, 0xbd, 0xca, 0xb3, 0xf9,
0xd0, 0xd7, 0x8f, 0xfb, 0xb9, 0x2c, 0xf5, 0xb9, 0xf5, 0xd3, 0xe0, 0x8a, 0x8a, 0xeb, 0x62, 0x26,
0xff, 0xf0, 0xec, 0x6b, 0xd8, 0x97, 0x94, 0x19, 0x6b, 0x1f, 0x67, 0x74, 0x5f, 0xab, 0x91, 0xcd,
0xfe, 0xb4, 0xac, 0x99, 0xa3, 0x04, 0x3a, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x87, 0xd9,
0x4c, 0x7a, 0x0a, 0x00, 0x00,
}

View File

@ -52,6 +52,31 @@ GetDType<int64_t>() {
return Int64;
}
template <>
inline CDataType
GetDType<uint8_t>() {
return UInt8;
}
template <>
inline CDataType
GetDType<uint16_t>() {
return UInt16;
}
template <>
inline CDataType
GetDType<uint32_t>() {
return UInt32;
}
template <>
inline CDataType
GetDType<uint64_t>() {
return UInt64;
}
//
template <>
inline CDataType
GetDType<float>() {

View File

@ -39,6 +39,14 @@ datatype_sizeof(DataType data_type, int dim = 1) {
return sizeof(int32_t);
case DataType::INT64:
return sizeof(int64_t);
case DataType::UINT8:
return sizeof(uint8_t);
case DataType::UINT16:
return sizeof(uint16_t);
case DataType::UINT32:
return sizeof(uint32_t);
case DataType::UINT64:
return sizeof(uint64_t);
case DataType::FLOAT:
return sizeof(float);
case DataType::DOUBLE:
@ -69,6 +77,14 @@ datatype_name(DataType data_type) {
return "int32_t";
case DataType::INT64:
return "int64_t";
case DataType::UINT8:
return "uint8_t";
case DataType::UINT16:
return "uint16_t";
case DataType::UINT32:
return "uint32_t";
case DataType::UINT64:
return "uint64_t";
case DataType::FLOAT:
return "float";
case DataType::DOUBLE:
@ -110,6 +126,10 @@ datatype_is_integer(DataType datatype) {
case DataType::INT16:
case DataType::INT32:
case DataType::INT64:
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
case DataType::UINT64:
return true;
default:
return false;

View File

@ -53,6 +53,11 @@ enum class DataType {
INT32 = 4,
INT64 = 5,
UINT8 = 6,
UINT16 = 7,
UINT32 = 8,
UINT64 = 9,
FLOAT = 10,
DOUBLE = 11,

View File

@ -47,6 +47,11 @@ enum CDataType {
Int32 = 4,
Int64 = 5,
UInt8 = 6,
UInt16 = 7,
UInt32 = 8,
UInt64 = 9,
Float = 10,
Double = 11,

View File

@ -51,6 +51,14 @@ IndexFactory::CreateScalarIndex(const CreateIndexInfo& create_index_info) {
return CreateScalarIndex<int32_t>(index_type);
case DataType::INT64:
return CreateScalarIndex<int64_t>(index_type);
case DataType::UINT8:
return CreateScalarIndex<uint8_t>(index_type);
case DataType::UINT16:
return CreateScalarIndex<uint16_t>(index_type);
case DataType::UINT32:
return CreateScalarIndex<uint32_t>(index_type);
case DataType::UINT64:
return CreateScalarIndex<uint64_t>(index_type);
case DataType::FLOAT:
return CreateScalarIndex<float>(index_type);
case DataType::DOUBLE:

View File

@ -89,6 +89,13 @@ ScalarIndex<int8_t>::BuildWithRawData(size_t n, const void* values, const Config
Build(n, data);
}
template <>
inline void
ScalarIndex<uint8_t>::BuildWithRawData(size_t n, const void* values, const Config& config) {
auto data = reinterpret_cast<uint8_t*>(const_cast<void*>(values));
Build(n, data);
}
template <>
inline void
ScalarIndex<int16_t>::BuildWithRawData(size_t n, const void* values, const Config& config) {
@ -96,6 +103,13 @@ ScalarIndex<int16_t>::BuildWithRawData(size_t n, const void* values, const Confi
Build(n, data);
}
template <>
inline void
ScalarIndex<uint16_t>::BuildWithRawData(size_t n, const void* values, const Config& config) {
auto data = reinterpret_cast<uint16_t*>(const_cast<void*>(values));
Build(n, data);
}
template <>
inline void
ScalarIndex<int32_t>::BuildWithRawData(size_t n, const void* values, const Config& config) {
@ -103,6 +117,13 @@ ScalarIndex<int32_t>::BuildWithRawData(size_t n, const void* values, const Confi
Build(n, data);
}
template <>
inline void
ScalarIndex<uint32_t>::BuildWithRawData(size_t n, const void* values, const Config& config) {
auto data = reinterpret_cast<uint32_t*>(const_cast<void*>(values));
Build(n, data);
}
template <>
inline void
ScalarIndex<int64_t>::BuildWithRawData(size_t n, const void* values, const Config& config) {
@ -110,6 +131,13 @@ ScalarIndex<int64_t>::BuildWithRawData(size_t n, const void* values, const Confi
Build(n, data);
}
template <>
inline void
ScalarIndex<uint64_t>::BuildWithRawData(size_t n, const void* values, const Config& config) {
auto data = reinterpret_cast<uint64_t*>(const_cast<void*>(values));
Build(n, data);
}
template <>
inline void
ScalarIndex<float>::BuildWithRawData(size_t n, const void* values, const Config& config) {

View File

@ -49,6 +49,10 @@ class IndexFactory {
case DataType::INT16:
case DataType::INT32:
case DataType::INT64:
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
case DataType::UINT64:
case DataType::FLOAT:
case DataType::DOUBLE:
case DataType::VARCHAR:

File diff suppressed because it is too large Load Diff

View File

@ -49,7 +49,7 @@ struct TableStruct_schema_2eproto {
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[14]
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[16]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[];
static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[];
@ -98,6 +98,12 @@ extern SearchResultDataDefaultTypeInternal _SearchResultData_default_instance_;
class StringArray;
class StringArrayDefaultTypeInternal;
extern StringArrayDefaultTypeInternal _StringArray_default_instance_;
class UIntArray;
class UIntArrayDefaultTypeInternal;
extern UIntArrayDefaultTypeInternal _UIntArray_default_instance_;
class ULongArray;
class ULongArrayDefaultTypeInternal;
extern ULongArrayDefaultTypeInternal _ULongArray_default_instance_;
class VectorField;
class VectorFieldDefaultTypeInternal;
extern VectorFieldDefaultTypeInternal _VectorField_default_instance_;
@ -118,6 +124,8 @@ template<> ::milvus::proto::schema::LongArray* Arena::CreateMaybeMessage<::milvu
template<> ::milvus::proto::schema::ScalarField* Arena::CreateMaybeMessage<::milvus::proto::schema::ScalarField>(Arena*);
template<> ::milvus::proto::schema::SearchResultData* Arena::CreateMaybeMessage<::milvus::proto::schema::SearchResultData>(Arena*);
template<> ::milvus::proto::schema::StringArray* Arena::CreateMaybeMessage<::milvus::proto::schema::StringArray>(Arena*);
template<> ::milvus::proto::schema::UIntArray* Arena::CreateMaybeMessage<::milvus::proto::schema::UIntArray>(Arena*);
template<> ::milvus::proto::schema::ULongArray* Arena::CreateMaybeMessage<::milvus::proto::schema::ULongArray>(Arena*);
template<> ::milvus::proto::schema::VectorField* Arena::CreateMaybeMessage<::milvus::proto::schema::VectorField>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace milvus {
@ -131,6 +139,10 @@ enum DataType : int {
Int16 = 3,
Int32 = 4,
Int64 = 5,
UInt8 = 6,
UInt16 = 7,
UInt32 = 8,
UInt64 = 9,
Float = 10,
Double = 11,
String = 20,
@ -983,6 +995,282 @@ class LongArray :
};
// -------------------------------------------------------------------
class UIntArray :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.schema.UIntArray) */ {
public:
UIntArray();
virtual ~UIntArray();
UIntArray(const UIntArray& from);
UIntArray(UIntArray&& from) noexcept
: UIntArray() {
*this = ::std::move(from);
}
inline UIntArray& operator=(const UIntArray& from) {
CopyFrom(from);
return *this;
}
inline UIntArray& operator=(UIntArray&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
// ---- machine-generated protobuf code (schema.pb.h); do not hand-edit logic ----
// Visible tail of the generated message class milvus.proto.schema.UIntArray
// (class head lies above this chunk). The message wraps a single packed
// `repeated uint32 data = 1` field.
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
return GetDescriptor();
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
return GetMetadataStatic().descriptor;
}
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
return GetMetadataStatic().reflection;
}
static const UIntArray& default_instance();
static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY
// Returns the process-wide default instance without consulting the
// descriptor pool (the object lives in _UIntArray_default_instance_).
static inline const UIntArray* internal_default_instance() {
return reinterpret_cast<const UIntArray*>(
&_UIntArray_default_instance_);
}
// Index of this message in descriptor_table_schema_2eproto's
// file_level_metadata array; must match the code generator's ordering.
static constexpr int kIndexInFileMessages =
5;
friend void swap(UIntArray& a, UIntArray& b) {
a.Swap(&b);
}
// Member-wise swap; self-swap is an explicit no-op.
inline void Swap(UIntArray* other) {
if (other == this) return;
InternalSwap(other);
}
// implements Message ----------------------------------------------
inline UIntArray* New() const final {
return CreateMaybeMessage<UIntArray>(nullptr);
}
UIntArray* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
return CreateMaybeMessage<UIntArray>(arena);
}
void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void CopyFrom(const UIntArray& from);
void MergeFrom(const UIntArray& from);
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
bool IsInitialized() const final;
size_t ByteSizeLong() const final;
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
#else
bool MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final;
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final;
::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const final;
int GetCachedSize() const final { return _cached_size_.Get(); }
private:
inline void SharedCtor();
inline void SharedDtor();
void SetCachedSize(int size) const final;
void InternalSwap(UIntArray* other);
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
return "milvus.proto.schema.UIntArray";
}
private:
// Arena support is compiled out for this message: both arena hooks
// always return nullptr, so instances always live on the heap.
inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const {
return nullptr;
}
inline void* MaybeArenaPtr() const {
return nullptr;
}
public:
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
private:
// Lazily registers the file's descriptors, then returns this message's
// metadata slot (see kIndexInFileMessages above).
static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_schema_2eproto);
return ::descriptor_table_schema_2eproto.file_level_metadata[kIndexInFileMessages];
}
public:
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
enum : int {
kDataFieldNumber = 1,
};
// repeated uint32 data = 1;
int data_size() const;
void clear_data();
::PROTOBUF_NAMESPACE_ID::uint32 data(int index) const;
void set_data(int index, ::PROTOBUF_NAMESPACE_ID::uint32 value);
void add_data(::PROTOBUF_NAMESPACE_ID::uint32 value);
const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint32 >&
data() const;
::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint32 >*
mutable_data();
// @@protoc_insertion_point(class_scope:milvus.proto.schema.UIntArray)
private:
class _Internal;
::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint32 > data_;
// Cached wire size of the packed `data` field (mutable/atomic because it
// is presumably updated from const size/serialize paths — standard
// protobuf packed-field caching; confirm against the generated .pb.cc).
mutable std::atomic<int> _data_cached_byte_size_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_schema_2eproto;
};
// -------------------------------------------------------------------
// Generated message class for milvus.proto.schema.ULongArray: a thin
// wrapper around a single packed `repeated uint64 data = 1` field.
// Machine-generated protobuf code; do not hand-edit logic.
class ULongArray :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.schema.ULongArray) */ {
public:
ULongArray();
virtual ~ULongArray();
ULongArray(const ULongArray& from);
// Move-construct via default-construct + move-assign.
ULongArray(ULongArray&& from) noexcept
: ULongArray() {
*this = ::std::move(from);
}
inline ULongArray& operator=(const ULongArray& from) {
CopyFrom(from);
return *this;
}
// Move-assign: GetArenaNoVirtual() is always nullptr for this message
// (see below), so the same-arena branch — a cheap InternalSwap — is
// always taken unless self-assigning.
inline ULongArray& operator=(ULongArray&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
return GetDescriptor();
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
return GetMetadataStatic().descriptor;
}
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
return GetMetadataStatic().reflection;
}
static const ULongArray& default_instance();
static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY
// Process-wide default instance (lives in _ULongArray_default_instance_).
static inline const ULongArray* internal_default_instance() {
return reinterpret_cast<const ULongArray*>(
&_ULongArray_default_instance_);
}
// Index in descriptor_table_schema_2eproto's file_level_metadata array;
// must match the code generator's ordering.
static constexpr int kIndexInFileMessages =
6;
friend void swap(ULongArray& a, ULongArray& b) {
a.Swap(&b);
}
// Member-wise swap; self-swap is an explicit no-op.
inline void Swap(ULongArray* other) {
if (other == this) return;
InternalSwap(other);
}
// implements Message ----------------------------------------------
inline ULongArray* New() const final {
return CreateMaybeMessage<ULongArray>(nullptr);
}
ULongArray* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
return CreateMaybeMessage<ULongArray>(arena);
}
void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void CopyFrom(const ULongArray& from);
void MergeFrom(const ULongArray& from);
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
bool IsInitialized() const final;
size_t ByteSizeLong() const final;
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
#else
bool MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final;
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final;
::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const final;
int GetCachedSize() const final { return _cached_size_.Get(); }
private:
inline void SharedCtor();
inline void SharedDtor();
void SetCachedSize(int size) const final;
void InternalSwap(ULongArray* other);
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
return "milvus.proto.schema.ULongArray";
}
private:
// Arena support is compiled out: both arena hooks always return nullptr,
// so instances always live on the heap.
inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const {
return nullptr;
}
inline void* MaybeArenaPtr() const {
return nullptr;
}
public:
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
private:
// Lazily registers the file's descriptors, then returns this message's
// metadata slot (see kIndexInFileMessages above).
static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_schema_2eproto);
return ::descriptor_table_schema_2eproto.file_level_metadata[kIndexInFileMessages];
}
public:
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
enum : int {
kDataFieldNumber = 1,
};
// repeated uint64 data = 1;
int data_size() const;
void clear_data();
::PROTOBUF_NAMESPACE_ID::uint64 data(int index) const;
void set_data(int index, ::PROTOBUF_NAMESPACE_ID::uint64 value);
void add_data(::PROTOBUF_NAMESPACE_ID::uint64 value);
const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint64 >&
data() const;
::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint64 >*
mutable_data();
// @@protoc_insertion_point(class_scope:milvus.proto.schema.ULongArray)
private:
class _Internal;
::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint64 > data_;
// Cached wire size of the packed `data` field (mutable/atomic because it
// is presumably updated from const size/serialize paths — standard
// protobuf packed-field caching; confirm against the generated .pb.cc).
mutable std::atomic<int> _data_cached_byte_size_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_schema_2eproto;
};
// -------------------------------------------------------------------
class FloatArray :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.schema.FloatArray) */ {
public:
@ -1025,7 +1313,7 @@ class FloatArray :
&_FloatArray_default_instance_);
}
static constexpr int kIndexInFileMessages =
5;
7;
friend void swap(FloatArray& a, FloatArray& b) {
a.Swap(&b);
@ -1163,7 +1451,7 @@ class DoubleArray :
&_DoubleArray_default_instance_);
}
static constexpr int kIndexInFileMessages =
6;
8;
friend void swap(DoubleArray& a, DoubleArray& b) {
a.Swap(&b);
@ -1301,7 +1589,7 @@ class BytesArray :
&_BytesArray_default_instance_);
}
static constexpr int kIndexInFileMessages =
7;
9;
friend void swap(BytesArray& a, BytesArray& b) {
a.Swap(&b);
@ -1444,7 +1732,7 @@ class StringArray :
&_StringArray_default_instance_);
}
static constexpr int kIndexInFileMessages =
8;
10;
friend void swap(StringArray& a, StringArray& b) {
a.Swap(&b);
@ -1589,6 +1877,8 @@ class ScalarField :
kDoubleData = 5,
kStringData = 6,
kBytesData = 7,
kUintData = 8,
kUlongData = 9,
DATA_NOT_SET = 0,
};
@ -1598,7 +1888,7 @@ class ScalarField :
&_ScalarField_default_instance_);
}
static constexpr int kIndexInFileMessages =
9;
11;
friend void swap(ScalarField& a, ScalarField& b) {
a.Swap(&b);
@ -1676,6 +1966,8 @@ class ScalarField :
kDoubleDataFieldNumber = 5,
kStringDataFieldNumber = 6,
kBytesDataFieldNumber = 7,
kUintDataFieldNumber = 8,
kUlongDataFieldNumber = 9,
};
// .milvus.proto.schema.BoolArray bool_data = 1;
bool has_bool_data() const;
@ -1733,6 +2025,22 @@ class ScalarField :
::milvus::proto::schema::BytesArray* mutable_bytes_data();
void set_allocated_bytes_data(::milvus::proto::schema::BytesArray* bytes_data);
// .milvus.proto.schema.UIntArray uint_data = 8;
bool has_uint_data() const;
void clear_uint_data();
const ::milvus::proto::schema::UIntArray& uint_data() const;
::milvus::proto::schema::UIntArray* release_uint_data();
::milvus::proto::schema::UIntArray* mutable_uint_data();
void set_allocated_uint_data(::milvus::proto::schema::UIntArray* uint_data);
// .milvus.proto.schema.ULongArray ulong_data = 9;
bool has_ulong_data() const;
void clear_ulong_data();
const ::milvus::proto::schema::ULongArray& ulong_data() const;
::milvus::proto::schema::ULongArray* release_ulong_data();
::milvus::proto::schema::ULongArray* mutable_ulong_data();
void set_allocated_ulong_data(::milvus::proto::schema::ULongArray* ulong_data);
void clear_data();
DataCase data_case() const;
// @@protoc_insertion_point(class_scope:milvus.proto.schema.ScalarField)
@ -1745,6 +2053,8 @@ class ScalarField :
void set_has_double_data();
void set_has_string_data();
void set_has_bytes_data();
void set_has_uint_data();
void set_has_ulong_data();
inline bool has_data() const;
inline void clear_has_data();
@ -1759,6 +2069,8 @@ class ScalarField :
::milvus::proto::schema::DoubleArray* double_data_;
::milvus::proto::schema::StringArray* string_data_;
::milvus::proto::schema::BytesArray* bytes_data_;
::milvus::proto::schema::UIntArray* uint_data_;
::milvus::proto::schema::ULongArray* ulong_data_;
} data_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1];
@ -1815,7 +2127,7 @@ class VectorField :
&_VectorField_default_instance_);
}
static constexpr int kIndexInFileMessages =
10;
12;
friend void swap(VectorField& a, VectorField& b) {
a.Swap(&b);
@ -1990,7 +2302,7 @@ class FieldData :
&_FieldData_default_instance_);
}
static constexpr int kIndexInFileMessages =
11;
13;
friend void swap(FieldData& a, FieldData& b) {
a.Swap(&b);
@ -2179,7 +2491,7 @@ class IDs :
&_IDs_default_instance_);
}
static constexpr int kIndexInFileMessages =
12;
14;
friend void swap(IDs& a, IDs& b) {
a.Swap(&b);
@ -2335,7 +2647,7 @@ class SearchResultData :
&_SearchResultData_default_instance_);
}
static constexpr int kIndexInFileMessages =
13;
15;
friend void swap(SearchResultData& a, SearchResultData& b) {
a.Swap(&b);
@ -2971,6 +3283,74 @@ LongArray::mutable_data() {
// -------------------------------------------------------------------
// UIntArray
// repeated uint32 data = 1;
// Generated inline accessors for UIntArray's packed `repeated uint32 data = 1`.
// All of them delegate directly to the underlying RepeatedField data_.
inline int UIntArray::data_size() const {
return data_.size();
}
inline void UIntArray::clear_data() {
data_.Clear();
}
// Indexed read; index must be in [0, data_size()).
inline ::PROTOBUF_NAMESPACE_ID::uint32 UIntArray::data(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.UIntArray.data)
return data_.Get(index);
}
// Indexed overwrite of an existing element.
inline void UIntArray::set_data(int index, ::PROTOBUF_NAMESPACE_ID::uint32 value) {
data_.Set(index, value);
// @@protoc_insertion_point(field_set:milvus.proto.schema.UIntArray.data)
}
inline void UIntArray::add_data(::PROTOBUF_NAMESPACE_ID::uint32 value) {
data_.Add(value);
// @@protoc_insertion_point(field_add:milvus.proto.schema.UIntArray.data)
}
// Read-only view of the whole repeated field.
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint32 >&
UIntArray::data() const {
// @@protoc_insertion_point(field_list:milvus.proto.schema.UIntArray.data)
return data_;
}
// Mutable access to the whole repeated field.
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint32 >*
UIntArray::mutable_data() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.schema.UIntArray.data)
return &data_;
}
// -------------------------------------------------------------------
// ULongArray
// repeated uint64 data = 1;
// Generated inline accessors for ULongArray's packed `repeated uint64 data = 1`.
// All of them delegate directly to the underlying RepeatedField data_.
inline int ULongArray::data_size() const {
return data_.size();
}
inline void ULongArray::clear_data() {
data_.Clear();
}
// Indexed read; index must be in [0, data_size()).
inline ::PROTOBUF_NAMESPACE_ID::uint64 ULongArray::data(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.ULongArray.data)
return data_.Get(index);
}
// Indexed overwrite of an existing element.
inline void ULongArray::set_data(int index, ::PROTOBUF_NAMESPACE_ID::uint64 value) {
data_.Set(index, value);
// @@protoc_insertion_point(field_set:milvus.proto.schema.ULongArray.data)
}
inline void ULongArray::add_data(::PROTOBUF_NAMESPACE_ID::uint64 value) {
data_.Add(value);
// @@protoc_insertion_point(field_add:milvus.proto.schema.ULongArray.data)
}
// Read-only view of the whole repeated field.
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint64 >&
ULongArray::data() const {
// @@protoc_insertion_point(field_list:milvus.proto.schema.ULongArray.data)
return data_;
}
// Mutable access to the whole repeated field.
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::uint64 >*
ULongArray::mutable_data() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.schema.ULongArray.data)
return &data_;
}
// -------------------------------------------------------------------
// FloatArray
// repeated float data = 1;
@ -3466,6 +3846,88 @@ inline ::milvus::proto::schema::BytesArray* ScalarField::mutable_bytes_data() {
return data_.bytes_data_;
}
// .milvus.proto.schema.UIntArray uint_data = 8;
// Generated oneof accessors for ScalarField's `UIntArray uint_data = 8`.
// The active oneof member is tracked in _oneof_case_[0]; uint_data_ is a
// heap-owned pointer inside the data_ union while kUintData is active.
inline bool ScalarField::has_uint_data() const {
return data_case() == kUintData;
}
inline void ScalarField::set_has_uint_data() {
_oneof_case_[0] = kUintData;
}
// Deletes the owned UIntArray (if active) and resets the oneof to unset.
inline void ScalarField::clear_uint_data() {
if (has_uint_data()) {
delete data_.uint_data_;
clear_has_data();
}
}
// Transfers ownership of the UIntArray to the caller and resets the oneof;
// returns nullptr when uint_data is not the active member.
inline ::milvus::proto::schema::UIntArray* ScalarField::release_uint_data() {
// @@protoc_insertion_point(field_release:milvus.proto.schema.ScalarField.uint_data)
if (has_uint_data()) {
clear_has_data();
::milvus::proto::schema::UIntArray* temp = data_.uint_data_;
data_.uint_data_ = nullptr;
return temp;
} else {
return nullptr;
}
}
// Const read: falls back to the immutable default instance when unset.
inline const ::milvus::proto::schema::UIntArray& ScalarField::uint_data() const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.ScalarField.uint_data)
return has_uint_data()
? *data_.uint_data_
: *reinterpret_cast< ::milvus::proto::schema::UIntArray*>(&::milvus::proto::schema::_UIntArray_default_instance_);
}
// Switches the oneof to uint_data, lazily allocating the message (and
// clearing whichever member was previously active).
inline ::milvus::proto::schema::UIntArray* ScalarField::mutable_uint_data() {
if (!has_uint_data()) {
clear_data();
set_has_uint_data();
data_.uint_data_ = CreateMaybeMessage< ::milvus::proto::schema::UIntArray >(
GetArenaNoVirtual());
}
// @@protoc_insertion_point(field_mutable:milvus.proto.schema.ScalarField.uint_data)
return data_.uint_data_;
}
// .milvus.proto.schema.ULongArray ulong_data = 9;
// Generated oneof accessors for ScalarField's `ULongArray ulong_data = 9`.
// The active oneof member is tracked in _oneof_case_[0]; ulong_data_ is a
// heap-owned pointer inside the data_ union while kUlongData is active.
inline bool ScalarField::has_ulong_data() const {
return data_case() == kUlongData;
}
inline void ScalarField::set_has_ulong_data() {
_oneof_case_[0] = kUlongData;
}
// Deletes the owned ULongArray (if active) and resets the oneof to unset.
inline void ScalarField::clear_ulong_data() {
if (has_ulong_data()) {
delete data_.ulong_data_;
clear_has_data();
}
}
// Transfers ownership of the ULongArray to the caller and resets the oneof;
// returns nullptr when ulong_data is not the active member.
inline ::milvus::proto::schema::ULongArray* ScalarField::release_ulong_data() {
// @@protoc_insertion_point(field_release:milvus.proto.schema.ScalarField.ulong_data)
if (has_ulong_data()) {
clear_has_data();
::milvus::proto::schema::ULongArray* temp = data_.ulong_data_;
data_.ulong_data_ = nullptr;
return temp;
} else {
return nullptr;
}
}
// Const read: falls back to the immutable default instance when unset.
inline const ::milvus::proto::schema::ULongArray& ScalarField::ulong_data() const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.ScalarField.ulong_data)
return has_ulong_data()
? *data_.ulong_data_
: *reinterpret_cast< ::milvus::proto::schema::ULongArray*>(&::milvus::proto::schema::_ULongArray_default_instance_);
}
// Switches the oneof to ulong_data, lazily allocating the message (and
// clearing whichever member was previously active).
inline ::milvus::proto::schema::ULongArray* ScalarField::mutable_ulong_data() {
if (!has_ulong_data()) {
clear_data();
set_has_ulong_data();
data_.ulong_data_ = CreateMaybeMessage< ::milvus::proto::schema::ULongArray >(
GetArenaNoVirtual());
}
// @@protoc_insertion_point(field_mutable:milvus.proto.schema.ScalarField.ulong_data)
return data_.ulong_data_;
}
// True when any member of the `data` oneof is set.
inline bool ScalarField::has_data() const {
return data_case() != DATA_NOT_SET;
}
@ -4104,6 +4566,10 @@ SearchResultData::mutable_topks() {
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)

View File

@ -107,6 +107,14 @@ Parser::ParseRangeNode(const Json& out_body) {
return ParseRangeNodeImpl<int32_t>(field_name, body);
case DataType::INT64:
return ParseRangeNodeImpl<int64_t>(field_name, body);
case DataType::UINT8:
return ParseRangeNodeImpl<uint8_t>(field_name, body);
case DataType::UINT16:
return ParseRangeNodeImpl<uint16_t>(field_name, body);
case DataType::UINT32:
return ParseRangeNodeImpl<uint32_t>(field_name, body);
case DataType::UINT64:
return ParseRangeNodeImpl<uint64_t>(field_name, body);
case DataType::FLOAT:
return ParseRangeNodeImpl<float>(field_name, body);
case DataType::DOUBLE:
@ -163,6 +171,18 @@ Parser::ParseTermNode(const Json& out_body) {
case DataType::INT64: {
return ParseTermNodeImpl<int64_t>(field_name, body);
}
case DataType::UINT8: {
return ParseTermNodeImpl<uint8_t>(field_name, body);
}
case DataType::UINT16: {
return ParseTermNodeImpl<uint16_t>(field_name, body);
}
case DataType::UINT32: {
return ParseTermNodeImpl<uint32_t>(field_name, body);
}
case DataType::UINT64: {
return ParseTermNodeImpl<uint64_t>(field_name, body);
}
case DataType::FLOAT: {
return ParseTermNodeImpl<float>(field_name, body);
}

View File

@ -238,6 +238,18 @@ ProtoParser::ParseUnaryRangeExpr(const proto::plan::UnaryRangeExpr& expr_pb) {
case DataType::INT64: {
return ExtractUnaryRangeExprImpl<int64_t>(field_id, data_type, expr_pb);
}
case DataType::UINT8: {
return ExtractUnaryRangeExprImpl<uint8_t>(field_id, data_type, expr_pb);
}
case DataType::UINT16: {
return ExtractUnaryRangeExprImpl<uint16_t>(field_id, data_type, expr_pb);
}
case DataType::UINT32: {
return ExtractUnaryRangeExprImpl<uint32_t>(field_id, data_type, expr_pb);
}
case DataType::UINT64: {
return ExtractUnaryRangeExprImpl<uint64_t>(field_id, data_type, expr_pb);
}
case DataType::FLOAT: {
return ExtractUnaryRangeExprImpl<float>(field_id, data_type, expr_pb);
}
@ -279,6 +291,19 @@ ProtoParser::ParseBinaryRangeExpr(const proto::plan::BinaryRangeExpr& expr_pb) {
case DataType::INT64: {
return ExtractBinaryRangeExprImpl<int64_t>(field_id, data_type, expr_pb);
}
case DataType::UINT8: {
return ExtractBinaryRangeExprImpl<uint8_t>(field_id, data_type, expr_pb);
}
case DataType::UINT16: {
return ExtractBinaryRangeExprImpl<uint16_t>(field_id, data_type, expr_pb);
}
case DataType::UINT32: {
return ExtractBinaryRangeExprImpl<uint32_t>(field_id, data_type, expr_pb);
}
case DataType::UINT64: {
return ExtractBinaryRangeExprImpl<uint64_t>(field_id, data_type, expr_pb);
}
case DataType::FLOAT: {
return ExtractBinaryRangeExprImpl<float>(field_id, data_type, expr_pb);
}
@ -344,6 +369,18 @@ ProtoParser::ParseTermExpr(const proto::plan::TermExpr& expr_pb) {
case DataType::INT64: {
return ExtractTermExprImpl<int64_t>(field_id, data_type, expr_pb);
}
case DataType::UINT8: {
return ExtractTermExprImpl<uint8_t>(field_id, data_type, expr_pb);
}
case DataType::UINT16: {
return ExtractTermExprImpl<uint16_t>(field_id, data_type, expr_pb);
}
case DataType::UINT32: {
return ExtractTermExprImpl<uint32_t>(field_id, data_type, expr_pb);
}
case DataType::UINT64: {
return ExtractTermExprImpl<uint64_t>(field_id, data_type, expr_pb);
}
case DataType::FLOAT: {
return ExtractTermExprImpl<float>(field_id, data_type, expr_pb);
}
@ -398,6 +435,18 @@ ProtoParser::ParseBinaryArithOpEvalRangeExpr(const proto::plan::BinaryArithOpEva
case DataType::INT64: {
return ExtractBinaryArithOpEvalRangeExprImpl<int64_t>(field_id, data_type, expr_pb);
}
case DataType::UINT8: {
return ExtractBinaryArithOpEvalRangeExprImpl<uint8_t>(field_id, data_type, expr_pb);
}
case DataType::UINT16: {
return ExtractBinaryArithOpEvalRangeExprImpl<uint16_t>(field_id, data_type, expr_pb);
}
case DataType::UINT32: {
return ExtractBinaryArithOpEvalRangeExprImpl<uint32_t>(field_id, data_type, expr_pb);
}
case DataType::UINT64: {
return ExtractBinaryArithOpEvalRangeExprImpl<uint64_t>(field_id, data_type, expr_pb);
}
case DataType::FLOAT: {
return ExtractBinaryArithOpEvalRangeExprImpl<float>(field_id, data_type, expr_pb);
}

View File

@ -51,6 +51,14 @@ generate_scalar_index(SpanBase data, DataType data_type) {
return generate_scalar_index(Span<int32_t>(data));
case DataType::INT64:
return generate_scalar_index(Span<int64_t>(data));
case DataType::UINT8:
return generate_scalar_index(Span<uint8_t>(data));
case DataType::UINT16:
return generate_scalar_index(Span<uint16_t>(data));
case DataType::UINT32:
return generate_scalar_index(Span<uint32_t>(data));
case DataType::UINT64:
return generate_scalar_index(Span<uint64_t>(data));
case DataType::FLOAT:
return generate_scalar_index(Span<float>(data));
case DataType::DOUBLE:

View File

@ -468,6 +468,22 @@ ExecExprVisitor::visit(UnaryRangeExpr& expr) {
res = ExecUnaryRangeVisitorDispatcher<int64_t>(expr);
break;
}
case DataType::UINT8: {
res = ExecUnaryRangeVisitorDispatcher<uint8_t>(expr);
break;
}
case DataType::UINT16: {
res = ExecUnaryRangeVisitorDispatcher<uint16_t>(expr);
break;
}
case DataType::UINT32: {
res = ExecUnaryRangeVisitorDispatcher<uint32_t>(expr);
break;
}
case DataType::UINT64: {
res = ExecUnaryRangeVisitorDispatcher<uint64_t>(expr);
break;
}
case DataType::FLOAT: {
res = ExecUnaryRangeVisitorDispatcher<float>(expr);
break;
@ -510,6 +526,22 @@ ExecExprVisitor::visit(BinaryArithOpEvalRangeExpr& expr) {
res = ExecBinaryArithOpEvalRangeVisitorDispatcher<int64_t>(expr);
break;
}
case DataType::UINT8: {
res = ExecBinaryArithOpEvalRangeVisitorDispatcher<uint8_t>(expr);
break;
}
case DataType::UINT16: {
res = ExecBinaryArithOpEvalRangeVisitorDispatcher<uint16_t>(expr);
break;
}
case DataType::UINT32: {
res = ExecBinaryArithOpEvalRangeVisitorDispatcher<uint32_t>(expr);
break;
}
case DataType::UINT64: {
res = ExecBinaryArithOpEvalRangeVisitorDispatcher<uint64_t>(expr);
break;
}
case DataType::FLOAT: {
res = ExecBinaryArithOpEvalRangeVisitorDispatcher<float>(expr);
break;
@ -552,6 +584,23 @@ ExecExprVisitor::visit(BinaryRangeExpr& expr) {
res = ExecBinaryRangeVisitorDispatcher<int64_t>(expr);
break;
}
case DataType::UINT8: {
res = ExecBinaryRangeVisitorDispatcher<uint8_t>(expr);
break;
}
case DataType::UINT16: {
res = ExecBinaryRangeVisitorDispatcher<uint16_t>(expr);
break;
}
case DataType::UINT32: {
res = ExecBinaryRangeVisitorDispatcher<uint32_t>(expr);
break;
}
case DataType::UINT64: {
res = ExecBinaryRangeVisitorDispatcher<uint64_t>(expr);
break;
}
case DataType::FLOAT: {
res = ExecBinaryRangeVisitorDispatcher<float>(expr);
break;
@ -588,7 +637,8 @@ struct relational {
template <typename Op>
auto
ExecExprVisitor::ExecCompareExprDispatcher(CompareExpr& expr, Op op) -> BitsetType {
using number = boost::variant<bool, int8_t, int16_t, int32_t, int64_t, float, double, std::string>;
using number = boost::variant<bool, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float,
double, std::string>;
auto size_per_chunk = segment_.size_per_chunk();
auto num_chunk = upper_div(row_count_, size_per_chunk);
std::deque<BitsetType> bitsets;
@ -659,6 +709,46 @@ ExecExprVisitor::ExecCompareExprDispatcher(CompareExpr& expr, Op op) -> BitsetTy
return [&indexing](int i) -> const number { return indexing.Reverse_Lookup(i); };
}
}
case DataType::UINT8: {
if (chunk_id < data_barrier) {
auto chunk_data = segment_.chunk_data<uint8_t>(field_id, chunk_id).data();
return [chunk_data](int i) -> const number { return chunk_data[i]; };
} else {
// for case, sealed segment has loaded index for scalar field instead of raw data
auto& indexing = segment_.chunk_scalar_index<uint8_t>(field_id, chunk_id);
return [&indexing](int i) -> const number { return indexing.Reverse_Lookup(i); };
}
}
case DataType::UINT16: {
if (chunk_id < data_barrier) {
auto chunk_data = segment_.chunk_data<uint16_t>(field_id, chunk_id).data();
return [chunk_data](int i) -> const number { return chunk_data[i]; };
} else {
// for case, sealed segment has loaded index for scalar field instead of raw data
auto& indexing = segment_.chunk_scalar_index<uint16_t>(field_id, chunk_id);
return [&indexing](int i) -> const number { return indexing.Reverse_Lookup(i); };
}
}
case DataType::UINT32: {
if (chunk_id < data_barrier) {
auto chunk_data = segment_.chunk_data<uint32_t>(field_id, chunk_id).data();
return [chunk_data](int i) -> const number { return chunk_data[i]; };
} else {
// for case, sealed segment has loaded index for scalar field instead of raw data
auto& indexing = segment_.chunk_scalar_index<uint32_t>(field_id, chunk_id);
return [&indexing](int i) -> const number { return indexing.Reverse_Lookup(i); };
}
}
case DataType::UINT64: {
if (chunk_id < data_barrier) {
auto chunk_data = segment_.chunk_data<uint64_t>(field_id, chunk_id).data();
return [chunk_data](int i) -> const number { return chunk_data[i]; };
} else {
// for case, sealed segment has loaded index for scalar field instead of raw data
auto& indexing = segment_.chunk_scalar_index<uint64_t>(field_id, chunk_id);
return [&indexing](int i) -> const number { return indexing.Reverse_Lookup(i); };
}
}
case DataType::FLOAT: {
if (chunk_id < data_barrier) {
auto chunk_data = segment_.chunk_data<float>(field_id, chunk_id).data();
@ -875,6 +965,23 @@ ExecExprVisitor::visit(TermExpr& expr) {
res = ExecTermVisitorImpl<int64_t>(expr);
break;
}
case DataType::UINT8: {
res = ExecTermVisitorImpl<uint8_t>(expr);
break;
}
case DataType::UINT16: {
res = ExecTermVisitorImpl<uint16_t>(expr);
break;
}
case DataType::UINT32: {
res = ExecTermVisitorImpl<uint32_t>(expr);
break;
}
case DataType::UINT64: {
res = ExecTermVisitorImpl<uint64_t>(expr);
break;
}
case DataType::FLOAT: {
res = ExecTermVisitorImpl<float>(expr);
break;

View File

@ -122,6 +122,14 @@ ShowExprVisitor::visit(TermExpr& expr) {
return TermExtract<int32_t>(expr);
case DataType::INT64:
return TermExtract<int64_t>(expr);
case DataType::UINT8:
return TermExtract<uint8_t>(expr);
case DataType::UINT16:
return TermExtract<uint16_t>(expr);
case DataType::UINT32:
return TermExtract<uint32_t>(expr);
case DataType::UINT64:
return TermExtract<uint64_t>(expr);
case DataType::DOUBLE:
return TermExtract<double>(expr);
case DataType::FLOAT:
@ -174,6 +182,18 @@ ShowExprVisitor::visit(UnaryRangeExpr& expr) {
case DataType::INT64:
json_opt_ = UnaryRangeExtract<int64_t>(expr);
return;
case DataType::UINT8:
json_opt_ = UnaryRangeExtract<uint8_t>(expr);
return;
case DataType::UINT16:
json_opt_ = UnaryRangeExtract<uint16_t>(expr);
return;
case DataType::UINT32:
json_opt_ = UnaryRangeExtract<uint32_t>(expr);
return;
case DataType::UINT64:
json_opt_ = UnaryRangeExtract<uint64_t>(expr);
return;
case DataType::DOUBLE:
json_opt_ = UnaryRangeExtract<double>(expr);
return;
@ -222,6 +242,18 @@ ShowExprVisitor::visit(BinaryRangeExpr& expr) {
case DataType::INT64:
json_opt_ = BinaryRangeExtract<int64_t>(expr);
return;
case DataType::UINT8:
json_opt_ = BinaryRangeExtract<uint8_t>(expr);
return;
case DataType::UINT16:
json_opt_ = BinaryRangeExtract<uint16_t>(expr);
return;
case DataType::UINT32:
json_opt_ = BinaryRangeExtract<uint32_t>(expr);
return;
case DataType::UINT64:
json_opt_ = BinaryRangeExtract<uint64_t>(expr);
return;
case DataType::DOUBLE:
json_opt_ = BinaryRangeExtract<double>(expr);
return;
@ -286,6 +318,18 @@ ShowExprVisitor::visit(BinaryArithOpEvalRangeExpr& expr) {
case DataType::INT64:
json_opt_ = BinaryArithOpEvalRangeExtract<int64_t>(expr);
return;
case DataType::UINT8:
json_opt_ = BinaryArithOpEvalRangeExtract<uint8_t>(expr);
return;
case DataType::UINT16:
json_opt_ = BinaryArithOpEvalRangeExtract<uint16_t>(expr);
return;
case DataType::UINT32:
json_opt_ = BinaryArithOpEvalRangeExtract<uint32_t>(expr);
return;
case DataType::UINT64:
json_opt_ = BinaryArithOpEvalRangeExtract<uint64_t>(expr);
return;
case DataType::DOUBLE:
json_opt_ = BinaryArithOpEvalRangeExtract<double>(expr);
return;

View File

@ -50,6 +50,24 @@ VectorBase::set_data_raw(ssize_t element_offset,
case DataType::INT64: {
return set_data_raw(element_offset, data->scalars().long_data().data().data(), element_count);
}
case DataType::UINT8: {
auto src_data = data->scalars().uint_data().data();
std::vector<uint8_t> data_raw(src_data.size());
std::copy_n(src_data.data(), src_data.size(), data_raw.data());
return set_data_raw(element_offset, data_raw.data(), element_count);
}
case DataType::UINT16: {
auto src_data = data->scalars().uint_data().data();
std::vector<uint16_t> data_raw(src_data.size());
std::copy_n(src_data.data(), src_data.size(), data_raw.data());
return set_data_raw(element_offset, data_raw.data(), element_count);
}
case DataType::UINT32: {
return set_data_raw(element_offset, data->scalars().uint_data().data().data(), element_count);
}
case DataType::UINT64: {
return set_data_raw(element_offset, data->scalars().ulong_data().data().data(), element_count);
}
case DataType::FLOAT: {
return set_data_raw(element_offset, data->scalars().float_data().data().data(), element_count);
}
@ -102,6 +120,24 @@ VectorBase::fill_chunk_data(ssize_t element_count, const DataArray* data, const
case DataType::INT64: {
return fill_chunk_data(data->scalars().long_data().data().data(), element_count);
}
case DataType::UINT8: {
auto src_data = data->scalars().uint_data().data();
std::vector<uint8_t> data_raw(src_data.size());
std::copy_n(src_data.data(), src_data.size(), data_raw.data());
return fill_chunk_data(data_raw.data(), element_count);
}
case DataType::UINT16: {
auto src_data = data->scalars().uint_data().data();
std::vector<uint16_t> data_raw(src_data.size());
std::copy_n(src_data.data(), src_data.size(), data_raw.data());
return fill_chunk_data(data_raw.data(), element_count);
}
case DataType::UINT32: {
return fill_chunk_data(data->scalars().uint_data().data().data(), element_count);
}
case DataType::UINT64: {
return fill_chunk_data(data->scalars().ulong_data().data().data(), element_count);
}
case DataType::FLOAT: {
return fill_chunk_data(data->scalars().float_data().data().data(), element_count);
}

View File

@ -118,6 +118,14 @@ CreateIndex(const FieldMeta& field_meta, const SegcoreConfig& segcore_config) {
return std::make_unique<ScalarFieldIndexing<int32_t>>(field_meta, segcore_config);
case DataType::INT64:
return std::make_unique<ScalarFieldIndexing<int64_t>>(field_meta, segcore_config);
case DataType::UINT8:
return std::make_unique<ScalarFieldIndexing<uint8_t>>(field_meta, segcore_config);
case DataType::UINT16:
return std::make_unique<ScalarFieldIndexing<uint16_t>>(field_meta, segcore_config);
case DataType::UINT32:
return std::make_unique<ScalarFieldIndexing<uint32_t>>(field_meta, segcore_config);
case DataType::UINT64:
return std::make_unique<ScalarFieldIndexing<uint64_t>>(field_meta, segcore_config);
case DataType::FLOAT:
return std::make_unique<ScalarFieldIndexing<float>>(field_meta, segcore_config);
case DataType::DOUBLE:

View File

@ -196,6 +196,22 @@ struct InsertRecord {
this->append_field_data<int64_t>(field_id, size_per_chunk);
break;
}
case DataType::UINT8: {
this->append_field_data<uint8_t>(field_id, size_per_chunk);
break;
}
case DataType::UINT16: {
this->append_field_data<uint16_t>(field_id, size_per_chunk);
break;
}
case DataType::UINT32: {
this->append_field_data<uint32_t>(field_id, size_per_chunk);
break;
}
case DataType::UINT64: {
this->append_field_data<uint64_t>(field_id, size_per_chunk);
break;
}
case DataType::FLOAT: {
this->append_field_data<float>(field_id, size_per_chunk);
break;

View File

@ -232,6 +232,26 @@ SegmentGrowingImpl::bulk_subscript(FieldId field_id, const int64_t* seg_offsets,
bulk_subscript_impl<int64_t>(*vec_ptr, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT8: {
FixedVector<bool> output(count);
bulk_subscript_impl<uint8_t>(*vec_ptr, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT16: {
FixedVector<uint16_t> output(count);
bulk_subscript_impl<uint16_t>(*vec_ptr, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT32: {
FixedVector<uint32_t> output(count);
bulk_subscript_impl<uint32_t>(*vec_ptr, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT64: {
FixedVector<uint64_t> output(count);
bulk_subscript_impl<uint64_t>(*vec_ptr, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::FLOAT: {
FixedVector<float> output(count);
bulk_subscript_impl<float>(*vec_ptr, seg_offsets, count, output.data());

View File

@ -483,6 +483,22 @@ SegmentSealedImpl::fill_with_empty(FieldId field_id, int64_t count) const {
FixedVector<int64_t> output(count);
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT8: {
FixedVector<uint8_t> output(count);
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT16: {
FixedVector<uint16_t> output(count);
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT32: {
FixedVector<uint32_t> output(count);
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT64: {
FixedVector<uint64_t> output(count);
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::FLOAT: {
FixedVector<float> output(count);
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
@ -560,6 +576,26 @@ SegmentSealedImpl::bulk_subscript(FieldId field_id, const int64_t* seg_offsets,
bulk_subscript_impl<int64_t>(src_vec, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT8: {
FixedVector<uint8_t> output(count);
bulk_subscript_impl<uint8_t>(src_vec, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT16: {
FixedVector<uint16_t> output(count);
bulk_subscript_impl<uint16_t>(src_vec, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT32: {
FixedVector<uint32_t> output(count);
bulk_subscript_impl<uint32_t>(src_vec, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::UINT64: {
FixedVector<uint64_t> output(count);
bulk_subscript_impl<uint64_t>(src_vec, seg_offsets, count, output.data());
return CreateScalarDataArrayFrom(output.data(), count, field_meta);
}
case DataType::FLOAT: {
FixedVector<float> output(count);
bulk_subscript_impl<float>(src_vec, seg_offsets, count, output.data());

View File

@ -106,6 +106,30 @@ CreateScalarDataArrayFrom(const void* data_raw, int64_t count, const FieldMeta&
obj->mutable_data()->Add(data, data + count);
break;
}
case DataType::UINT8: {
auto data = reinterpret_cast<const uint8_t*>(data_raw);
auto obj = scalar_array->mutable_uint_data();
obj->mutable_data()->Add(data, data + count);
break;
}
case DataType::UINT16: {
auto data = reinterpret_cast<const uint16_t*>(data_raw);
auto obj = scalar_array->mutable_uint_data();
obj->mutable_data()->Add(data, data + count);
break;
}
case DataType::UINT32: {
auto data = reinterpret_cast<const uint32_t*>(data_raw);
auto obj = scalar_array->mutable_uint_data();
obj->mutable_data()->Add(data, data + count);
break;
}
case DataType::UINT64: {
auto data = reinterpret_cast<const uint64_t*>(data_raw);
auto obj = scalar_array->mutable_ulong_data();
obj->mutable_data()->Add(data, data + count);
break;
}
case DataType::FLOAT: {
auto data = reinterpret_cast<const float*>(data_raw);
auto obj = scalar_array->mutable_float_data();
@ -230,6 +254,20 @@ MergeDataArray(std::vector<std::pair<milvus::SearchResult*, int64_t>>& result_of
*(obj->mutable_data()->Add()) = data[src_offset];
continue;
}
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32: {
auto data = src_field_data->scalars().uint_data().data().data();
auto obj = scalar_array->mutable_uint_data();
*(obj->mutable_data()->Add()) = data[src_offset];
continue;
}
case DataType::UINT64: {
auto data = src_field_data->scalars().ulong_data().data().data();
auto obj = scalar_array->mutable_ulong_data();
*(obj->mutable_data()->Add()) = data[src_offset];
continue;
}
case DataType::FLOAT: {
auto data = src_field_data->scalars().float_data().data().data();
auto obj = scalar_array->mutable_float_data();
@ -325,6 +363,50 @@ ReverseDataFromIndex(const index::IndexBase* index,
*(obj->mutable_data()) = {raw_data.begin(), raw_data.end()};
break;
}
case DataType::UINT8: {
using IndexType = index::ScalarIndex<uint8_t>;
auto ptr = dynamic_cast<const IndexType*>(index);
std::vector<uint8_t> raw_data(count);
for (int64_t i = 0; i < count; ++i) {
raw_data[i] = ptr->Reverse_Lookup(seg_offsets[i]);
}
auto obj = scalar_array->mutable_uint_data();
*(obj->mutable_data()) = {raw_data.begin(), raw_data.end()};
break;
}
case DataType::UINT16: {
using IndexType = index::ScalarIndex<uint16_t>;
auto ptr = dynamic_cast<const IndexType*>(index);
std::vector<uint16_t> raw_data(count);
for (int64_t i = 0; i < count; ++i) {
raw_data[i] = ptr->Reverse_Lookup(seg_offsets[i]);
}
auto obj = scalar_array->mutable_uint_data();
*(obj->mutable_data()) = {raw_data.begin(), raw_data.end()};
break;
}
case DataType::UINT32: {
using IndexType = index::ScalarIndex<uint32_t>;
auto ptr = dynamic_cast<const IndexType*>(index);
std::vector<uint32_t> raw_data(count);
for (int64_t i = 0; i < count; ++i) {
raw_data[i] = ptr->Reverse_Lookup(seg_offsets[i]);
}
auto obj = scalar_array->mutable_uint_data();
*(obj->mutable_data()) = {raw_data.begin(), raw_data.end()};
break;
}
case DataType::UINT64: {
using IndexType = index::ScalarIndex<uint64_t>;
auto ptr = dynamic_cast<const IndexType*>(index);
std::vector<uint64_t> raw_data(count);
for (int64_t i = 0; i < count; ++i) {
raw_data[i] = ptr->Reverse_Lookup(seg_offsets[i]);
}
auto obj = scalar_array->mutable_ulong_data();
*(obj->mutable_data()) = {raw_data.begin(), raw_data.end()};
break;
}
case DataType::FLOAT: {
using IndexType = index::ScalarIndex<float>;
auto ptr = dynamic_cast<const IndexType*>(index);

View File

@ -76,9 +76,12 @@ PayloadWriter::finish() {
auto table = arrow::Table::Make(schema_, {array});
output_ = std::make_shared<storage::PayloadOutputStream>();
auto mem_pool = arrow::default_memory_pool();
ast = parquet::arrow::WriteTable(
*table, mem_pool, output_, 1024 * 1024 * 1024,
parquet::WriterProperties::Builder().compression(arrow::Compression::ZSTD)->compression_level(3)->build());
ast = parquet::arrow::WriteTable(*table, mem_pool, output_, 1024 * 1024 * 1024,
parquet::WriterProperties::Builder()
.compression(arrow::Compression::ZSTD)
->compression_level(3)
->version(parquet::ParquetVersion::PARQUET_2_4)
->build());
AssertInfo(ast.ok(), "write data to output stream failed");
}

View File

@ -89,6 +89,26 @@ AddPayloadToArrowBuilder(std::shared_ptr<arrow::ArrayBuilder> builder, const Pay
add_numeric_payload<int64_t, arrow::Int64Builder>(builder, int64_data, length);
break;
}
case DataType::UINT8: {
auto uint8_data = reinterpret_cast<uint8_t*>(raw_data);
add_numeric_payload<uint8_t, arrow::UInt8Builder>(builder, uint8_data, length);
break;
}
case DataType::UINT16: {
auto uint16_data = reinterpret_cast<uint16_t*>(raw_data);
add_numeric_payload<uint16_t, arrow::UInt16Builder>(builder, uint16_data, length);
break;
}
case DataType::UINT32: {
auto uint32_data = reinterpret_cast<uint32_t*>(raw_data);
add_numeric_payload<uint32_t, arrow::UInt32Builder>(builder, uint32_data, length);
break;
}
case DataType::UINT64: {
auto uint64_data = reinterpret_cast<uint64_t*>(raw_data);
add_numeric_payload<uint64_t, arrow::UInt64Builder>(builder, uint64_data, length);
break;
}
case DataType::FLOAT: {
auto float_data = reinterpret_cast<float*>(raw_data);
add_numeric_payload<float, arrow::FloatBuilder>(builder, float_data, length);
@ -141,6 +161,18 @@ CreateArrowBuilder(DataType data_type) {
case DataType::INT64: {
return std::make_shared<arrow::Int64Builder>();
}
case DataType::UINT8: {
return std::make_shared<arrow::UInt8Builder>();
}
case DataType::UINT16: {
return std::make_shared<arrow::UInt16Builder>();
}
case DataType::UINT32: {
return std::make_shared<arrow::UInt32Builder>();
}
case DataType::UINT64: {
return std::make_shared<arrow::UInt64Builder>();
}
case DataType::FLOAT: {
return std::make_shared<arrow::FloatBuilder>();
}
@ -192,6 +224,18 @@ CreateArrowSchema(DataType data_type) {
case DataType::INT64: {
return arrow::schema({arrow::field("val", arrow::int64())});
}
case DataType::UINT8: {
return arrow::schema({arrow::field("val", arrow::uint8())});
}
case DataType::UINT16: {
return arrow::schema({arrow::field("val", arrow::uint16())});
}
case DataType::UINT32: {
return arrow::schema({arrow::field("val", arrow::uint32())});
}
case DataType::UINT64: {
return arrow::schema({arrow::field("val", arrow::uint64())});
}
case DataType::FLOAT: {
return arrow::schema({arrow::field("val", arrow::float32())});
}
@ -239,6 +283,14 @@ GetPayloadSize(const Payload* payload) {
return payload->rows * sizeof(int32_t);
case DataType::INT64:
return payload->rows * sizeof(int64_t);
case DataType::UINT8:
return payload->rows * sizeof(uint8_t);
case DataType::UINT16:
return payload->rows * sizeof(uint16_t);
case DataType::UINT32:
return payload->rows * sizeof(uint32_t);
case DataType::UINT64:
return payload->rows * sizeof(uint64_t);
case DataType::FLOAT:
return payload->rows * sizeof(float);
case DataType::DOUBLE:
@ -279,6 +331,26 @@ GetRawValuesFromArrowArray(std::shared_ptr<arrow::Array> data, DataType data_typ
auto array = std::dynamic_pointer_cast<arrow::Int64Array>(data);
return reinterpret_cast<const uint8_t*>(array->raw_values());
}
case DataType::UINT8: {
AssertInfo(data->type()->id() == arrow::Type::type::UINT8, "inconsistent data type");
auto array = std::dynamic_pointer_cast<arrow::UInt8Array>(data);
return reinterpret_cast<const uint8_t*>(array->raw_values());
}
case DataType::UINT16: {
AssertInfo(data->type()->id() == arrow::Type::type::UINT16, "inconsistent data type");
auto array = std::dynamic_pointer_cast<arrow::UInt16Array>(data);
return reinterpret_cast<const uint8_t*>(array->raw_values());
}
case DataType::UINT32: {
AssertInfo(data->type()->id() == arrow::Type::type::UINT32, "inconsistent data type");
auto array = std::dynamic_pointer_cast<arrow::UInt32Array>(data);
return reinterpret_cast<const uint8_t*>(array->raw_values());
}
case DataType::UINT64: {
AssertInfo(data->type()->id() == arrow::Type::type::UINT64, "inconsistent data type");
auto array = std::dynamic_pointer_cast<arrow::UInt64Array>(data);
return reinterpret_cast<const uint8_t*>(array->raw_values());
}
case DataType::FLOAT: {
AssertInfo(data->type()->id() == arrow::Type::type::FLOAT, "inconsistent data type");
auto array = std::dynamic_pointer_cast<arrow::FloatArray>(data);

View File

@ -90,6 +90,30 @@ AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t* values, int length) {
return AddValuesToPayload(payloadWriter, raw_data_info);
}
extern "C" CStatus
AddUInt8ToPayload(CPayloadWriter payloadWriter, uint8_t* values, int length) {
auto raw_data_info = Payload{milvus::DataType::UINT8, reinterpret_cast<const uint8_t*>(values), length};
return AddValuesToPayload(payloadWriter, raw_data_info);
}
extern "C" CStatus
AddUInt16ToPayload(CPayloadWriter payloadWriter, uint16_t* values, int length) {
auto raw_data_info = Payload{milvus::DataType::UINT16, reinterpret_cast<const uint8_t*>(values), length};
return AddValuesToPayload(payloadWriter, raw_data_info);
}
extern "C" CStatus
AddUInt32ToPayload(CPayloadWriter payloadWriter, uint32_t* values, int length) {
auto raw_data_info = Payload{milvus::DataType::UINT32, reinterpret_cast<const uint8_t*>(values), length};
return AddValuesToPayload(payloadWriter, raw_data_info);
}
extern "C" CStatus
AddUInt64ToPayload(CPayloadWriter payloadWriter, uint64_t* values, int length) {
auto raw_data_info = Payload{milvus::DataType::UINT64, reinterpret_cast<const uint8_t*>(values), length};
return AddValuesToPayload(payloadWriter, raw_data_info);
}
extern "C" CStatus
AddFloatToPayload(CPayloadWriter payloadWriter, float* values, int length) {
auto raw_data_info = Payload{milvus::DataType::FLOAT, reinterpret_cast<const uint8_t*>(values), length};
@ -188,6 +212,10 @@ NewPayloadReader(int columnType, uint8_t* buffer, int64_t buf_size) {
case milvus::DataType::INT16:
case milvus::DataType::INT32:
case milvus::DataType::INT64:
case milvus::DataType::UINT8:
case milvus::DataType::UINT16:
case milvus::DataType::UINT32:
case milvus::DataType::UINT64:
case milvus::DataType::FLOAT:
case milvus::DataType::DOUBLE:
case milvus::DataType::STRING:
@ -200,7 +228,6 @@ NewPayloadReader(int columnType, uint8_t* buffer, int64_t buf_size) {
return nullptr;
}
}
auto p = std::make_unique<PayloadReader>(buffer, buf_size, column_type);
return reinterpret_cast<CPayloadReader>(p.release());
}
@ -272,6 +299,62 @@ GetInt64FromPayload(CPayloadReader payloadReader, int64_t** values, int* length)
}
}
extern "C" CStatus
GetUInt8FromPayload(CPayloadReader payloadReader, uint8_t** values, int* length) {
try {
auto p = reinterpret_cast<PayloadReader*>(payloadReader);
auto ret = p->get_payload();
auto raw_data = const_cast<uint8_t*>(ret->raw_data);
*values = reinterpret_cast<uint8_t*>(raw_data);
*length = ret->rows;
return milvus::SuccessCStatus();
} catch (std::exception& e) {
return milvus::FailureCStatus(UnexpectedError, e.what());
}
}
extern "C" CStatus
GetUInt16FromPayload(CPayloadReader payloadReader, uint16_t** values, int* length) {
try {
auto p = reinterpret_cast<PayloadReader*>(payloadReader);
auto ret = p->get_payload();
auto raw_data = const_cast<uint8_t*>(ret->raw_data);
*values = reinterpret_cast<uint16_t*>(raw_data);
*length = ret->rows;
return milvus::SuccessCStatus();
} catch (std::exception& e) {
return milvus::FailureCStatus(UnexpectedError, e.what());
}
}
extern "C" CStatus
GetUInt32FromPayload(CPayloadReader payloadReader, uint32_t** values, int* length) {
try {
auto p = reinterpret_cast<PayloadReader*>(payloadReader);
auto ret = p->get_payload();
auto raw_data = const_cast<uint8_t*>(ret->raw_data);
*values = reinterpret_cast<uint32_t*>(raw_data);
*length = ret->rows;
return milvus::SuccessCStatus();
} catch (std::exception& e) {
return milvus::FailureCStatus(UnexpectedError, e.what());
}
}
extern "C" CStatus
GetUInt64FromPayload(CPayloadReader payloadReader, uint64_t** values, int* length) {
try {
auto p = reinterpret_cast<PayloadReader*>(payloadReader);
auto ret = p->get_payload();
auto raw_data = const_cast<uint8_t*>(ret->raw_data);
*values = reinterpret_cast<uint64_t*>(raw_data);
*length = ret->rows;
return milvus::SuccessCStatus();
} catch (std::exception& e) {
return milvus::FailureCStatus(UnexpectedError, e.what());
}
}
extern "C" CStatus
GetFloatFromPayload(CPayloadReader payloadReader, float** values, int* length) {
try {

View File

@ -47,6 +47,14 @@ AddInt32ToPayload(CPayloadWriter payloadWriter, int32_t* values, int length);
CStatus
AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t* values, int length);
CStatus
AddUInt8ToPayload(CPayloadWriter payloadWriter, uint8_t* values, int length);
CStatus
AddUInt16ToPayload(CPayloadWriter payloadWriter, uint16_t* values, int length);
CStatus
AddUInt32ToPayload(CPayloadWriter payloadWriter, uint32_t* values, int length);
CStatus
AddUInt64ToPayload(CPayloadWriter payloadWriter, uint64_t* values, int length);
CStatus
AddFloatToPayload(CPayloadWriter payloadWriter, float* values, int length);
CStatus
AddDoubleToPayload(CPayloadWriter payloadWriter, double* values, int length);
@ -81,6 +89,14 @@ GetInt32FromPayload(CPayloadReader payloadReader, int32_t** values, int* length)
CStatus
GetInt64FromPayload(CPayloadReader payloadReader, int64_t** values, int* length);
CStatus
GetUInt8FromPayload(CPayloadReader payloadReader, uint8_t** values, int* length);
CStatus
GetUInt16FromPayload(CPayloadReader payloadReader, uint16_t** values, int* length);
CStatus
GetUInt32FromPayload(CPayloadReader payloadReader, uint32_t** values, int* length);
CStatus
GetUInt64FromPayload(CPayloadReader payloadReader, uint64_t** values, int* length);
CStatus
GetFloatFromPayload(CPayloadReader payloadReader, float** values, int* length);
CStatus
GetDoubleFromPayload(CPayloadReader payloadReader, double** values, int* length);

View File

@ -3367,6 +3367,10 @@ TEST(CApiTest, RetriveScalarFieldFromSealedSegmentWithIndex) {
auto i16_fid = schema->AddDebugField("age16", DataType::INT16);
auto i32_fid = schema->AddDebugField("age32", DataType::INT32);
auto i64_fid = schema->AddDebugField("age64", DataType::INT64);
auto u8_fid = schema->AddDebugField("ageu8", DataType::UINT8);
auto u16_fid = schema->AddDebugField("ageu16", DataType::UINT16);
auto u32_fid = schema->AddDebugField("ageu32", DataType::UINT32);
auto u64_fid = schema->AddDebugField("ageu64", DataType::UINT64);
auto float_fid = schema->AddDebugField("age_float", DataType::FLOAT);
auto double_fid = schema->AddDebugField("age_double", DataType::DOUBLE);
schema->set_primary_field_id(i64_fid);
@ -3407,7 +3411,7 @@ TEST(CApiTest, RetriveScalarFieldFromSealedSegmentWithIndex) {
load_index_info.index = std::move(age8_index);
segment->LoadIndex(load_index_info);
// load index for 16 field
// load index for int16 field
auto age16_col = raw_data.get_col<int16_t>(i16_fid);
GenScalarIndexing(N, age16_col.data());
auto age16_index = milvus::index::CreateScalarIndexSort<int16_t>();
@ -3437,6 +3441,46 @@ TEST(CApiTest, RetriveScalarFieldFromSealedSegmentWithIndex) {
load_index_info.index = std::move(age64_index);
segment->LoadIndex(load_index_info);
// load index for uint8 field
auto ageu8_col = raw_data.get_col<uint8_t>(u8_fid);
GenScalarIndexing(N, ageu8_col.data());
auto ageu8_index = milvus::index::CreateScalarIndexSort<uint8_t>();
ageu8_index->Build(N, ageu8_col.data());
load_index_info.field_id = u8_fid.get();
load_index_info.field_type = DataType::UINT8;
load_index_info.index = std::move(ageu8_index);
segment->LoadIndex(load_index_info);
// load index for uint16 field
auto ageu16_col = raw_data.get_col<uint16_t>(u16_fid);
GenScalarIndexing(N, ageu16_col.data());
auto ageu16_index = milvus::index::CreateScalarIndexSort<uint16_t>();
ageu16_index->Build(N, ageu16_col.data());
load_index_info.field_id = u16_fid.get();
load_index_info.field_type = DataType::UINT16;
load_index_info.index = std::move(ageu16_index);
segment->LoadIndex(load_index_info);
// load index for uint32 field
auto ageu32_col = raw_data.get_col<uint32_t>(u32_fid);
GenScalarIndexing(N, ageu32_col.data());
auto ageu32_index = milvus::index::CreateScalarIndexSort<uint32_t>();
ageu32_index->Build(N, ageu32_col.data());
load_index_info.field_id = u32_fid.get();
load_index_info.field_type = DataType::UINT32;
load_index_info.index = std::move(ageu32_index);
segment->LoadIndex(load_index_info);
// load index for uint64 field
auto ageu64_col = raw_data.get_col<uint64_t>(u64_fid);
GenScalarIndexing(N, ageu64_col.data());
auto ageu64_index = milvus::index::CreateScalarIndexSort<uint64_t>();
ageu64_index->Build(N, ageu64_col.data());
load_index_info.field_id = u64_fid.get();
load_index_info.field_type = DataType::UINT64;
load_index_info.index = std::move(ageu64_index);
segment->LoadIndex(load_index_info);
// load index for float field
auto age_float_col = raw_data.get_col<float>(float_fid);
GenScalarIndexing(N, age_float_col.data());
@ -3495,6 +3539,22 @@ TEST(CApiTest, RetriveScalarFieldFromSealedSegmentWithIndex) {
ASSERT_EQ(iter->scalars().long_data().data(0), age64_col[0]);
break;
}
case proto::schema::DataType::UInt8: {
ASSERT_EQ(iter->scalars().uint_data().data(0), ageu8_col[0]);
break;
}
case proto::schema::DataType::UInt16: {
ASSERT_EQ(iter->scalars().uint_data().data(0), ageu16_col[0]);
break;
}
case proto::schema::DataType::UInt32: {
ASSERT_EQ(iter->scalars().uint_data().data(0), ageu32_col[0]);
break;
}
case proto::schema::DataType::UInt64: {
ASSERT_EQ(iter->scalars().ulong_data().data(0), ageu64_col[0]);
break;
}
case proto::schema::DataType::Float: {
ASSERT_EQ(iter->scalars().float_data().data(0), age_float_col[0]);
break;

View File

@ -741,7 +741,7 @@ TEST(Expr, TestCompareWithScalarIndexMaris) {
load_index_info.field_type = DataType::VARCHAR;
load_index_info.index = std::move(str2_index);
seg->LoadIndex(load_index_info);
//
ExecExprVisitor visitor(*seg, seg->get_row_count(), MAX_TIMESTAMP);
for (auto [clause, ref_func] : testcases) {
auto dsl_string =
@ -795,6 +795,34 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
}
})",
[](int64_t v) { return (v / 2) == 1000; }, DataType::INT64},
{R"("EQ": {
"ADD": {
"right_operand": 4,
"value": 8
}
})",
[](uint8_t v) { return (v + 4) == 8; }, DataType::UINT8},
{R"("EQ": {
"SUB": {
"right_operand": 500,
"value": 1500
}
})",
[](uint16_t v) { return (v - 500) == 1500; }, DataType::UINT16},
{R"("EQ": {
"MUL": {
"right_operand": 2,
"value": 4000
}
})",
[](uint32_t v) { return (v * 2) == 4000; }, DataType::UINT32},
{R"("EQ": {
"DIV": {
"right_operand": 2,
"value": 1000
}
})",
[](uint64_t v) { return (v / 2) == 1000; }, DataType::UINT64},
{R"("EQ": {
"MOD": {
"right_operand": 100,
@ -859,6 +887,34 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
}
})",
[](int64_t v) { return (v + 500) != 2500; }, DataType::INT64},
{R"("NE": {
"MUL": {
"right_operand": 2,
"value": 2
}
})",
[](uint8_t v) { return (v * 2) != 2; }, DataType::UINT8},
{R"("NE": {
"DIV": {
"right_operand": 2,
"value": 1000
}
})",
[](uint16_t v) { return (v / 2) != 1000; }, DataType::UINT16},
{R"("NE": {
"MOD": {
"right_operand": 100,
"value": 0
}
})",
[](uint32_t v) { return (v % 100) != 0; }, DataType::UINT32},
{R"("NE": {
"ADD": {
"right_operand": 500,
"value": 2500
}
})",
[](uint64_t v) { return (v + 500) != 2500; }, DataType::UINT64},
};
std::string dsl_string_tmp = R"({
@ -906,6 +962,26 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
@@@@
})";
std::string dsl_string_uint8 = R"(
"ageu8": {
@@@@
})";
std::string dsl_string_uint16 = R"(
"ageu16": {
@@@@
})";
std::string dsl_string_uint32 = R"(
"ageu32": {
@@@@
})";
std::string dsl_string_uint64 = R"(
"ageu64": {
@@@@
})";
std::string dsl_string_float = R"(
"age_float": {
@@@@
@ -922,6 +998,10 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
auto i16_fid = schema->AddDebugField("age16", DataType::INT16);
auto i32_fid = schema->AddDebugField("age32", DataType::INT32);
auto i64_fid = schema->AddDebugField("age64", DataType::INT64);
auto u8_fid = schema->AddDebugField("ageu8", DataType::UINT8);
auto u16_fid = schema->AddDebugField("ageu16", DataType::UINT16);
auto u32_fid = schema->AddDebugField("ageu32", DataType::UINT32);
auto u64_fid = schema->AddDebugField("ageu64", DataType::UINT64);
auto float_fid = schema->AddDebugField("age_float", DataType::FLOAT);
auto double_fid = schema->AddDebugField("age_double", DataType::DOUBLE);
schema->set_primary_field_id(i64_fid);
@ -932,6 +1012,10 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
std::vector<int16_t> age16_col;
std::vector<int32_t> age32_col;
std::vector<int64_t> age64_col;
std::vector<uint8_t> ageu8_col;
std::vector<uint16_t> ageu16_col;
std::vector<uint32_t> ageu32_col;
std::vector<uint64_t> ageu64_col;
std::vector<float> age_float_col;
std::vector<double> age_double_col;
int num_iters = 100;
@ -942,6 +1026,10 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
auto new_age16_col = raw_data.get_col<int16_t>(i16_fid);
auto new_age32_col = raw_data.get_col<int32_t>(i32_fid);
auto new_age64_col = raw_data.get_col<int64_t>(i64_fid);
auto new_ageu8_col = raw_data.get_col<uint8_t>(u8_fid);
auto new_ageu16_col = raw_data.get_col<uint16_t>(u16_fid);
auto new_ageu32_col = raw_data.get_col<uint32_t>(u32_fid);
auto new_ageu64_col = raw_data.get_col<uint64_t>(u64_fid);
auto new_age_float_col = raw_data.get_col<float>(float_fid);
auto new_age_double_col = raw_data.get_col<double>(double_fid);
@ -949,6 +1037,10 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
age16_col.insert(age16_col.end(), new_age16_col.begin(), new_age16_col.end());
age32_col.insert(age32_col.end(), new_age32_col.begin(), new_age32_col.end());
age64_col.insert(age64_col.end(), new_age64_col.begin(), new_age64_col.end());
ageu8_col.insert(ageu8_col.end(), new_ageu8_col.begin(), new_ageu8_col.end());
ageu16_col.insert(ageu16_col.end(), new_ageu16_col.begin(), new_ageu16_col.end());
ageu32_col.insert(ageu32_col.end(), new_ageu32_col.begin(), new_ageu32_col.end());
ageu64_col.insert(ageu64_col.end(), new_ageu64_col.begin(), new_ageu64_col.end());
age_float_col.insert(age_float_col.end(), new_age_float_col.begin(), new_age_float_col.end());
age_double_col.insert(age_double_col.end(), new_age_double_col.begin(), new_age_double_col.end());
@ -969,6 +1061,14 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
dsl_string.replace(loc, 5, dsl_string_int32);
} else if (dtype == DataType::INT64) {
dsl_string.replace(loc, 5, dsl_string_int64);
} else if (dtype == DataType::UINT8) {
dsl_string.replace(loc, 5, dsl_string_uint8);
} else if (dtype == DataType::UINT16) {
dsl_string.replace(loc, 5, dsl_string_uint16);
} else if (dtype == DataType::UINT32) {
dsl_string.replace(loc, 5, dsl_string_uint32);
} else if (dtype == DataType::UINT64) {
dsl_string.replace(loc, 5, dsl_string_uint64);
} else if (dtype == DataType::FLOAT) {
dsl_string.replace(loc, 5, dsl_string_float);
} else if (dtype == DataType::DOUBLE) {
@ -1000,6 +1100,22 @@ TEST(Expr, TestBinaryArithOpEvalRange) {
auto val = age64_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT8) {
auto val = ageu8_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT16) {
auto val = ageu16_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT32) {
auto val = ageu32_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT64) {
auto val = ageu64_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::FLOAT) {
auto val = age_float_col[i];
auto ref = ref_func(val);
@ -1181,6 +1297,42 @@ TEST(Expr, TestBinaryArithOpEvalRangeWithScalarSortIndex) {
int64_val: 1000
>)",
[](int64_t v) { return (v / 2) == 1000; }, DataType::INT64},
{R"(arith_op: Add
right_operand: <
int64_val: 4
>
op: Equal
value: <
int64_val: 8
>)",
[](uint8_t v) { return (v + 4) == 8; }, DataType::UINT8},
{R"(arith_op: Sub
right_operand: <
int64_val: 500
>
op: Equal
value: <
int64_val: 1500
>)",
[](uint16_t v) { return (v - 500) == 1500; }, DataType::UINT16},
{R"(arith_op: Mul
right_operand: <
int64_val: 2
>
op: Equal
value: <
int64_val: 4000
>)",
[](uint32_t v) { return (v * 2) == 4000; }, DataType::UINT32},
{R"(arith_op: Div
right_operand: <
int64_val: 2
>
op: Equal
value: <
int64_val: 1000
>)",
[](uint64_t v) { return (v / 2) == 1000; }, DataType::UINT64},
{R"(arith_op: Mod
right_operand: <
int64_val: 100
@ -1262,6 +1414,43 @@ TEST(Expr, TestBinaryArithOpEvalRangeWithScalarSortIndex) {
int64_val: 2000
>)",
[](int64_t v) { return (v + 500) != 2000; }, DataType::INT64},
{R"(arith_op: Mul
right_operand: <
int64_val: 2
>
op: NotEqual
value: <
int64_val: 2
>)",
[](uint8_t v) { return (v * 2) != 2; }, DataType::UINT8},
{R"(arith_op: Div
right_operand: <
int64_val: 2
>
op: NotEqual
value: <
int64_val: 2000
>)",
[](uint16_t v) { return (v / 2) != 2000; }, DataType::UINT16},
{R"(arith_op: Mod
right_operand: <
int64_val: 100
>
op: NotEqual
value: <
int64_val: 1
>)",
[](uint32_t v) { return (v % 100) != 1; }, DataType::UINT32},
{R"(arith_op: Add
right_operand: <
int64_val: 500
>
op: NotEqual
value: <
int64_val: 2000
>)",
[](uint64_t v) { return (v + 500) != 2000; }, DataType::UINT64},
};
std::string serialized_expr_plan = R"(vector_anns: <
@ -1293,6 +1482,10 @@ TEST(Expr, TestBinaryArithOpEvalRangeWithScalarSortIndex) {
auto i16_fid = schema->AddDebugField("age16", DataType::INT16);
auto i32_fid = schema->AddDebugField("age32", DataType::INT32);
auto i64_fid = schema->AddDebugField("age64", DataType::INT64);
auto u8_fid = schema->AddDebugField("ageu8", DataType::UINT8);
auto u16_fid = schema->AddDebugField("ageu16", DataType::UINT16);
auto u32_fid = schema->AddDebugField("ageu32", DataType::UINT32);
auto u64_fid = schema->AddDebugField("ageu64", DataType::UINT64);
auto float_fid = schema->AddDebugField("age_float", DataType::FLOAT);
auto double_fid = schema->AddDebugField("age_double", DataType::DOUBLE);
schema->set_primary_field_id(i64_fid);
@ -1346,6 +1539,50 @@ TEST(Expr, TestBinaryArithOpEvalRangeWithScalarSortIndex) {
load_index_info.index = std::move(age64_index);
seg->LoadIndex(load_index_info);
// load index for uint8 field
auto ageu8_col = raw_data.get_col<uint8_t>(u8_fid);
ageu8_col[0] = 4;
GenScalarIndexing(N, ageu8_col.data());
auto ageu8_index = milvus::index::CreateScalarIndexSort<uint8_t>();
ageu8_index->Build(N, ageu8_col.data());
load_index_info.field_id = u8_fid.get();
load_index_info.field_type = DataType::UINT8;
load_index_info.index = std::move(ageu8_index);
seg->LoadIndex(load_index_info);
// load index for uint16 field
auto ageu16_col = raw_data.get_col<uint16_t>(u16_fid);
ageu16_col[0] = 2000;
GenScalarIndexing(N, ageu16_col.data());
auto ageu16_index = milvus::index::CreateScalarIndexSort<uint16_t>();
ageu16_index->Build(N, ageu16_col.data());
load_index_info.field_id = u16_fid.get();
load_index_info.field_type = DataType::UINT16;
load_index_info.index = std::move(ageu16_index);
seg->LoadIndex(load_index_info);
// load index for uint32 field
auto ageu32_col = raw_data.get_col<uint32_t>(u32_fid);
ageu32_col[0] = 2000;
GenScalarIndexing(N, ageu32_col.data());
auto ageu32_index = milvus::index::CreateScalarIndexSort<uint32_t>();
ageu32_index->Build(N, ageu32_col.data());
load_index_info.field_id = u32_fid.get();
load_index_info.field_type = DataType::UINT32;
load_index_info.index = std::move(ageu32_index);
seg->LoadIndex(load_index_info);
// load index for uint64 field
auto ageu64_col = raw_data.get_col<uint64_t>(u64_fid);
ageu64_col[0] = 2000;
GenScalarIndexing(N, ageu64_col.data());
auto ageu64_index = milvus::index::CreateScalarIndexSort<uint64_t>();
ageu64_index->Build(N, ageu64_col.data());
load_index_info.field_id = u64_fid.get();
load_index_info.field_type = DataType::UINT64;
load_index_info.index = std::move(ageu64_index);
seg->LoadIndex(load_index_info);
// load index for float field
auto age_float_col = raw_data.get_col<float>(float_fid);
age_float_col[0] = 2000;
@ -1390,6 +1627,18 @@ TEST(Expr, TestBinaryArithOpEvalRangeWithScalarSortIndex) {
} else if (dtype == DataType::INT64) {
expr = boost::format(expr_plan) % vec_fid.get() % i64_fid.get() %
proto::schema::DataType_Name(int(DataType::INT64));
} else if (dtype == DataType::UINT8) {
expr = boost::format(expr_plan) % vec_fid.get() % u8_fid.get() %
proto::schema::DataType_Name(int(DataType::UINT8));
} else if (dtype == DataType::UINT16) {
expr = boost::format(expr_plan) % vec_fid.get() % u16_fid.get() %
proto::schema::DataType_Name(int(DataType::UINT16));
} else if (dtype == DataType::UINT32) {
expr = boost::format(expr_plan) % vec_fid.get() % u32_fid.get() %
proto::schema::DataType_Name(int(DataType::UINT32));
} else if (dtype == DataType::UINT64) {
expr = boost::format(expr_plan) % vec_fid.get() % u64_fid.get() %
proto::schema::DataType_Name(int(DataType::UINT64));
} else if (dtype == DataType::FLOAT) {
expr = boost::format(expr_plan) % vec_fid.get() % float_fid.get() %
proto::schema::DataType_Name(int(DataType::FLOAT));
@ -1424,6 +1673,22 @@ TEST(Expr, TestBinaryArithOpEvalRangeWithScalarSortIndex) {
auto val = age64_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT8) {
auto val = ageu8_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT16) {
auto val = ageu16_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT32) {
auto val = ageu32_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::UINT64) {
auto val = ageu64_col[i];
auto ref = ref_func(val);
ASSERT_EQ(ans, ref) << clause << "@" << i << "!!" << val;
} else if (dtype == DataType::FLOAT) {
auto val = age_float_col[i];
auto ref = ref_func(val);

View File

@ -20,6 +20,7 @@
#include "storage/parquet_c.h"
#include "storage/PayloadReader.h"
#include "storage/PayloadWriter.h"
#include "storage/Util.h"
namespace wrapper = milvus::storage;
@ -122,7 +123,7 @@ TEST(storage, boolean) {
#define NUMERIC_TEST(TEST_NAME, COLUMN_TYPE, DATA_TYPE, ADD_FUNC, GET_FUNC, ARRAY_TYPE) \
TEST(wrapper, TEST_NAME) { \
auto payload = NewPayloadWriter(COLUMN_TYPE); \
DATA_TYPE data[] = {-1, 1, -100, 100}; \
DATA_TYPE data[] = {DATA_TYPE(-1), 1, 101, 100}; \
\
auto st = ADD_FUNC(payload, data, 4); \
ASSERT_EQ(st.error_code, ErrorCode::Success); \
@ -156,6 +157,13 @@ NUMERIC_TEST(int8, int(milvus::DataType::INT8), int8_t, AddInt8ToPayload, GetInt
NUMERIC_TEST(int16, int(milvus::DataType::INT16), int16_t, AddInt16ToPayload, GetInt16FromPayload, arrow::Int16Array)
NUMERIC_TEST(int32, int(milvus::DataType::INT32), int32_t, AddInt32ToPayload, GetInt32FromPayload, arrow::Int32Array)
NUMERIC_TEST(int64, int(milvus::DataType::INT64), int64_t, AddInt64ToPayload, GetInt64FromPayload, arrow::Int64Array)
NUMERIC_TEST(uint8, int(milvus::DataType::UINT8), uint8_t, AddUInt8ToPayload, GetUInt8FromPayload, arrow::UInt8Array)
NUMERIC_TEST(
uint16, int(milvus::DataType::UINT16), uint16_t, AddUInt16ToPayload, GetUInt16FromPayload, arrow::UInt16Array)
NUMERIC_TEST(
uint32, int(milvus::DataType::UINT32), uint32_t, AddUInt32ToPayload, GetUInt32FromPayload, arrow::UInt32Array)
NUMERIC_TEST(
uint64, int(milvus::DataType::UINT64), uint64_t, AddUInt64ToPayload, GetUInt64FromPayload, arrow::UInt64Array)
NUMERIC_TEST(float32, int(milvus::DataType::FLOAT), float, AddFloatToPayload, GetFloatFromPayload, arrow::FloatArray)
NUMERIC_TEST(
float64, int(milvus::DataType::DOUBLE), double, AddDoubleToPayload, GetDoubleFromPayload, arrow::DoubleArray)

View File

@ -35,6 +35,10 @@ getStandardSchema() {
schema->AddDebugField("Int32Field", DataType::INT32);
schema->AddDebugField("Int16Field", DataType::INT16);
schema->AddDebugField("Int8Field", DataType::INT8);
schema->AddDebugField("UInt64Field", DataType::UINT64);
schema->AddDebugField("UInt32Field", DataType::UINT32);
schema->AddDebugField("UInt16Field", DataType::UINT16);
schema->AddDebugField("UInt8Field", DataType::UINT8);
schema->AddDebugField("DoubleField", DataType::DOUBLE);
schema->AddDebugField("FloatField", DataType::FLOAT);
return schema;
@ -58,7 +62,11 @@ INSTANTIATE_TEST_CASE_P(InstName,
std::make_tuple("Int64Field"), //
std::make_tuple("Int32Field"), //
std::make_tuple("Int16Field"), //
std::make_tuple("Int8Field") //
std::make_tuple("Int8Field"), //
std::make_tuple("UInt64Field"), //
std::make_tuple("UInt32Field"), //
std::make_tuple("UInt16Field"), //
std::make_tuple("UInt8Field") //
));
TEST_P(PlanProtoTest, Range) {

View File

@ -88,12 +88,11 @@ TEST(InsertRecordTest, growing_int64_t) {
auto i64_fid = schema->AddDebugField("age", DataType::INT64);
schema->set_primary_field_id(i64_fid);
auto record = milvus::segcore::InsertRecord<false>(*schema, int64_t(32));
const int N=100000;
const int N = 100000;
for (int i = 1; i <= N; i++)
record.insert_pk(PkType(int64_t(i)), int64_t(i));
for (int i = 1; i <= N; i++) record.insert_pk(PkType(int64_t(i)), int64_t(i));
for (int i = 1; i <= N; i++){
for (int i = 1; i <= N; i++) {
std::vector<SegOffset> offset = record.search_pk(PkType(int64_t(i)), int64_t(N + 1));
ASSERT_EQ(offset[0].get(), int64_t(i));
}
@ -108,10 +107,9 @@ TEST(InsertRecordTest, growing_string) {
auto record = milvus::segcore::InsertRecord<false>(*schema, int64_t(32));
const int N = 100000;
for (int i = 1; i <= N; i++)
record.insert_pk(PkType(std::to_string(i)), int64_t(i));
for (int i = 1; i <= N; i++) record.insert_pk(PkType(std::to_string(i)), int64_t(i));
for (int i = 1; i <= N; i++){
for (int i = 1; i <= N; i++) {
std::vector<SegOffset> offset = record.search_pk(std::to_string(i), int64_t(N + 1));
ASSERT_EQ(offset[0].get(), int64_t(i));
}
@ -126,11 +124,10 @@ TEST(InsertRecordTest, sealed_int64_t) {
auto record = milvus::segcore::InsertRecord<true>(*schema, int64_t(32));
const int N = 100000;
for (int i = N; i >= 1; i--)
record.insert_pk(PkType(int64_t(i)), int64_t(i));
for (int i = N; i >= 1; i--) record.insert_pk(PkType(int64_t(i)), int64_t(i));
record.seal_pks();
for (int i = 1;i <= N; i++){
for (int i = 1; i <= N; i++) {
std::vector<SegOffset> offset = record.search_pk(PkType(int64_t(i)), int64_t(N + 1));
ASSERT_EQ(offset[0].get(), int64_t(i));
}
@ -145,12 +142,11 @@ TEST(InsertRecordTest, sealed_string) {
auto record = milvus::segcore::InsertRecord<true>(*schema, int64_t(32));
const int N = 100000;
for (int i = 1; i <= N; i++)
record.insert_pk(PkType(std::to_string(i)), int64_t(i));
for (int i = 1; i <= N; i++) record.insert_pk(PkType(std::to_string(i)), int64_t(i));
record.seal_pks();
for (int i = 1; i <= N; i++){
for (int i = 1; i <= N; i++) {
std::vector<SegOffset> offset = record.search_pk(std::to_string(i), int64_t(N + 1));
ASSERT_EQ(offset[0].get(), int64_t(i));
}

View File

@ -87,6 +87,19 @@ struct GeneratedData {
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32: {
auto src_data =
reinterpret_cast<const uint32_t*>(target_field_data.scalars().uint_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::UINT64: {
auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().ulong_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::FLOAT: {
auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().float_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
@ -214,6 +227,38 @@ DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42, uint64_t ts_offset = 0,
insert_cols(data, N, field_meta);
break;
}
case DataType::UINT64: {
vector<uint64_t> data(N);
for (int i = 0; i < N; i++) {
data[i] = i / repeat_count;
}
insert_cols(data, N, field_meta);
break;
}
case DataType::UINT32: {
vector<uint> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::UINT16: {
vector<uint16_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::UINT8: {
vector<uint8_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::FLOAT: {
vector<float> data(N);
for (auto& x : data) {

View File

@ -55,7 +55,7 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
p, err := b.upload(context.TODO(), 1, 10, []*InsertData{iData}, []byte{}, dData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(p.inPaths))
assert.Equal(t, 16, len(p.inPaths))
assert.Equal(t, 1, len(p.statsPaths))
assert.Equal(t, 1, len(p.inPaths[0].GetBinlogs()))
assert.Equal(t, 1, len(p.statsPaths[0].GetBinlogs()))
@ -63,7 +63,7 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
p, err = b.upload(context.TODO(), 1, 10, []*InsertData{iData, iData}, []byte{}, dData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(p.inPaths))
assert.Equal(t, 16, len(p.inPaths))
assert.Equal(t, 1, len(p.statsPaths))
assert.Equal(t, 2, len(p.inPaths[0].GetBinlogs()))
assert.Equal(t, 1, len(p.statsPaths[0].GetBinlogs()))
@ -73,7 +73,7 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
in, err := b.uploadInsertLog(ctx, 1, 10, iData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(in))
assert.Equal(t, 16, len(in))
assert.Equal(t, 1, len(in[0].GetBinlogs()))
stats, err := b.uploadStatsLog(ctx, 1, 10, []byte{}, meta)
@ -350,8 +350,8 @@ func TestBinlogIOInnerMethods(t *testing.T) {
return
}
assert.NoError(t, err)
assert.Equal(t, 12, len(pin))
assert.Equal(t, 12, len(kvs))
assert.Equal(t, 16, len(pin))
assert.Equal(t, 16, len(kvs))
log.Debug("test paths",
zap.Any("kvs no.", len(kvs)),

View File

@ -718,6 +718,66 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
}
rst = data
case schemapb.DataType_UInt8:
var data = &storage.UInt8FieldData{
NumRows: numOfRows,
Data: make([]uint8, 0, len(content)),
}
for _, c := range content {
r, ok := c.(uint8)
if !ok {
return nil, errTransferType
}
data.Data = append(data.Data, r)
}
rst = data
case schemapb.DataType_UInt16:
var data = &storage.UInt16FieldData{
NumRows: numOfRows,
Data: make([]uint16, 0, len(content)),
}
for _, c := range content {
r, ok := c.(uint16)
if !ok {
return nil, errTransferType
}
data.Data = append(data.Data, r)
}
rst = data
case schemapb.DataType_UInt32:
var data = &storage.UInt32FieldData{
NumRows: numOfRows,
Data: make([]uint32, 0, len(content)),
}
for _, c := range content {
r, ok := c.(uint32)
if !ok {
return nil, errTransferType
}
data.Data = append(data.Data, r)
}
rst = data
case schemapb.DataType_UInt64:
var data = &storage.UInt64FieldData{
NumRows: numOfRows,
Data: make([]uint64, 0, len(content)),
}
for _, c := range content {
r, ok := c.(uint64)
if !ok {
return nil, errTransferType
}
data.Data = append(data.Data, r)
}
rst = data
case schemapb.DataType_Float:
var data = &storage.FloatFieldData{
NumRows: numOfRows,

View File

@ -95,6 +95,10 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
{true, schemapb.DataType_Int16, []interface{}{int16(1), int16(2)}, "valid int16"},
{true, schemapb.DataType_Int32, []interface{}{int32(1), int32(2)}, "valid int32"},
{true, schemapb.DataType_Int64, []interface{}{int64(1), int64(2)}, "valid int64"},
{true, schemapb.DataType_UInt8, []interface{}{uint8(1), uint8(2)}, "valid uint8"},
{true, schemapb.DataType_UInt16, []interface{}{uint16(1), uint16(2)}, "valid uint16"},
{true, schemapb.DataType_UInt32, []interface{}{uint32(1), uint32(2)}, "valid uint32"},
{true, schemapb.DataType_UInt64, []interface{}{uint64(1), uint64(2)}, "valid uint64"},
{true, schemapb.DataType_Float, []interface{}{float32(1), float32(2)}, "valid float32"},
{true, schemapb.DataType_Double, []interface{}{float64(1), float64(2)}, "valid float64"},
{true, schemapb.DataType_VarChar, []interface{}{"test1", "test2"}, "valid varChar"},
@ -105,6 +109,11 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
{false, schemapb.DataType_Int16, []interface{}{nil, nil}, "invalid int16"},
{false, schemapb.DataType_Int32, []interface{}{nil, nil}, "invalid int32"},
{false, schemapb.DataType_Int64, []interface{}{nil, nil}, "invalid int64"},
{false, schemapb.DataType_UInt8, []interface{}{nil, nil}, "invalid uint8"},
{false, schemapb.DataType_UInt16, []interface{}{nil, nil}, "invalid uint16"},
{false, schemapb.DataType_UInt32, []interface{}{nil, nil}, "invalid uint32"},
{false, schemapb.DataType_UInt64, []interface{}{nil, nil}, "invalid uint64"},
{false, schemapb.DataType_Float, []interface{}{nil, nil}, "invalid float32"},
{false, schemapb.DataType_Double, []interface{}{nil, nil}, "invalid float64"},
{false, schemapb.DataType_VarChar, []interface{}{nil, nil}, "invalid varChar"},
@ -279,7 +288,7 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
var allPaths [][]string
inpath, err := mockbIO.uploadInsertLog(context.Background(), 1, 0, iData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(inpath))
assert.Equal(t, 16, len(inpath))
binlogNum := len(inpath[0].GetBinlogs())
assert.Equal(t, 1, binlogNum)
@ -317,7 +326,7 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
var allPaths [][]string
inpath, err := mockbIO.uploadInsertLog(context.Background(), 1, 0, iData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(inpath))
assert.Equal(t, 16, len(inpath))
binlogNum := len(inpath[0].GetBinlogs())
assert.Equal(t, 1, binlogNum)
@ -350,7 +359,7 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
var allPaths [][]string
inpath, err := mockbIO.uploadInsertLog(context.Background(), 1, 0, iData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(inpath))
assert.Equal(t, 16, len(inpath))
binlogNum := len(inpath[0].GetBinlogs())
assert.Equal(t, 1, binlogNum)
@ -384,7 +393,7 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
var allPaths [][]string
inpath, err := mockbIO.uploadInsertLog(context.Background(), 1, 0, iData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(inpath))
assert.Equal(t, 16, len(inpath))
binlogNum := len(inpath[0].GetBinlogs())
assert.Equal(t, 1, binlogNum)
@ -421,7 +430,7 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
var allPaths [][]string
inpath, err := mockbIO.uploadInsertLog(context.Background(), 1, 0, iData, meta)
assert.NoError(t, err)
assert.Equal(t, 12, len(inpath))
assert.Equal(t, 16, len(inpath))
binlogNum := len(inpath[0].GetBinlogs())
assert.Equal(t, 1, binlogNum)
@ -643,11 +652,11 @@ func TestCompactorInterfaceMethods(t *testing.T) {
cpaths1, err := mockbIO.upload(context.TODO(), c.segID1, c.parID, []*InsertData{iData1}, []byte{}, dData1, meta)
require.NoError(t, err)
require.Equal(t, 12, len(cpaths1.inPaths))
require.Equal(t, 16, len(cpaths1.inPaths))
cpaths2, err := mockbIO.upload(context.TODO(), c.segID2, c.parID, []*InsertData{iData2}, []byte{}, dData2, meta)
require.NoError(t, err)
require.Equal(t, 12, len(cpaths2.inPaths))
require.Equal(t, 16, len(cpaths2.inPaths))
plan := &datapb.CompactionPlan{
PlanID: 10080,
@ -775,11 +784,11 @@ func TestCompactorInterfaceMethods(t *testing.T) {
cpaths1, err := mockbIO.upload(context.TODO(), segID1, partID, []*InsertData{iData1}, []byte{}, dData1, meta)
require.NoError(t, err)
require.Equal(t, 12, len(cpaths1.inPaths))
require.Equal(t, 16, len(cpaths1.inPaths))
cpaths2, err := mockbIO.upload(context.TODO(), segID2, partID, []*InsertData{iData2}, []byte{}, dData2, meta)
require.NoError(t, err)
require.Equal(t, 12, len(cpaths2.inPaths))
require.Equal(t, 16, len(cpaths2.inPaths))
plan := &datapb.CompactionPlan{
PlanID: 20080,

View File

@ -422,11 +422,11 @@ func TestDataNode(t *testing.T) {
}
content := []byte(`{
"rows":[
{"bool_field": true, "int8_field": 10, "int16_field": 101, "int32_field": 1001, "int64_field": 10001, "float32_field": 3.14, "float64_field": 1.56, "varChar_field": "hello world", "binary_vector_field": [254, 0, 254, 0], "float_vector_field": [1.1, 1.2]},
{"bool_field": false, "int8_field": 11, "int16_field": 102, "int32_field": 1002, "int64_field": 10002, "float32_field": 3.15, "float64_field": 2.56, "varChar_field": "hello world", "binary_vector_field": [253, 0, 253, 0], "float_vector_field": [2.1, 2.2]},
{"bool_field": true, "int8_field": 12, "int16_field": 103, "int32_field": 1003, "int64_field": 10003, "float32_field": 3.16, "float64_field": 3.56, "varChar_field": "hello world", "binary_vector_field": [252, 0, 252, 0], "float_vector_field": [3.1, 3.2]},
{"bool_field": false, "int8_field": 13, "int16_field": 104, "int32_field": 1004, "int64_field": 10004, "float32_field": 3.17, "float64_field": 4.56, "varChar_field": "hello world", "binary_vector_field": [251, 0, 251, 0], "float_vector_field": [4.1, 4.2]},
{"bool_field": true, "int8_field": 14, "int16_field": 105, "int32_field": 1005, "int64_field": 10005, "float32_field": 3.18, "float64_field": 5.56, "varChar_field": "hello world", "binary_vector_field": [250, 0, 250, 0], "float_vector_field": [5.1, 5.2]}
{"bool_field": true, "int8_field": 10, "int16_field": 101, "int32_field": 1001, "int64_field": 10001, "uint8_field": 15, "uint16_field": 106, "uint32_field": 1006, "uint64_field": 10006, "float32_field": 3.14, "float64_field": 1.56, "varChar_field": "hello world", "binary_vector_field": [254, 0, 254, 0], "float_vector_field": [1.1, 1.2]},
{"bool_field": false, "int8_field": 11, "int16_field": 102, "int32_field": 1002, "int64_field": 10002, "uint8_field": 16, "uint16_field": 107, "uint32_field": 1007, "uint64_field": 10007, "float32_field": 3.15, "float64_field": 2.56, "varChar_field": "hello world", "binary_vector_field": [253, 0, 253, 0], "float_vector_field": [2.1, 2.2]},
{"bool_field": true, "int8_field": 12, "int16_field": 103, "int32_field": 1003, "int64_field": 10003, "uint8_field": 17, "uint16_field": 108, "uint32_field": 1008, "uint64_field": 10008, "float32_field": 3.16, "float64_field": 3.56, "varChar_field": "hello world", "binary_vector_field": [252, 0, 252, 0], "float_vector_field": [3.1, 3.2]},
{"bool_field": false, "int8_field": 13, "int16_field": 104, "int32_field": 1004, "int64_field": 10004, "uint8_field": 18, "uint16_field": 109, "uint32_field": 1009, "uint64_field": 10009, "float32_field": 3.17, "float64_field": 4.56, "varChar_field": "hello world", "binary_vector_field": [251, 0, 251, 0], "float_vector_field": [4.1, 4.2]},
{"bool_field": true, "int8_field": 14, "int16_field": 105, "int32_field": 1005, "int64_field": 10005, "uint8_field": 19, "uint16_field": 110, "uint32_field": 1010, "uint64_field": 10010, "float32_field": 3.18, "float64_field": 5.56, "varChar_field": "hello world", "binary_vector_field": [250, 0, 250, 0], "float_vector_field": [5.1, 5.2]}
]
}`)
@ -520,11 +520,11 @@ func TestDataNode(t *testing.T) {
content := []byte(`{
"rows":[
{"bool_field": true, "int8_field": 10, "int16_field": 101, "int32_field": 1001, "int64_field": 10001, "float32_field": 3.14, "float64_field": 1.56, "varChar_field": "hello world", "binary_vector_field": [254, 0, 254, 0], "float_vector_field": [1.1, 1.2]},
{"bool_field": false, "int8_field": 11, "int16_field": 102, "int32_field": 1002, "int64_field": 10002, "float32_field": 3.15, "float64_field": 2.56, "varChar_field": "hello world", "binary_vector_field": [253, 0, 253, 0], "float_vector_field": [2.1, 2.2]},
{"bool_field": true, "int8_field": 12, "int16_field": 103, "int32_field": 1003, "int64_field": 10003, "float32_field": 3.16, "float64_field": 3.56, "varChar_field": "hello world", "binary_vector_field": [252, 0, 252, 0], "float_vector_field": [3.1, 3.2]},
{"bool_field": false, "int8_field": 13, "int16_field": 104, "int32_field": 1004, "int64_field": 10004, "float32_field": 3.17, "float64_field": 4.56, "varChar_field": "hello world", "binary_vector_field": [251, 0, 251, 0], "float_vector_field": [4.1, 4.2]},
{"bool_field": true, "int8_field": 14, "int16_field": 105, "int32_field": 1005, "int64_field": 10005, "float32_field": 3.18, "float64_field": 5.56, "varChar_field": "hello world", "binary_vector_field": [250, 0, 250, 0], "float_vector_field": [5.1, 5.2]}
{"bool_field": true, "int8_field": 10, "int16_field": 101, "int32_field": 1001, "int64_field": 10001, "uint8_field": 15, "uint16_field": 106, "uint32_field": 1006, "uint64_field": 10006, "float32_field": 3.14, "float64_field": 1.56, "varChar_field": "hello world", "binary_vector_field": [254, 0, 254, 0], "float_vector_field": [1.1, 1.2]},
{"bool_field": false, "int8_field": 11, "int16_field": 102, "int32_field": 1002, "int64_field": 10002, "uint8_field": 16, "uint16_field": 107, "uint32_field": 1007, "uint64_field": 10007, "float32_field": 3.15, "float64_field": 2.56, "varChar_field": "hello world", "binary_vector_field": [253, 0, 253, 0], "float_vector_field": [2.1, 2.2]},
{"bool_field": true, "int8_field": 12, "int16_field": 103, "int32_field": 1003, "int64_field": 10003, "uint8_field": 17, "uint16_field": 108, "uint32_field": 1008, "uint64_field": 10008, "float32_field": 3.16, "float64_field": 3.56, "varChar_field": "hello world", "binary_vector_field": [252, 0, 252, 0], "float_vector_field": [3.1, 3.2]},
{"bool_field": false, "int8_field": 13, "int16_field": 104, "int32_field": 1004, "int64_field": 10004, "uint8_field": 18, "uint16_field": 109, "uint32_field": 1009, "uint64_field": 10009, "float32_field": 3.17, "float64_field": 4.56, "varChar_field": "hello world", "binary_vector_field": [251, 0, 251, 0], "float_vector_field": [4.1, 4.2]},
{"bool_field": true, "int8_field": 14, "int16_field": 105, "int32_field": 1005, "int64_field": 10005, "uint8_field": 19, "uint16_field": 110, "uint32_field": 1010, "uint64_field": 10010, "float32_field": 3.18, "float64_field": 5.56, "varChar_field": "hello world", "binary_vector_field": [250, 0, 250, 0], "float_vector_field": [5.1, 5.2]}
]
}`)
@ -553,11 +553,11 @@ func TestDataNode(t *testing.T) {
}
content := []byte(`{
"rows":[
{"bool_field": true, "int8_field": 10, "int16_field": 101, "int32_field": 1001, "int64_field": 10001, "float32_field": 3.14, "float64_field": 1.56, "varChar_field": "hello world", "binary_vector_field": [254, 0, 254, 0], "float_vector_field": [1.1, 1.2]},
{"bool_field": false, "int8_field": 11, "int16_field": 102, "int32_field": 1002, "int64_field": 10002, "float32_field": 3.15, "float64_field": 2.56, "varChar_field": "hello world", "binary_vector_field": [253, 0, 253, 0], "float_vector_field": [2.1, 2.2]},
{"bool_field": true, "int8_field": 12, "int16_field": 103, "int32_field": 1003, "int64_field": 10003, "float32_field": 3.16, "float64_field": 3.56, "varChar_field": "hello world", "binary_vector_field": [252, 0, 252, 0], "float_vector_field": [3.1, 3.2]},
{"bool_field": false, "int8_field": 13, "int16_field": 104, "int32_field": 1004, "int64_field": 10004, "float32_field": 3.17, "float64_field": 4.56, "varChar_field": "hello world", "binary_vector_field": [251, 0, 251, 0], "float_vector_field": [4.1, 4.2]},
{"bool_field": true, "int8_field": 14, "int16_field": 105, "int32_field": 1005, "int64_field": 10005, "float32_field": 3.18, "float64_field": 5.56, "varChar_field": "hello world", "binary_vector_field": [250, 0, 250, 0], "float_vector_field": [5.1, 5.2]}
{"bool_field": true, "int8_field": 10, "int16_field": 101, "int32_field": 1001, "int64_field": 10001, "uint8_field": 15, "uint16_field": 106, "uint32_field": 1006, "uint64_field": 10006, "float32_field": 3.14, "float64_field": 1.56, "varChar_field": "hello world", "binary_vector_field": [254, 0, 254, 0], "float_vector_field": [1.1, 1.2]},
{"bool_field": false, "int8_field": 11, "int16_field": 102, "int32_field": 1002, "int64_field": 10002, "uint8_field": 16, "uint16_field": 107, "uint32_field": 1007, "uint64_field": 10007, "float32_field": 3.15, "float64_field": 2.56, "varChar_field": "hello world", "binary_vector_field": [253, 0, 253, 0], "float_vector_field": [2.1, 2.2]},
{"bool_field": true, "int8_field": 12, "int16_field": 103, "int32_field": 1003, "int64_field": 10003, "uint8_field": 17, "uint16_field": 108, "uint32_field": 1008, "uint64_field": 10008, "float32_field": 3.16, "float64_field": 3.56, "varChar_field": "hello world", "binary_vector_field": [252, 0, 252, 0], "float_vector_field": [3.1, 3.2]},
{"bool_field": false, "int8_field": 13, "int16_field": 104, "int32_field": 1004, "int64_field": 10004, "uint8_field": 18, "uint16_field": 109, "uint32_field": 1009, "uint64_field": 10009, "float32_field": 3.17, "float64_field": 4.56, "varChar_field": "hello world", "binary_vector_field": [251, 0, 251, 0], "float_vector_field": [4.1, 4.2]},
{"bool_field": true, "int8_field": 14, "int16_field": 105, "int32_field": 1005, "int64_field": 10005, "uint8_field": 19, "uint16_field": 110, "uint32_field": 1010, "uint64_field": 10010, "float32_field": 3.18, "float64_field": 5.56, "varChar_field": "hello world", "binary_vector_field": [250, 0, 250, 0], "float_vector_field": [5.1, 5.2]}
]
}`)

View File

@ -454,6 +454,38 @@ func (mf *MetaFactory) GetFieldSchema() []*schemapb.FieldSchema {
},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 110,
Name: "uint8_field",
Description: "field 110",
DataType: schemapb.DataType_UInt8,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 111,
Name: "uint16_field",
Description: "field 111",
DataType: schemapb.DataType_UInt16,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 112,
Name: "uint32_field",
Description: "field 112",
DataType: schemapb.DataType_UInt32,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 113,
Name: "uint64_field",
Description: "field 113",
DataType: schemapb.DataType_UInt64,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
}
return fields
@ -522,6 +554,38 @@ func GenRowData() (rawData []byte) {
}
rawData = append(rawData, bint64.Bytes()...)
// uint8
var dataUInt8 uint8 = 100
ubint8 := new(bytes.Buffer)
if err := binary.Write(ubint8, common.Endian, dataUInt8); err != nil {
panic(err)
}
rawData = append(rawData, ubint8.Bytes()...)
// uint16
var dataUInt16 uint16 = 200
ubint16 := new(bytes.Buffer)
if err := binary.Write(ubint16, common.Endian, dataUInt16); err != nil {
panic(err)
}
rawData = append(rawData, ubint16.Bytes()...)
// uint32
var dataUInt32 uint32 = 300
ubint32 := new(bytes.Buffer)
if err := binary.Write(ubint32, common.Endian, dataUInt32); err != nil {
panic(err)
}
rawData = append(rawData, ubint32.Bytes()...)
// uint64
var dataUInt64 uint64 = 400
ubint64 := new(bytes.Buffer)
if err := binary.Write(ubint64, common.Endian, dataUInt64); err != nil {
panic(err)
}
rawData = append(rawData, ubint64.Bytes()...)
// float32
var datafloat float32 = 1.1
bfloat32 := new(bytes.Buffer)
@ -670,6 +734,78 @@ func GenColumnData() (fieldsData []*schemapb.FieldData) {
}
fieldsData = append(fieldsData, int64FieldData)
// uint8
uint8Data := []uint32{100}
uint8FieldData := &schemapb.FieldData{
Type: schemapb.DataType_UInt8,
FieldName: "uint8_field",
FieldId: 110,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: uint8Data,
},
},
},
},
}
fieldsData = append(fieldsData, uint8FieldData)
// uint16
uint16Data := []uint32{200}
uint16FieldData := &schemapb.FieldData{
Type: schemapb.DataType_UInt16,
FieldName: "uint16_field",
FieldId: 111,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: uint16Data,
},
},
},
},
}
fieldsData = append(fieldsData, uint16FieldData)
// uint32
uint32Data := []uint32{300}
uint32FieldData := &schemapb.FieldData{
Type: schemapb.DataType_UInt32,
FieldName: "uint32_field",
FieldId: 112,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: uint32Data,
},
},
},
},
}
fieldsData = append(fieldsData, uint32FieldData)
// uint64
uint64Data := []uint64{400}
uint64FieldData := &schemapb.FieldData{
Type: schemapb.DataType_UInt64,
FieldName: "uint64_field",
FieldId: 113,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: uint64Data,
},
},
},
},
}
fieldsData = append(fieldsData, uint64FieldData)
// float
floatData := []float32{1.1}
floatFieldData := &schemapb.FieldData{
@ -1120,6 +1256,22 @@ func genInsertData() *InsertData {
NumRows: []int64{2},
Data: []string{"test1", "test2"},
},
110: &s.UInt8FieldData{
NumRows: []int64{2},
Data: []uint8{5, 6},
},
111: &s.UInt16FieldData{
NumRows: []int64{2},
Data: []uint16{7, 8},
},
112: &s.UInt32FieldData{
NumRows: []int64{2},
Data: []uint32{9, 10},
},
113: &s.UInt64FieldData{
NumRows: []int64{2},
Data: []uint64{1, 2},
},
}}
}
@ -1176,6 +1328,22 @@ func genEmptyInsertData() *InsertData {
NumRows: []int64{0},
Data: []string{},
},
110: &s.UInt8FieldData{
NumRows: []int64{0},
Data: []uint8{},
},
111: &s.UInt16FieldData{
NumRows: []int64{0},
Data: []uint16{},
},
112: &s.UInt32FieldData{
NumRows: []int64{0},
Data: []uint32{},
},
113: &s.UInt64FieldData{
NumRows: []int64{0},
Data: []uint64{},
},
}}
}
@ -1232,6 +1400,22 @@ func genInsertDataWithExpiredTS() *InsertData {
NumRows: []int64{2},
Data: []string{"test1", "test2"},
},
110: &s.UInt8FieldData{
NumRows: []int64{2},
Data: []uint8{5, 6},
},
111: &s.UInt16FieldData{
NumRows: []int64{2},
Data: []uint16{7, 8},
},
112: &s.UInt32FieldData{
NumRows: []int64{2},
Data: []uint32{9, 10},
},
113: &s.UInt64FieldData{
NumRows: []int64{2},
Data: []uint64{1, 2},
},
}}
}

View File

@ -149,6 +149,46 @@ func (f FieldData) AsSchemapb() (*schemapb.FieldData, error) {
},
},
}
case schemapb.DataType_UInt8, schemapb.DataType_UInt16, schemapb.DataType_UInt32:
if len(raw) > 0 {
_, ok := raw[0].(float64)
if !ok {
return nil, newTypeError(raw[0])
}
}
data := make([]uint32, len(raw))
for i, v := range raw {
data[i] = uint32(v.(float64))
}
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: data,
},
},
},
}
case schemapb.DataType_UInt64:
if len(raw) > 0 {
_, ok := raw[0].(float64)
if !ok {
return nil, newTypeError(raw[0])
}
}
data := make([]uint64, len(raw))
for i, v := range raw {
data[i] = uint64(v.(float64))
}
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: data,
},
},
},
}
case schemapb.DataType_Float:
if len(raw) > 0 {
_, ok := raw[0].(float64)

View File

@ -112,6 +112,67 @@ func TestFieldData_AsSchemapb(t *testing.T) {
_, err := fieldData.AsSchemapb()
assert.Error(t, err)
})
t.Run("uint8_ok", func(t *testing.T) {
fieldData := FieldData{
Type: schemapb.DataType_UInt8,
Field: []interface{}{1, 2, 3},
}
raw, _ := json.Marshal(fieldData)
json.Unmarshal(raw, &fieldData)
_, err := fieldData.AsSchemapb()
assert.NoError(t, err)
})
t.Run("uint8_error", func(t *testing.T) {
fieldData := FieldData{
Type: schemapb.DataType_UInt8,
Field: []interface{}{"a", "b", "c"},
}
raw, _ := json.Marshal(fieldData)
json.Unmarshal(raw, &fieldData)
_, err := fieldData.AsSchemapb()
assert.Error(t, err)
})
t.Run("uint32_ok", func(t *testing.T) {
fieldData := FieldData{
Type: schemapb.DataType_UInt32,
Field: []interface{}{1, 2, 3},
}
raw, _ := json.Marshal(fieldData)
json.Unmarshal(raw, &fieldData)
_, err := fieldData.AsSchemapb()
assert.NoError(t, err)
})
t.Run("uint32_error", func(t *testing.T) {
fieldData := FieldData{
Type: schemapb.DataType_UInt32,
Field: []interface{}{"a", "b", "c"},
}
raw, _ := json.Marshal(fieldData)
json.Unmarshal(raw, &fieldData)
_, err := fieldData.AsSchemapb()
assert.Error(t, err)
})
t.Run("uint64_ok", func(t *testing.T) {
fieldData := FieldData{
Type: schemapb.DataType_UInt64,
Field: []interface{}{1, 2, 3},
}
raw, _ := json.Marshal(fieldData)
json.Unmarshal(raw, &fieldData)
_, err := fieldData.AsSchemapb()
assert.NoError(t, err)
})
t.Run("uint64_error", func(t *testing.T) {
fieldData := FieldData{
Type: schemapb.DataType_UInt64,
Field: []interface{}{"a", "b", "c"},
}
raw, _ := json.Marshal(fieldData)
json.Unmarshal(raw, &fieldData)
_, err := fieldData.AsSchemapb()
assert.Error(t, err)
})
t.Run("float_ok", func(t *testing.T) {
fieldData := FieldData{
Type: schemapb.DataType_Float,

View File

@ -587,7 +587,8 @@ func (v *ParserVisitor) VisitRange(ctx *parser.RangeContext) interface{} {
}
case schemapb.DataType_Bool:
return fmt.Errorf("invalid range operations on boolean expr")
case schemapb.DataType_Int8, schemapb.DataType_Int16, schemapb.DataType_Int32, schemapb.DataType_Int64:
case schemapb.DataType_Int8, schemapb.DataType_Int16, schemapb.DataType_Int32, schemapb.DataType_Int64,
schemapb.DataType_UInt8, schemapb.DataType_UInt16, schemapb.DataType_UInt32, schemapb.DataType_UInt64:
if !IsInteger(lowerValue) || !IsInteger(upperValue) {
return fmt.Errorf("invalid range operations")
}
@ -671,7 +672,8 @@ func (v *ParserVisitor) VisitReverseRange(ctx *parser.ReverseRangeContext) inter
}
case schemapb.DataType_Bool:
return fmt.Errorf("invalid range operations on boolean expr")
case schemapb.DataType_Int8, schemapb.DataType_Int16, schemapb.DataType_Int32, schemapb.DataType_Int64:
case schemapb.DataType_Int8, schemapb.DataType_Int16, schemapb.DataType_Int32, schemapb.DataType_Int64,
schemapb.DataType_UInt8, schemapb.DataType_UInt16, schemapb.DataType_UInt32, schemapb.DataType_UInt64:
if !IsInteger(lowerValue) || !IsInteger(upperValue) {
return fmt.Errorf("invalid range operations")
}

View File

@ -62,6 +62,10 @@ func TestExpr_Term(t *testing.T) {
`Int16Field in [3, 4]`,
`Int32Field in [5, 6]`,
`Int64Field in [7, 8]`,
`UInt8Field in [1, 2]`,
`UInt16Field in [3, 4]`,
`UInt32Field in [5, 6]`,
`UInt64Field in [7, 8]`,
`FloatField in [9.0, 10.0]`,
`DoubleField in [11.0, 12.0]`,
`StringField in ["str13", "str14"]`,
@ -85,6 +89,10 @@ func TestExpr_Compare(t *testing.T) {
`Int16Field <= Int32Field`,
`Int32Field > Int64Field`,
`Int64Field >= FloatField`,
`UInt8Field < UInt16Field`,
`UInt16Field <= UInt32Field`,
`UInt32Field > UInt64Field`,
`UInt64Field >= FloatField`,
`FloatField == DoubleField`,
`StringField != VarCharField`,
}
@ -103,6 +111,10 @@ func TestExpr_UnaryRange(t *testing.T) {
`Int16Field <= 1`,
`Int32Field > 2`,
`Int64Field >= 3`,
`UInt8Field < 0`,
`UInt16Field <= 1`,
`UInt32Field > 2`,
`UInt64Field >= 3`,
`FloatField == 4.0`,
`FloatField == 2`,
`DoubleField != 5.0`,
@ -225,6 +237,10 @@ func TestExpr_Identifier(t *testing.T) {
`Int16Field`,
`Int32Field`,
`Int64Field`,
`UInt8Field`,
`UInt16Field`,
`UInt32Field`,
`UInt64Field`,
`FloatField`,
`DoubleField`,
`StringField`,

View File

@ -20,7 +20,10 @@ enum DataType {
Int16 = 3;
Int32 = 4;
Int64 = 5;
UInt8 = 6;
UInt16 = 7;
UInt32 = 8;
UInt64 = 9;
Float = 10;
Double = 11;
@ -75,6 +78,14 @@ message LongArray {
repeated int64 data = 1;
}
message UIntArray {
repeated uint32 data = 1;
}
message ULongArray {
repeated uint64 data = 1;
}
message FloatArray {
repeated float data = 1;
}
@ -101,6 +112,8 @@ message ScalarField {
DoubleArray double_data = 5;
StringArray string_data = 6;
BytesArray bytes_data = 7;
UIntArray uint_data = 8;
ULongArray ulong_data = 9;
}
}

View File

@ -407,6 +407,29 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
},
},
}
case schemapb.DataType_UInt32:
fieldData.FieldName = testUInt32Field
fieldData.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: generateUInt32Array(numRows),
},
},
},
}
case schemapb.DataType_UInt64:
fieldData.FieldName = testUInt64Field
fieldData.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: generateUint64Array(numRows),
},
},
},
}
//
case schemapb.DataType_Float:
fieldData.FieldName = testFloatField
fieldData.Field = &schemapb.FieldData_Scalars{
@ -509,6 +532,30 @@ func generateInt64Array(numRows int) []int64 {
return ret
}
// generateUInt8Array returns numRows pseudo-random uint8 values.
func generateUInt8Array(numRows int) []uint8 {
	values := make([]uint8, numRows)
	for idx := range values {
		values[idx] = uint8(rand.Int())
	}
	return values
}
// generateUInt16Array returns numRows pseudo-random uint16 values.
func generateUInt16Array(numRows int) []uint16 {
	values := make([]uint16, numRows)
	for idx := range values {
		values[idx] = uint16(rand.Int())
	}
	return values
}
// generateUInt32Array returns numRows pseudo-random uint32 values.
func generateUInt32Array(numRows int) []uint32 {
	values := make([]uint32, numRows)
	for idx := range values {
		values[idx] = rand.Uint32()
	}
	return values
}
func generateUint64Array(numRows int) []uint64 {
ret := make([]uint64, 0, numRows)
for i := 0; i < numRows; i++ {
@ -619,6 +666,46 @@ func newScalarFieldData(fieldSchema *schemapb.FieldSchema, fieldName string, num
},
},
}
case schemapb.DataType_UInt8:
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: generateUInt32Array(numRows),
},
},
},
}
case schemapb.DataType_UInt16:
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: generateUInt32Array(numRows),
},
},
},
}
case schemapb.DataType_UInt32:
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: generateUInt32Array(numRows),
},
},
},
}
case schemapb.DataType_UInt64:
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: generateUint64Array(numRows),
},
},
},
}
case schemapb.DataType_Float:
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{

View File

@ -161,6 +161,10 @@ func TestInsertTask_CheckAligned(t *testing.T) {
int16FieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_Int16}
int32FieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_Int32}
int64FieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_Int64}
uint8FieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_UInt8}
uint16FieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_UInt16}
uint32FieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_UInt32}
uint64FieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_UInt64}
floatFieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_Float}
doubleFieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_Double}
floatVectorFieldSchema := &schemapb.FieldSchema{DataType: schemapb.DataType_FloatVector}
@ -190,6 +194,12 @@ func TestInsertTask_CheckAligned(t *testing.T) {
int16FieldSchema,
int32FieldSchema,
int64FieldSchema,
//
uint8FieldSchema,
uint16FieldSchema,
uint32FieldSchema,
uint64FieldSchema,
floatFieldSchema,
doubleFieldSchema,
floatVectorFieldSchema,
@ -207,6 +217,12 @@ func TestInsertTask_CheckAligned(t *testing.T) {
newScalarFieldData(int16FieldSchema, "Int16", numRows),
newScalarFieldData(int32FieldSchema, "Int32", numRows),
newScalarFieldData(int64FieldSchema, "Int64", numRows),
//
newScalarFieldData(uint8FieldSchema, "UInt8", numRows),
newScalarFieldData(uint16FieldSchema, "UInt16", numRows),
newScalarFieldData(uint32FieldSchema, "UInt32", numRows),
newScalarFieldData(uint64FieldSchema, "UInt64", numRows),
newScalarFieldData(floatFieldSchema, "Float", numRows),
newScalarFieldData(doubleFieldSchema, "Double", numRows),
newFloatVectorFieldData("FloatVector", numRows, dim),
@ -281,6 +297,59 @@ func TestInsertTask_CheckAligned(t *testing.T) {
err = case2.CheckAligned()
assert.NoError(t, err)
// less uint8 data
case2.FieldsData[1] = newScalarFieldData(uint8FieldSchema, "UInt8", numRows/2)
err = case2.CheckAligned()
assert.Error(t, err)
// more uint8 data
case2.FieldsData[1] = newScalarFieldData(uint8FieldSchema, "UInt8", numRows*2)
err = case2.CheckAligned()
assert.Error(t, err)
// revert
case2.FieldsData[1] = newScalarFieldData(uint8FieldSchema, "UInt8", numRows)
err = case2.CheckAligned()
assert.NoError(t, err)
// less uint16 data
case2.FieldsData[2] = newScalarFieldData(uint16FieldSchema, "UInt16", numRows/2)
err = case2.CheckAligned()
assert.Error(t, err)
// more uint16 data
case2.FieldsData[2] = newScalarFieldData(uint16FieldSchema, "UInt16", numRows*2)
err = case2.CheckAligned()
assert.Error(t, err)
// revert
case2.FieldsData[2] = newScalarFieldData(uint16FieldSchema, "UInt16", numRows)
err = case2.CheckAligned()
assert.NoError(t, err)
// less uint32 data
case2.FieldsData[3] = newScalarFieldData(uint32FieldSchema, "UInt32", numRows/2)
err = case2.CheckAligned()
assert.Error(t, err)
// more uint32 data
case2.FieldsData[3] = newScalarFieldData(uint32FieldSchema, "UInt32", numRows*2)
err = case2.CheckAligned()
assert.Error(t, err)
// revert
case2.FieldsData[3] = newScalarFieldData(uint32FieldSchema, "UInt32", numRows)
err = case2.CheckAligned()
assert.NoError(t, err)
// less uint64 data
case2.FieldsData[4] = newScalarFieldData(uint64FieldSchema, "UInt64", numRows/2)
err = case2.CheckAligned()
assert.Error(t, err)
// more uint64 data
case2.FieldsData[4] = newScalarFieldData(uint64FieldSchema, "UInt64", numRows*2)
err = case2.CheckAligned()
assert.Error(t, err)
// revert
case2.FieldsData[4] = newScalarFieldData(uint64FieldSchema, "UInt64", numRows)
err = case2.CheckAligned()
assert.NoError(t, err)
//
// less float data
case2.FieldsData[5] = newScalarFieldData(floatFieldSchema, "Float", numRows/2)
err = case2.CheckAligned()

View File

@ -53,6 +53,8 @@ const (
testBoolField = "bool"
testInt32Field = "int32"
testInt64Field = "int64"
testUInt32Field = "uint32"
testUInt64Field = "uint64"
testFloatField = "float"
testDoubleField = "double"
testVarCharField = "varChar"
@ -143,7 +145,7 @@ func constructCollectionSchemaByDataType(collectionName string, fieldName2DataTy
}
func constructCollectionSchemaWithAllType(
boolField, int32Field, int64Field, floatField, doubleField string,
boolField, int32Field, int64Field, floatField, doubleField, uint32Field, uint64Field string,
floatVecField, binaryVecField string,
dim int,
collectionName string,
@ -179,6 +181,26 @@ func constructCollectionSchemaWithAllType(
IndexParams: nil,
AutoID: false,
}
u32 := &schemapb.FieldSchema{
FieldID: 0,
Name: uint32Field,
IsPrimaryKey: false,
Description: "",
DataType: schemapb.DataType_UInt32,
TypeParams: nil,
IndexParams: nil,
AutoID: false,
}
u64 := &schemapb.FieldSchema{
FieldID: 0,
Name: uint64Field,
IsPrimaryKey: true,
Description: "",
DataType: schemapb.DataType_UInt64,
TypeParams: nil,
IndexParams: nil,
AutoID: false,
}
f := &schemapb.FieldSchema{
FieldID: 0,
Name: floatField,
@ -239,6 +261,8 @@ func constructCollectionSchemaWithAllType(
b,
i32,
i64,
u32,
u64,
f,
d,
fVec,
@ -255,6 +279,8 @@ func constructCollectionSchemaWithAllType(
b,
i32,
i64,
u32,
u64,
f,
d,
fVec,

View File

@ -339,7 +339,9 @@ func isVector(dataType schemapb.DataType) (bool, error) {
switch dataType {
case schemapb.DataType_Bool, schemapb.DataType_Int8,
schemapb.DataType_Int16, schemapb.DataType_Int32,
schemapb.DataType_Int64,
schemapb.DataType_Int64, schemapb.DataType_UInt8,
schemapb.DataType_UInt16, schemapb.DataType_UInt32,
schemapb.DataType_UInt64,
schemapb.DataType_Float, schemapb.DataType_Double:
return false, nil

View File

@ -307,6 +307,24 @@ func TestValidateFieldType(t *testing.T) {
dt: schemapb.DataType_Int64,
validate: true,
},
//
{
dt: schemapb.DataType_UInt8,
validate: true,
},
{
dt: schemapb.DataType_UInt16,
validate: true,
},
{
dt: schemapb.DataType_UInt32,
validate: true,
},
{
dt: schemapb.DataType_UInt64,
validate: true,
},
//
{
dt: schemapb.DataType_Float,
validate: true,

View File

@ -438,6 +438,14 @@ func getPKsFromRowBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.Coll
offset += 4
case schemapb.DataType_Int64:
offset += 8
case schemapb.DataType_UInt8:
offset++
case schemapb.DataType_UInt16:
offset += 2
case schemapb.DataType_UInt32:
offset += 4
case schemapb.DataType_UInt64:
offset += 8
case schemapb.DataType_Float:
offset += 4
case schemapb.DataType_Double:

View File

@ -221,6 +221,30 @@ var simpleVarCharField = constFieldParam{
fieldName: "varCharField",
}
// simpleUInt8Field describes the constant uint8 scalar field (ID 110) used by test schemas.
var simpleUInt8Field = constFieldParam{
	id:        110,
	dataType:  schemapb.DataType_UInt8,
	fieldName: "uint8Field",
}
// simpleUInt16Field describes the constant uint16 scalar field (ID 111) used by test schemas.
var simpleUInt16Field = constFieldParam{
	id:        111,
	dataType:  schemapb.DataType_UInt16,
	fieldName: "uint16Field",
}
// simpleUInt32Field describes the constant uint32 scalar field (ID 112) used by test schemas.
var simpleUInt32Field = constFieldParam{
	id:        112,
	dataType:  schemapb.DataType_UInt32,
	fieldName: "uint32Field",
}
// simpleUInt64Field describes the constant uint64 scalar field (ID 113) used by test schemas.
var simpleUInt64Field = constFieldParam{
	id:        113,
	dataType:  schemapb.DataType_UInt64,
	fieldName: "uint64Field",
}
var uidField = constFieldParam{
id: rowIDFieldID,
dataType: schemapb.DataType_Int64,
@ -511,6 +535,9 @@ func genTestCollectionSchema(pkTypes ...schemapb.DataType) *schemapb.CollectionS
fieldInt8 := genConstantFieldSchema(simpleInt8Field)
fieldInt16 := genConstantFieldSchema(simpleInt16Field)
fieldInt32 := genConstantFieldSchema(simpleInt32Field)
fieldUInt8 := genConstantFieldSchema(simpleUInt8Field)
fieldUInt16 := genConstantFieldSchema(simpleUInt16Field)
fieldUInt32 := genConstantFieldSchema(simpleUInt32Field)
fieldFloat := genConstantFieldSchema(simpleFloatField)
fieldDouble := genConstantFieldSchema(simpleDoubleField)
floatVecFieldSchema := genVectorFieldSchema(simpleFloatVecField)
@ -537,6 +564,9 @@ func genTestCollectionSchema(pkTypes ...schemapb.DataType) *schemapb.CollectionS
fieldInt8,
fieldInt16,
fieldInt32,
fieldUInt8,
fieldUInt16,
fieldUInt32,
fieldFloat,
fieldDouble,
floatVecFieldSchema,
@ -666,6 +696,38 @@ func generateInt64Array(numRows int) []int64 {
return ret
}
// generateUInt8Array returns numRows pseudo-random uint8 values.
func generateUInt8Array(numRows int) []uint8 {
	out := make([]uint8, numRows)
	for j := 0; j < numRows; j++ {
		out[j] = uint8(rand.Int())
	}
	return out
}
// generateUInt16Array returns numRows pseudo-random uint16 values.
func generateUInt16Array(numRows int) []uint16 {
	out := make([]uint16, numRows)
	for j := 0; j < numRows; j++ {
		out[j] = uint16(rand.Int())
	}
	return out
}
// generateUInt32Array returns the deterministic sequence 0..numRows-1 as uint32,
// so generated rows are reproducible across test runs.
func generateUInt32Array(numRows int) []uint32 {
	out := make([]uint32, numRows)
	for j := range out {
		out[j] = uint32(j)
	}
	return out
}
// generateUInt64Array returns the deterministic sequence 0..numRows-1 as uint64,
// so generated rows are reproducible across test runs.
func generateUInt64Array(numRows int) []uint64 {
	out := make([]uint64, numRows)
	for j := range out {
		out[j] = uint64(j)
	}
	return out
}
func generateFloat32Array(numRows int) []float32 {
ret := make([]float32, 0, numRows)
for i := 0; i < numRows; i++ {
@ -772,6 +834,51 @@ func newScalarFieldData(dType schemapb.DataType, fieldName string, numRows int)
},
},
}
case schemapb.DataType_UInt8:
ret.FieldId = simpleUInt8Field.id
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: generateUInt32Array(numRows),
},
},
},
}
case schemapb.DataType_UInt16:
ret.FieldId = simpleUInt16Field.id
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: generateUInt32Array(numRows),
},
},
},
}
case schemapb.DataType_UInt32:
ret.FieldId = simpleUInt32Field.id
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: generateUInt32Array(numRows),
},
},
},
}
case schemapb.DataType_UInt64:
ret.FieldId = simpleUInt64Field.id
ret.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: generateUInt64Array(numRows),
},
},
},
}
case schemapb.DataType_Float:
ret.FieldId = simpleFloatField.id
ret.Field = &schemapb.FieldData_Scalars{
@ -890,6 +997,26 @@ func genInsertData(msgLength int, schema *schemapb.CollectionSchema) (*storage.I
NumRows: []int64{int64(msgLength)},
Data: generateInt64Array(msgLength),
}
case schemapb.DataType_UInt8:
insertData.Data[f.FieldID] = &storage.UInt8FieldData{
NumRows: []int64{int64(msgLength)},
Data: generateUInt8Array(msgLength),
}
case schemapb.DataType_UInt16:
insertData.Data[f.FieldID] = &storage.UInt16FieldData{
NumRows: []int64{int64(msgLength)},
Data: generateUInt16Array(msgLength),
}
case schemapb.DataType_UInt32:
insertData.Data[f.FieldID] = &storage.UInt32FieldData{
NumRows: []int64{int64(msgLength)},
Data: generateUInt32Array(msgLength),
}
case schemapb.DataType_UInt64:
insertData.Data[f.FieldID] = &storage.UInt64FieldData{
NumRows: []int64{int64(msgLength)},
Data: generateUInt64Array(msgLength),
}
case schemapb.DataType_Float:
insertData.Data[f.FieldID] = &storage.FloatFieldData{
NumRows: []int64{int64(msgLength)},
@ -965,6 +1092,14 @@ func genSimpleInsertMsg(schema *schemapb.CollectionSchema, numRows int) (*msgstr
fieldsData = append(fieldsData, newScalarFieldData(f.DataType, simpleInt32Field.fieldName, numRows))
case schemapb.DataType_Int64:
fieldsData = append(fieldsData, newScalarFieldData(f.DataType, simpleInt64Field.fieldName, numRows))
case schemapb.DataType_UInt8:
fieldsData = append(fieldsData, newScalarFieldData(f.DataType, simpleUInt8Field.fieldName, numRows))
case schemapb.DataType_UInt16:
fieldsData = append(fieldsData, newScalarFieldData(f.DataType, simpleUInt16Field.fieldName, numRows))
case schemapb.DataType_UInt32:
fieldsData = append(fieldsData, newScalarFieldData(f.DataType, simpleUInt32Field.fieldName, numRows))
case schemapb.DataType_UInt64:
fieldsData = append(fieldsData, newScalarFieldData(f.DataType, simpleUInt64Field.fieldName, numRows))
case schemapb.DataType_Float:
fieldsData = append(fieldsData, newScalarFieldData(f.DataType, simpleFloatField.fieldName, numRows))
case schemapb.DataType_Double:
@ -1807,6 +1942,38 @@ func genFieldData(fieldName string, fieldID int64, fieldType schemapb.DataType,
},
FieldId: fieldID,
}
case schemapb.DataType_UInt32:
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_UInt32,
FieldName: fieldName,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: fieldValue.([]uint32),
},
},
},
},
FieldId: fieldID,
}
case schemapb.DataType_UInt64:
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_UInt64,
FieldName: fieldName,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: fieldValue.([]uint64),
},
},
},
},
FieldId: fieldID,
}
case schemapb.DataType_Float:
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_Float,

View File

@ -520,6 +520,56 @@ func fillInt64FieldData(vcm storage.ChunkManager, dataPath string, fieldData *sc
return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetLongData().GetData()[i]))
}
// fillUInt8FieldData reads the uint8 row at the given row offset from the file
// at dataPath and stores it, widened to uint32, into slot i of the field's
// UintData array (UIntArray carries uint32 elements, so uint8 rows are widened).
// Returns any error from the chunk-manager read or the binary decode.
func fillUInt8FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
	// read by offset.
	rowBytes := int64(1) // a uint8 row occupies one byte on disk
	content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes)
	if err != nil {
		return err
	}
	// decode into a temporary uint8, then widen into the uint32 destination slot
	var u8 uint8
	if err := funcutil.ReadBinary(endian, content, &u8); err != nil {
		return err
	}
	fieldData.GetScalars().GetUintData().GetData()[i] = uint32(u8)
	return nil
}
// fillUInt16FieldData reads the uint16 row at the given row offset from the file
// at dataPath and stores it, widened to uint32, into slot i of the field's
// UintData array (UIntArray carries uint32 elements, so uint16 rows are widened).
// Returns any error from the chunk-manager read or the binary decode.
func fillUInt16FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
	// read by offset.
	rowBytes := int64(2) // a uint16 row occupies two bytes on disk
	content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes)
	if err != nil {
		return err
	}
	// decode into a temporary uint16, then widen into the uint32 destination slot
	var u16 uint16
	if err := funcutil.ReadBinary(endian, content, &u16); err != nil {
		return err
	}
	fieldData.GetScalars().GetUintData().GetData()[i] = uint32(u16)
	return nil
}
// fillUInt32FieldData reads the uint32 row at the given row offset from the
// file at dataPath and decodes it directly into slot i of the field's
// UintData array. Returns any error from the read or the binary decode.
func fillUInt32FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
	const rowBytes int64 = 4 // a uint32 row occupies four bytes on disk
	content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes)
	if err != nil {
		return err
	}
	dst := &(fieldData.GetScalars().GetUintData().GetData()[i])
	return funcutil.ReadBinary(endian, content, dst)
}
// fillUInt64FieldData reads the uint64 row at the given row offset from the
// file at dataPath and decodes it directly into slot i of the field's
// UlongData array. Returns any error from the read or the binary decode.
func fillUInt64FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
	const rowBytes int64 = 8 // a uint64 row occupies eight bytes on disk
	content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes)
	if err != nil {
		return err
	}
	dst := &(fieldData.GetScalars().GetUlongData().GetData()[i])
	return funcutil.ReadBinary(endian, content, dst)
}
func fillFloatFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read by offset.
rowBytes := int64(4)
@ -558,6 +608,14 @@ func fillFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemap
return fillInt32FieldData(vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Int64:
return fillInt64FieldData(vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_UInt8:
return fillUInt8FieldData(vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_UInt16:
return fillUInt16FieldData(vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_UInt32:
return fillUInt32FieldData(vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_UInt64:
return fillUInt64FieldData(vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Float:
return fillFloatFieldData(vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Double:

View File

@ -163,6 +163,14 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
schema.Fields = append(schema.Fields, genConstantFieldSchema(simpleInt32Field))
case schemapb.DataType_Int64:
schema.Fields = append(schema.Fields, genConstantFieldSchema(simpleInt64Field))
case schemapb.DataType_UInt8:
schema.Fields = append(schema.Fields, genConstantFieldSchema(simpleUInt8Field))
case schemapb.DataType_UInt16:
schema.Fields = append(schema.Fields, genConstantFieldSchema(simpleUInt16Field))
case schemapb.DataType_UInt32:
schema.Fields = append(schema.Fields, genConstantFieldSchema(simpleUInt32Field))
case schemapb.DataType_UInt64:
schema.Fields = append(schema.Fields, genConstantFieldSchema(simpleUInt64Field))
case schemapb.DataType_Float:
schema.Fields = append(schema.Fields, genConstantFieldSchema(simpleFloatField))
case schemapb.DataType_Double:
@ -201,6 +209,11 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
runLoadSegmentFieldData(schemapb.DataType_Int8, schemapb.DataType_Int64)
runLoadSegmentFieldData(schemapb.DataType_Int16, schemapb.DataType_Int64)
runLoadSegmentFieldData(schemapb.DataType_Int32, schemapb.DataType_Int64)
runLoadSegmentFieldData(schemapb.DataType_UInt8, schemapb.DataType_Int64)
runLoadSegmentFieldData(schemapb.DataType_UInt16, schemapb.DataType_Int64)
runLoadSegmentFieldData(schemapb.DataType_UInt32, schemapb.DataType_Int64)
runLoadSegmentFieldData(schemapb.DataType_Float, schemapb.DataType_Int64)
runLoadSegmentFieldData(schemapb.DataType_Double, schemapb.DataType_Int64)
//runLoadSegmentFieldData(schemapb.DataType_VarChar)

View File

@ -90,7 +90,9 @@ func TestSegment_deleteSegment(t *testing.T) {
s, err := genSimpleSealedSegment(defaultMsgLength)
assert.NoError(t, err)
s.segmentPtr = nil
fmt.Println("delete before segment")
deleteSegment(s)
fmt.Println("delete after segment")
})
}
@ -911,6 +913,90 @@ func Test_fillInt64FieldData(t *testing.T) {
assert.Error(t, fillInt64FieldData(m, path, f, index, offset, endian))
}
// Test_fillUInt8FieldData exercises fillUInt8FieldData against a mocked chunk
// manager: a successful read, a ReadAt failure, and an empty read body.
func Test_fillUInt8FieldData(t *testing.T) {
	offset := int64(100)
	path := funcutil.GenRandomStr()
	index := 0
	endian := common.Endian
	f := newScalarFieldData(schemapb.DataType_UInt8, simpleUInt8Field.fieldName, 1)

	// happy path: the mock returns enough bytes to decode one row
	var m storage.ChunkManager = newMockChunkManager(withDefaultReadAt())
	assert.NoError(t, fillUInt8FieldData(m, path, f, index, offset, endian))

	// a ReadAt error is propagated
	m = newMockChunkManager(withReadAtErr())
	assert.Error(t, fillUInt8FieldData(m, path, f, index, offset, endian))

	// empty content fails the binary decode
	m = newMockChunkManager(withReadAtEmptyContent())
	assert.Error(t, fillUInt8FieldData(m, path, f, index, offset, endian))
}
// Test_fillUInt16FieldData exercises fillUInt16FieldData against a mocked chunk
// manager: a successful read, a ReadAt failure, and an empty read body.
func Test_fillUInt16FieldData(t *testing.T) {
	var m storage.ChunkManager
	offset := int64(100)
	m = newMockChunkManager(withDefaultReadAt())
	// use the uint16 field's own name (previously copied from simpleUInt64Field by mistake)
	f := newScalarFieldData(schemapb.DataType_UInt16, simpleUInt16Field.fieldName, 1)
	path := funcutil.GenRandomStr()
	index := 0
	endian := common.Endian
	// happy path: the mock returns enough bytes to decode one row
	assert.NoError(t, fillUInt16FieldData(m, path, f, index, offset, endian))
	// a ReadAt error is propagated
	m = newMockChunkManager(withReadAtErr())
	assert.Error(t, fillUInt16FieldData(m, path, f, index, offset, endian))
	// empty content fails the binary decode
	m = newMockChunkManager(withReadAtEmptyContent())
	assert.Error(t, fillUInt16FieldData(m, path, f, index, offset, endian))
}
// Test_fillUInt32FieldData exercises fillUInt32FieldData against a mocked chunk
// manager: a successful read, a ReadAt failure, and an empty read body.
func Test_fillUInt32FieldData(t *testing.T) {
	offset := int64(100)
	path := funcutil.GenRandomStr()
	index := 0
	endian := common.Endian
	f := newScalarFieldData(schemapb.DataType_UInt32, simpleUInt32Field.fieldName, 1)

	// happy path: the mock returns enough bytes to decode one row
	var m storage.ChunkManager = newMockChunkManager(withDefaultReadAt())
	assert.NoError(t, fillUInt32FieldData(m, path, f, index, offset, endian))

	// a ReadAt error is propagated
	m = newMockChunkManager(withReadAtErr())
	assert.Error(t, fillUInt32FieldData(m, path, f, index, offset, endian))

	// empty content fails the binary decode
	m = newMockChunkManager(withReadAtEmptyContent())
	assert.Error(t, fillUInt32FieldData(m, path, f, index, offset, endian))
}
// Test_fillUInt64FieldData exercises fillUInt64FieldData against a mocked chunk
// manager: a successful read, a ReadAt failure, and an empty read body.
func Test_fillUInt64FieldData(t *testing.T) {
	var m storage.ChunkManager
	offset := int64(100)
	// happy path: the mock returns enough bytes to decode one row
	m = newMockChunkManager(withDefaultReadAt())
	f := newScalarFieldData(schemapb.DataType_UInt64, simpleUInt64Field.fieldName, 1)
	path := funcutil.GenRandomStr()
	index := 0
	endian := common.Endian
	assert.NoError(t, fillUInt64FieldData(m, path, f, index, offset, endian))
	// a ReadAt error is propagated
	m = newMockChunkManager(withReadAtErr())
	assert.Error(t, fillUInt64FieldData(m, path, f, index, offset, endian))
	// empty content fails the binary decode
	m = newMockChunkManager(withReadAtEmptyContent())
	assert.Error(t, fillUInt64FieldData(m, path, f, index, offset, endian))
}
func Test_fillFloatFieldData(t *testing.T) {
var m storage.ChunkManager
@ -965,6 +1051,10 @@ func Test_fillFieldData(t *testing.T) {
newScalarFieldData(schemapb.DataType_Int16, simpleInt16Field.fieldName, 1),
newScalarFieldData(schemapb.DataType_Int32, simpleInt32Field.fieldName, 1),
newScalarFieldData(schemapb.DataType_Int64, simpleInt64Field.fieldName, 1),
newScalarFieldData(schemapb.DataType_UInt8, simpleUInt8Field.fieldName, 1),
newScalarFieldData(schemapb.DataType_UInt16, simpleUInt16Field.fieldName, 1),
newScalarFieldData(schemapb.DataType_UInt32, simpleUInt32Field.fieldName, 1),
newScalarFieldData(schemapb.DataType_UInt64, simpleUInt64Field.fieldName, 1),
newScalarFieldData(schemapb.DataType_Float, simpleFloatField.fieldName, 1),
newScalarFieldData(schemapb.DataType_Double, simpleDoubleField.fieldName, 1),
}

View File

@ -281,6 +281,253 @@ func TestInsertBinlog(t *testing.T) {
assert.Equal(t, ed2.EndTimestamp, Timestamp(400))
}
/* #nosec G103 */
// TestInsertUBinlog round-trips an insert binlog of uint64 payloads: it writes
// two insert events, walks the serialized buffer byte by byte (descriptor
// event header/data, then both insert events), and finally re-reads the buffer
// through NewBinlogReader, asserting every header field and payload.
func TestInsertUBinlog(t *testing.T) {
	w := NewInsertBinlogWriter(schemapb.DataType_UInt64, 10, 20, 30, 40)

	// first insert event: payloads of the wrong element type are rejected
	e1, err := w.NextInsertEventWriter()
	assert.Nil(t, err)
	err = e1.AddDataToPayload([]uint64{1, 2, 3})
	assert.Nil(t, err)
	err = e1.AddDataToPayload([]uint32{4, 5, 6}) // mismatched type must fail
	assert.NotNil(t, err)
	err = e1.AddDataToPayload([]uint64{4, 5, 6})
	assert.Nil(t, err)
	e1.SetEventTimestamp(100, 200)

	// second insert event
	e2, err := w.NextInsertEventWriter()
	assert.Nil(t, err)
	err = e2.AddDataToPayload([]uint64{7, 8, 9})
	assert.Nil(t, err)
	err = e2.AddDataToPayload([]bool{true, false, true}) // mismatched type must fail
	assert.NotNil(t, err)
	err = e2.AddDataToPayload([]uint64{10, 11, 12})
	assert.Nil(t, err)
	e2.SetEventTimestamp(300, 400)

	w.SetEventTimeStamp(1000, 2000)
	w.baseBinlogWriter.descriptorEventData.AddExtra("test", "testExtra")
	sizeTotal := 2000000
	w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
	// GetBuffer before Finish must fail
	_, err = w.GetBuffer()
	assert.NotNil(t, err)
	err = w.Finish()
	assert.Nil(t, err)
	buf, err := w.GetBuffer()
	assert.Nil(t, err)
	w.Close()

	// magic number
	magicNum := UnsafeReadInt32(buf, 0)
	assert.Equal(t, magicNum, MagicNumber)
	pos := int(unsafe.Sizeof(MagicNumber))

	// descriptor header, timestamp: must be recent (within ~1s of now as a TSO)
	ts := UnsafeReadInt64(buf, pos)
	assert.Greater(t, ts, int64(0))
	curts := time.Now().UnixNano() / int64(time.Millisecond)
	curts = int64(tsoutil.ComposeTS(curts, 0))
	diffts := curts - ts
	maxdiff := int64(tsoutil.ComposeTS(1000, 0))
	assert.LessOrEqual(t, diffts, maxdiff)
	pos += int(unsafe.Sizeof(ts))

	//descriptor header, type code
	tc := UnsafeReadUInt8(buf, pos)
	assert.Equal(t, EventTypeCode(tc), DescriptorEventType)
	pos += int(unsafe.Sizeof(tc))

	//descriptor header, event length
	descEventLen := UnsafeReadUInt32(buf, pos)
	pos += int(unsafe.Sizeof(descEventLen))

	//descriptor header, next position
	descNxtPos := UnsafeReadInt32(buf, pos)
	assert.Equal(t, descEventLen+uint32(unsafe.Sizeof(MagicNumber)), uint32(descNxtPos))
	pos += int(unsafe.Sizeof(descNxtPos))

	//descriptor data fix, collection id
	collID := UnsafeReadInt64(buf, pos)
	assert.Equal(t, collID, int64(10))
	pos += int(unsafe.Sizeof(collID))

	//descriptor data fix, partition id
	partID := UnsafeReadInt64(buf, pos)
	assert.Equal(t, partID, int64(20))
	pos += int(unsafe.Sizeof(partID))

	//descriptor data fix, segment id
	segID := UnsafeReadInt64(buf, pos)
	assert.Equal(t, segID, int64(30))
	pos += int(unsafe.Sizeof(segID))

	//descriptor data fix, field id
	fieldID := UnsafeReadInt64(buf, pos)
	assert.Equal(t, fieldID, int64(40))
	pos += int(unsafe.Sizeof(fieldID))

	//descriptor data fix, start time stamp
	startts := UnsafeReadInt64(buf, pos)
	assert.Equal(t, startts, int64(1000))
	pos += int(unsafe.Sizeof(startts))

	//descriptor data fix, end time stamp
	endts := UnsafeReadInt64(buf, pos)
	assert.Equal(t, endts, int64(2000))
	pos += int(unsafe.Sizeof(endts))

	//descriptor data fix, payload type
	colType := UnsafeReadUInt32(buf, pos)
	assert.Equal(t, schemapb.DataType(colType), schemapb.DataType_UInt64)
	pos += int(unsafe.Sizeof(colType))

	//descriptor data, post header lengths: one byte per event type
	for i := DescriptorEventType; i < EventTypeEnd; i++ {
		size := getEventFixPartSize(i)
		assert.Equal(t, uint8(size), buf[pos])
		pos++
	}

	//descriptor data, extra length
	extraLength := UnsafeReadInt32(buf, pos)
	assert.Equal(t, extraLength, w.baseBinlogWriter.descriptorEventData.ExtraLength)
	pos += int(unsafe.Sizeof(extraLength))

	// extra bytes hold a JSON object with the AddExtra key/value pairs
	multiBytes := make([]byte, extraLength)
	for i := 0; i < int(extraLength); i++ {
		singleByte := UnsafeReadByte(buf, pos)
		multiBytes[i] = singleByte
		pos++
	}
	var extra map[string]interface{}
	err = json.Unmarshal(multiBytes, &extra)
	assert.NoError(t, err)
	testExtra, ok := extra["test"]
	assert.True(t, ok)
	assert.Equal(t, "testExtra", fmt.Sprintf("%v", testExtra))
	size, ok := extra[originalSizeKey]
	assert.True(t, ok)
	assert.Equal(t, fmt.Sprintf("%v", sizeTotal), fmt.Sprintf("%v", size))

	//start of e1
	assert.Equal(t, pos, int(descNxtPos))

	//insert e1 header, Timestamp
	e1ts := UnsafeReadInt64(buf, pos)
	diffts = curts - e1ts
	assert.LessOrEqual(t, diffts, maxdiff)
	pos += int(unsafe.Sizeof(e1ts))

	//insert e1 header, type code
	e1tc := UnsafeReadUInt8(buf, pos)
	assert.Equal(t, EventTypeCode(e1tc), InsertEventType)
	pos += int(unsafe.Sizeof(e1tc))

	//insert e1 header, event length
	e1EventLen := UnsafeReadInt32(buf, pos)
	pos += int(unsafe.Sizeof(e1EventLen))

	//insert e1 header, next position
	e1NxtPos := UnsafeReadInt32(buf, pos)
	assert.Equal(t, descNxtPos+e1EventLen, e1NxtPos)
	pos += int(unsafe.Sizeof(e1NxtPos))

	//insert e1 data, start time stamp
	e1st := UnsafeReadInt64(buf, pos)
	assert.Equal(t, e1st, int64(100))
	pos += int(unsafe.Sizeof(e1st))

	//insert e1 data, end time stamp
	e1et := UnsafeReadInt64(buf, pos)
	assert.Equal(t, e1et, int64(200))
	pos += int(unsafe.Sizeof(e1et))

	//insert e1, payload: both accepted AddDataToPayload calls are concatenated
	e1Payload := buf[pos:e1NxtPos]
	e1r, err := NewPayloadReader(schemapb.DataType_UInt64, e1Payload)
	assert.Nil(t, err)
	e1a, err := e1r.GetUInt64FromPayload()
	assert.Nil(t, err)
	assert.Equal(t, e1a, []uint64{1, 2, 3, 4, 5, 6})
	e1r.Close()

	//start of e2
	pos = int(e1NxtPos)

	//insert e2 header, Timestamp
	e2ts := UnsafeReadInt64(buf, pos)
	diffts = curts - e2ts
	assert.LessOrEqual(t, diffts, maxdiff)
	pos += int(unsafe.Sizeof(e2ts))

	//insert e2 header, type code (read unsigned, matching the e1 path)
	e2tc := UnsafeReadUInt8(buf, pos)
	assert.Equal(t, EventTypeCode(e2tc), InsertEventType)
	pos += int(unsafe.Sizeof(e2tc))

	//insert e2 header, event length
	e2EventLen := UnsafeReadInt32(buf, pos)
	pos += int(unsafe.Sizeof(e2EventLen))

	//insert e2 header, next position
	e2NxtPos := UnsafeReadInt32(buf, pos)
	assert.Equal(t, e1NxtPos+e2EventLen, e2NxtPos)
	pos += int(unsafe.Sizeof(e2NxtPos))

	//insert e2 data, start time stamp
	e2st := UnsafeReadInt64(buf, pos)
	assert.Equal(t, e2st, int64(300))
	pos += int(unsafe.Sizeof(e2st))

	//insert e2 data, end time stamp
	e2et := UnsafeReadInt64(buf, pos)
	assert.Equal(t, e2et, int64(400))
	pos += int(unsafe.Sizeof(e2et))

	//insert e2, payload
	e2Payload := buf[pos:]
	e2r, err := NewPayloadReader(schemapb.DataType_UInt64, e2Payload)
	assert.Nil(t, err)
	e2a, err := e2r.GetUInt64FromPayload()
	assert.Nil(t, err)
	assert.Equal(t, e2a, []uint64{7, 8, 9, 10, 11, 12})
	e2r.Close()

	assert.Equal(t, int(e2NxtPos), len(buf))

	//read binlog back through the reader API and check both events again
	r, err := NewBinlogReader(buf)
	assert.Nil(t, err)
	event1, err := r.NextEventReader()
	assert.Nil(t, err)
	assert.NotNil(t, event1)
	p1, err := event1.GetUInt64FromPayload()
	assert.Equal(t, p1, []uint64{1, 2, 3, 4, 5, 6})
	assert.Nil(t, err)
	assert.Equal(t, event1.TypeCode, InsertEventType)
	ed1, ok := (event1.eventData).(*insertEventData)
	assert.True(t, ok)
	assert.Equal(t, ed1.StartTimestamp, Timestamp(100))
	assert.Equal(t, ed1.EndTimestamp, Timestamp(200))

	event2, err := r.NextEventReader()
	assert.Nil(t, err)
	assert.NotNil(t, event2)
	p2, err := event2.GetUInt64FromPayload()
	assert.Nil(t, err)
	assert.Equal(t, p2, []uint64{7, 8, 9, 10, 11, 12})
	assert.Equal(t, event2.TypeCode, InsertEventType)
	ed2, ok := (event2.eventData).(*insertEventData)
	assert.True(t, ok)
	_, ok = (event2.eventData).(*deleteEventData)
	assert.False(t, ok)
	assert.Equal(t, ed2.StartTimestamp, Timestamp(300))
	assert.Equal(t, ed2.EndTimestamp, Timestamp(400))
}
/* #nosec G103 */
func TestDeleteBinlog(t *testing.T) {
w := NewDeleteBinlogWriter(schemapb.DataType_Int64, 50, 1, 1)

View File

@ -133,6 +133,22 @@ type Int64FieldData struct {
NumRows []int64
Data []int64
}
type UInt8FieldData struct {
NumRows []int64
Data []uint8
}
type UInt16FieldData struct {
NumRows []int64
Data []uint16
}
type UInt32FieldData struct {
NumRows []int64
Data []uint32
}
// UInt64FieldData is the in-memory column for a uint64 scalar field.
type UInt64FieldData struct {
	// NumRows records, per appended batch, how many rows that batch
	// contributed to Data.
	NumRows []int64
	// Data holds the flattened row values across all batches.
	Data []uint64
}
type FloatFieldData struct {
NumRows []int64
Data []float32
@ -157,11 +173,17 @@ type FloatVectorFieldData struct {
}
// RowNum implements FieldData.RowNum
func (data *BoolFieldData) RowNum() int { return len(data.Data) }
func (data *Int8FieldData) RowNum() int { return len(data.Data) }
func (data *Int16FieldData) RowNum() int { return len(data.Data) }
func (data *Int32FieldData) RowNum() int { return len(data.Data) }
func (data *Int64FieldData) RowNum() int { return len(data.Data) }
func (data *BoolFieldData) RowNum() int { return len(data.Data) }
func (data *Int8FieldData) RowNum() int { return len(data.Data) }
func (data *Int16FieldData) RowNum() int { return len(data.Data) }
func (data *Int32FieldData) RowNum() int { return len(data.Data) }
func (data *Int64FieldData) RowNum() int { return len(data.Data) }
// RowNum implements FieldData.RowNum for the unsigned integer columns:
// one entry in Data per row.
func (data *UInt8FieldData) RowNum() int { return len(data.Data) }
func (data *UInt16FieldData) RowNum() int { return len(data.Data) }
func (data *UInt32FieldData) RowNum() int { return len(data.Data) }
func (data *UInt64FieldData) RowNum() int { return len(data.Data) }
func (data *FloatFieldData) RowNum() int { return len(data.Data) }
func (data *DoubleFieldData) RowNum() int { return len(data.Data) }
func (data *StringFieldData) RowNum() int { return len(data.Data) }
@ -174,6 +196,11 @@ func (data *Int8FieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *Int16FieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *Int32FieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *Int64FieldData) GetRow(i int) interface{} { return data.Data[i] }
// GetRow implements FieldData.GetRow for the unsigned integer columns,
// returning the i-th value (panics if i is out of range, like a slice).
func (data *UInt8FieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *UInt16FieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *UInt32FieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *UInt64FieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *FloatFieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *DoubleFieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *StringFieldData) GetRow(i int) interface{} { return data.Data[i] }
@ -214,6 +241,26 @@ func (data *Int64FieldData) GetMemorySize() int {
return binary.Size(data.NumRows) + binary.Size(data.Data)
}
// GetMemorySize implements FieldData.GetMemorySize: the binary-encoded
// size of the batch row counts plus the size of the values themselves.
func (data *UInt8FieldData) GetMemorySize() int {
	rowsSize := binary.Size(data.NumRows)
	valuesSize := binary.Size(data.Data)
	return rowsSize + valuesSize
}

// GetMemorySize implements FieldData.GetMemorySize.
func (data *UInt16FieldData) GetMemorySize() int {
	rowsSize := binary.Size(data.NumRows)
	valuesSize := binary.Size(data.Data)
	return rowsSize + valuesSize
}

// GetMemorySize implements FieldData.GetMemorySize.
func (data *UInt32FieldData) GetMemorySize() int {
	rowsSize := binary.Size(data.NumRows)
	valuesSize := binary.Size(data.Data)
	return rowsSize + valuesSize
}

// GetMemorySize implements FieldData.GetMemorySize.
func (data *UInt64FieldData) GetMemorySize() int {
	rowsSize := binary.Size(data.NumRows)
	valuesSize := binary.Size(data.Data)
	return rowsSize + valuesSize
}
func (data *FloatFieldData) GetMemorySize() int {
return binary.Size(data.NumRows) + binary.Size(data.Data)
}
@ -360,6 +407,38 @@ func (insertCodec *InsertCodec) Serialize(partitionID UniqueID, segmentID Unique
return nil, nil, err
}
writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*Int64FieldData).GetMemorySize()))
case schemapb.DataType_UInt8:
err = eventWriter.AddUInt8ToPayload(singleData.(*UInt8FieldData).Data)
if err != nil {
eventWriter.Close()
writer.Close()
return nil, nil, err
}
writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*UInt8FieldData).GetMemorySize()))
case schemapb.DataType_UInt16:
err = eventWriter.AddUInt16ToPayload(singleData.(*UInt16FieldData).Data)
if err != nil {
eventWriter.Close()
writer.Close()
return nil, nil, err
}
writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*UInt16FieldData).GetMemorySize()))
case schemapb.DataType_UInt32:
err = eventWriter.AddUInt32ToPayload(singleData.(*UInt32FieldData).Data)
if err != nil {
eventWriter.Close()
writer.Close()
return nil, nil, err
}
writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*UInt32FieldData).GetMemorySize()))
case schemapb.DataType_UInt64:
err = eventWriter.AddUInt64ToPayload(singleData.(*UInt64FieldData).Data)
if err != nil {
eventWriter.Close()
writer.Close()
return nil, nil, err
}
writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*UInt64FieldData).GetMemorySize()))
case schemapb.DataType_Float:
err = eventWriter.AddFloatToPayload(singleData.(*FloatFieldData).Data)
if err != nil {
@ -607,6 +686,90 @@ func (insertCodec *InsertCodec) DeserializeInto(fieldBinlogs []*Blob, rowNum int
int64FieldData.NumRows = append(int64FieldData.NumRows, int64(len(singleData)))
insertData.Data[fieldID] = int64FieldData
case schemapb.DataType_UInt8:
singleData, err := eventReader.GetUInt8FromPayload()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, err
}
if insertData.Data[fieldID] == nil {
insertData.Data[fieldID] = &UInt8FieldData{
NumRows: make([]int64, 0),
Data: make([]uint8, 0, rowNum),
}
}
uint8FieldData := insertData.Data[fieldID].(*UInt8FieldData)
uint8FieldData.Data = append(uint8FieldData.Data, singleData...)
totalLength += len(singleData)
uint8FieldData.NumRows = append(uint8FieldData.NumRows, int64(len(singleData)))
insertData.Data[fieldID] = uint8FieldData
case schemapb.DataType_UInt16:
singleData, err := eventReader.GetUInt16FromPayload()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, err
}
if insertData.Data[fieldID] == nil {
insertData.Data[fieldID] = &UInt16FieldData{
NumRows: make([]int64, 0),
Data: make([]uint16, 0, rowNum),
}
}
uint16FieldData := insertData.Data[fieldID].(*UInt16FieldData)
uint16FieldData.Data = append(uint16FieldData.Data, singleData...)
totalLength += len(singleData)
uint16FieldData.NumRows = append(uint16FieldData.NumRows, int64(len(singleData)))
insertData.Data[fieldID] = uint16FieldData
case schemapb.DataType_UInt32:
singleData, err := eventReader.GetUInt32FromPayload()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, err
}
if insertData.Data[fieldID] == nil {
insertData.Data[fieldID] = &UInt32FieldData{
NumRows: make([]int64, 0),
Data: make([]uint32, 0, rowNum),
}
}
uint32FieldData := insertData.Data[fieldID].(*UInt32FieldData)
uint32FieldData.Data = append(uint32FieldData.Data, singleData...)
totalLength += len(singleData)
uint32FieldData.NumRows = append(uint32FieldData.NumRows, int64(len(singleData)))
insertData.Data[fieldID] = uint32FieldData
case schemapb.DataType_UInt64:
singleData, err := eventReader.GetUInt64FromPayload()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, err
}
if insertData.Data[fieldID] == nil {
insertData.Data[fieldID] = &UInt64FieldData{
NumRows: make([]int64, 0),
Data: make([]uint64, 0, rowNum),
}
}
uint64FieldData := insertData.Data[fieldID].(*UInt64FieldData)
uint64FieldData.Data = append(uint64FieldData.Data, singleData...)
totalLength += len(singleData)
uint64FieldData.NumRows = append(uint64FieldData.NumRows, int64(len(singleData)))
insertData.Data[fieldID] = uint64FieldData
case schemapb.DataType_Float:
singleData, err := eventReader.GetFloatFromPayload()
if err != nil {

View File

@ -47,6 +47,10 @@ const (
StringField = 107
BinaryVectorField = 108
FloatVectorField = 109
UInt8Field = 110
UInt16Field = 111
UInt32Field = 112
UInt64Field = 113
)
func TestInsertCodec(t *testing.T) {
@ -109,6 +113,34 @@ func TestInsertCodec(t *testing.T) {
Description: "int64",
DataType: schemapb.DataType_Int64,
},
{
FieldID: UInt8Field,
Name: "field_uint8",
IsPrimaryKey: false,
Description: "uint8",
DataType: schemapb.DataType_UInt8,
},
{
FieldID: UInt16Field,
Name: "field_uint16",
IsPrimaryKey: false,
Description: "uint16",
DataType: schemapb.DataType_UInt16,
},
{
FieldID: UInt32Field,
Name: "field_uint32",
IsPrimaryKey: false,
Description: "uint32",
DataType: schemapb.DataType_UInt32,
},
{
FieldID: UInt64Field,
Name: "field_uint64",
IsPrimaryKey: false,
Description: "uint64",
DataType: schemapb.DataType_UInt64,
},
{
FieldID: FloatField,
Name: "field_float",
@ -178,6 +210,23 @@ func TestInsertCodec(t *testing.T) {
NumRows: []int64{2},
Data: []int64{3, 4},
},
UInt8Field: &UInt8FieldData{
NumRows: []int64{2},
Data: []uint8{3, 4},
},
UInt16Field: &UInt16FieldData{
NumRows: []int64{2},
Data: []uint16{3, 4},
},
UInt32Field: &UInt32FieldData{
NumRows: []int64{2},
Data: []uint32{3, 4},
},
UInt64Field: &UInt64FieldData{
NumRows: []int64{2},
Data: []uint64{3, 4},
},
FloatField: &FloatFieldData{
NumRows: []int64{2},
Data: []float32{3, 4},
@ -233,6 +282,22 @@ func TestInsertCodec(t *testing.T) {
NumRows: []int64{2},
Data: []int64{1, 2},
},
UInt8Field: &UInt8FieldData{
NumRows: []int64{2},
Data: []uint8{1, 2},
},
UInt16Field: &UInt16FieldData{
NumRows: []int64{2},
Data: []uint16{1, 2},
},
UInt32Field: &UInt32FieldData{
NumRows: []int64{2},
Data: []uint32{1, 2},
},
UInt64Field: &UInt64FieldData{
NumRows: []int64{2},
Data: []uint64{1, 2},
},
FloatField: &FloatFieldData{
NumRows: []int64{2},
Data: []float32{1, 2},
@ -267,6 +332,10 @@ func TestInsertCodec(t *testing.T) {
Int16Field: &Int16FieldData{[]int64{}, []int16{}},
Int32Field: &Int32FieldData{[]int64{}, []int32{}},
Int64Field: &Int64FieldData{[]int64{}, []int64{}},
UInt8Field: &UInt8FieldData{[]int64{}, []uint8{}},
UInt16Field: &UInt16FieldData{[]int64{}, []uint16{}},
UInt32Field: &UInt32FieldData{[]int64{}, []uint32{}},
UInt64Field: &UInt64FieldData{[]int64{}, []uint64{}},
FloatField: &FloatFieldData{[]int64{}, []float32{}},
DoubleField: &DoubleFieldData{[]int64{}, []float64{}},
StringField: &StringFieldData{[]int64{}, []string{}},
@ -304,6 +373,10 @@ func TestInsertCodec(t *testing.T) {
assert.Equal(t, []int64{2, 2}, resultData.Data[Int16Field].(*Int16FieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[Int32Field].(*Int32FieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[Int64Field].(*Int64FieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[UInt8Field].(*UInt8FieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[UInt16Field].(*UInt16FieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[UInt32Field].(*UInt32FieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[UInt64Field].(*UInt64FieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[FloatField].(*FloatFieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[DoubleField].(*DoubleFieldData).NumRows)
assert.Equal(t, []int64{2, 2}, resultData.Data[StringField].(*StringFieldData).NumRows)
@ -316,6 +389,10 @@ func TestInsertCodec(t *testing.T) {
assert.Equal(t, []int16{1, 2, 3, 4}, resultData.Data[Int16Field].(*Int16FieldData).Data)
assert.Equal(t, []int32{1, 2, 3, 4}, resultData.Data[Int32Field].(*Int32FieldData).Data)
assert.Equal(t, []int64{1, 2, 3, 4}, resultData.Data[Int64Field].(*Int64FieldData).Data)
assert.Equal(t, []uint8{1, 2, 3, 4}, resultData.Data[UInt8Field].(*UInt8FieldData).Data)
assert.Equal(t, []uint16{1, 2, 3, 4}, resultData.Data[UInt16Field].(*UInt16FieldData).Data)
assert.Equal(t, []uint32{1, 2, 3, 4}, resultData.Data[UInt32Field].(*UInt32FieldData).Data)
assert.Equal(t, []uint64{1, 2, 3, 4}, resultData.Data[UInt64Field].(*UInt64FieldData).Data)
assert.Equal(t, []float32{1, 2, 3, 4}, resultData.Data[FloatField].(*FloatFieldData).Data)
assert.Equal(t, []float64{1, 2, 3, 4}, resultData.Data[DoubleField].(*DoubleFieldData).Data)
assert.Equal(t, []string{"1", "2", "3", "4"}, resultData.Data[StringField].(*StringFieldData).Data)

View File

@ -71,6 +71,18 @@ func (ds *DataSorter) Swap(i, j int) {
case schemapb.DataType_Int64:
data := singleData.(*Int64FieldData).Data
data[i], data[j] = data[j], data[i]
case schemapb.DataType_UInt8:
data := singleData.(*UInt8FieldData).Data
data[i], data[j] = data[j], data[i]
case schemapb.DataType_UInt16:
data := singleData.(*UInt16FieldData).Data
data[i], data[j] = data[j], data[i]
case schemapb.DataType_UInt32:
data := singleData.(*UInt32FieldData).Data
data[i], data[j] = data[j], data[i]
case schemapb.DataType_UInt64:
data := singleData.(*UInt64FieldData).Data
data[i], data[j] = data[j], data[i]
case schemapb.DataType_Float:
data := singleData.(*FloatFieldData).Data
data[i], data[j] = data[j], data[i]

View File

@ -120,6 +120,34 @@ func TestDataSorter(t *testing.T) {
Description: "description_11",
DataType: schemapb.DataType_FloatVector,
},
{
FieldID: 110,
Name: "field_uint8",
IsPrimaryKey: false,
Description: "description_12",
DataType: schemapb.DataType_UInt8,
},
{
FieldID: 111,
Name: "field_uint16",
IsPrimaryKey: false,
Description: "description_13",
DataType: schemapb.DataType_UInt16,
},
{
FieldID: 112,
Name: "field_uint32",
IsPrimaryKey: false,
Description: "description_14",
DataType: schemapb.DataType_UInt32,
},
{
FieldID: 113,
Name: "field_uint64",
IsPrimaryKey: false,
Description: "description_15",
DataType: schemapb.DataType_UInt64,
},
},
},
}
@ -177,6 +205,22 @@ func TestDataSorter(t *testing.T) {
Data: []float32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Dim: 8,
},
110: &UInt8FieldData{
NumRows: []int64{3},
Data: []uint8{3, 4, 5},
},
111: &UInt16FieldData{
NumRows: []int64{3},
Data: []uint16{3, 4, 5},
},
112: &UInt32FieldData{
NumRows: []int64{3},
Data: []uint32{3, 4, 5},
},
113: &UInt64FieldData{
NumRows: []int64{3},
Data: []uint64{3, 4, 5},
},
},
}
@ -233,6 +277,10 @@ func TestDataSorter(t *testing.T) {
assert.Equal(t, []int16{5, 3, 4}, dataSorter.InsertData.Data[102].(*Int16FieldData).Data)
assert.Equal(t, []int32{5, 3, 4}, dataSorter.InsertData.Data[103].(*Int32FieldData).Data)
assert.Equal(t, []int64{5, 3, 4}, dataSorter.InsertData.Data[104].(*Int64FieldData).Data)
assert.Equal(t, []uint8{5, 3, 4}, dataSorter.InsertData.Data[110].(*UInt8FieldData).Data)
assert.Equal(t, []uint16{5, 3, 4}, dataSorter.InsertData.Data[111].(*UInt16FieldData).Data)
assert.Equal(t, []uint32{5, 3, 4}, dataSorter.InsertData.Data[112].(*UInt32FieldData).Data)
assert.Equal(t, []uint64{5, 3, 4}, dataSorter.InsertData.Data[113].(*UInt64FieldData).Data)
assert.Equal(t, []float32{5, 3, 4}, dataSorter.InsertData.Data[105].(*FloatFieldData).Data)
assert.Equal(t, []float64{5, 3, 4}, dataSorter.InsertData.Data[106].(*DoubleFieldData).Data)
assert.Equal(t, []string{"5", "3", "4"}, dataSorter.InsertData.Data[107].(*StringFieldData).Data)

View File

@ -255,6 +255,70 @@ func TestInsertEvent(t *testing.T) {
[]int64{1, 2, 3, 4, 5, 6})
})
t.Run("insert_uint8", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_UInt8)
assert.Nil(t, err)
insertT(t, schemapb.DataType_UInt8, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint8{1, 2, 3})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint8{4, 5, 6})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint{1, 2, 3, 4, 5})
},
[]uint8{1, 2, 3, 4, 5, 6})
})
t.Run("insert_uint16", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_UInt16)
assert.Nil(t, err)
insertT(t, schemapb.DataType_UInt16, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint16{1, 2, 3})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint16{4, 5, 6})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint{1, 2, 3, 4, 5})
},
[]uint16{1, 2, 3, 4, 5, 6})
})
t.Run("insert_uint32", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_UInt32)
assert.Nil(t, err)
insertT(t, schemapb.DataType_UInt32, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint32{1, 2, 3})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint32{4, 5, 6})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint{1, 2, 3, 4, 5})
},
[]uint32{1, 2, 3, 4, 5, 6})
})
t.Run("insert_uint64", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_UInt64)
assert.Nil(t, err)
insertT(t, schemapb.DataType_UInt64, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint64{1, 2, 3})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint64{4, 5, 6})
},
func(w *insertEventWriter) error {
return w.AddDataToPayload([]uint{1, 2, 3, 4, 5})
},
[]uint64{1, 2, 3, 4, 5, 6})
})
t.Run("insert_float32", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Float)
assert.Nil(t, err)

View File

@ -90,6 +90,39 @@ func TestEventWriter(t *testing.T) {
insertEvent.Close()
}
// TestUEventWriter exercises the insert-event writer for an unsigned
// (UInt32) payload: wrong-width adds are rejected, payload length and
// serialized size stay consistent, and the writer is sealed by Finish.
func TestUEventWriter(t *testing.T) {
	// A freshly created writer can be closed immediately.
	w, err := newInsertEventWriter(schemapb.DataType_UInt32)
	assert.Nil(t, err)
	w.Close()

	w, err = newInsertEventWriter(schemapb.DataType_UInt32)
	assert.Nil(t, err)
	defer w.Close()

	// Adding values of the wrong width must fail...
	err = w.AddUInt64ToPayload([]uint64{1, 1})
	assert.NotNil(t, err)
	// ...while the declared element type succeeds.
	err = w.AddUInt32ToPayload([]uint32{1, 2, 3})
	assert.Nil(t, err)
	rows, err := w.GetPayloadLengthFromWriter()
	assert.Nil(t, err)
	assert.EqualValues(t, 3, rows)

	err = w.Finish()
	assert.Nil(t, err)
	size, err := w.GetMemoryUsageInBytes()
	assert.Nil(t, err)
	assert.EqualValues(t, size, w.EventLength)
	// No further data may be appended once the event is finished.
	err = w.AddUInt32ToPayload([]uint32{1})
	assert.NotNil(t, err)

	// The serialized byte count matches the reported memory usage.
	buf := new(bytes.Buffer)
	w.SetEventTimestamp(100, 200)
	err = w.Write(buf)
	assert.Nil(t, err)
	size, err = w.GetMemoryUsageInBytes()
	assert.Nil(t, err)
	assert.EqualValues(t, size, buf.Len())
	w.Close()
}
func TestReadMagicNumber(t *testing.T) {
var err error
buf := bytes.Buffer{}

View File

@ -42,6 +42,10 @@ type PayloadWriterInterface interface {
AddInt16ToPayload(msgs []int16) error
AddInt32ToPayload(msgs []int32) error
AddInt64ToPayload(msgs []int64) error
AddUInt8ToPayload(msgs []uint8) error
AddUInt16ToPayload(msgs []uint16) error
AddUInt32ToPayload(msgs []uint32) error
AddUInt64ToPayload(msgs []uint64) error
AddFloatToPayload(msgs []float32) error
AddDoubleToPayload(msgs []float64) error
AddOneStringToPayload(msgs string) error
@ -63,6 +67,10 @@ type PayloadReaderInterface interface {
GetInt16FromPayload() ([]int16, error)
GetInt32FromPayload() ([]int32, error)
GetInt64FromPayload() ([]int64, error)
GetUInt8FromPayload() ([]uint8, error)
GetUInt16FromPayload() ([]uint16, error)
GetUInt32FromPayload() ([]uint32, error)
GetUInt64FromPayload() ([]uint64, error)
GetFloatFromPayload() ([]float32, error)
GetDoubleFromPayload() ([]float64, error)
GetStringFromPayload() ([]string, error)
@ -131,6 +139,31 @@ func (w *PayloadWriter) AddDataToPayload(msgs interface{}, dim ...int) error {
return errors.New("incorrect data type")
}
return w.AddInt64ToPayload(val)
case schemapb.DataType_UInt8:
val, ok := msgs.([]uint8)
if !ok {
return errors.New("incorrect data type")
}
return w.AddUInt8ToPayload(val)
case schemapb.DataType_UInt16:
val, ok := msgs.([]uint16)
if !ok {
return errors.New("incorrect data type")
}
return w.AddUInt16ToPayload(val)
case schemapb.DataType_UInt32:
val, ok := msgs.([]uint32)
if !ok {
return errors.New("incorrect data type")
}
return w.AddUInt32ToPayload(val)
case schemapb.DataType_UInt64:
val, ok := msgs.([]uint64)
if !ok {
return errors.New("incorrect data type")
}
return w.AddUInt64ToPayload(val)
case schemapb.DataType_Float:
val, ok := msgs.([]float32)
if !ok {
@ -252,6 +285,57 @@ func (w *PayloadWriter) AddInt64ToPayload(msgs []int64) error {
return HandleCStatus(&status, "AddInt64ToPayload failed")
}
// AddUInt8ToPayload appends msgs to the payload column via the cgo
// payload writer. An empty slice is rejected up front because the
// &msgs[0] below would be invalid for length zero.
func (w *PayloadWriter) AddUInt8ToPayload(msgs []uint8) error {
	length := len(msgs)
	if length <= 0 {
		return errors.New("can't add empty msgs into payload")
	}
	// Hand the slice's backing array to C for the duration of the call.
	// NOTE(review): assumes the C side copies and does not retain the Go
	// pointer past this call (cgo pointer-passing rule) — confirm in the
	// C++ payload writer.
	cMsgs := (*C.uint8_t)(unsafe.Pointer(&msgs[0]))
	cLength := C.int(length)
	status := C.AddUInt8ToPayload(w.payloadWriterPtr, cMsgs, cLength)
	return HandleCStatus(&status, "AddUInt8ToPayload failed")
}
// AddUInt16ToPayload appends msgs to the payload column via the cgo
// payload writer. An empty slice is rejected up front because the
// &msgs[0] below would be invalid for length zero.
func (w *PayloadWriter) AddUInt16ToPayload(msgs []uint16) error {
	length := len(msgs)
	if length <= 0 {
		return errors.New("can't add empty msgs into payload")
	}
	// Hand the slice's backing array to C for the duration of the call.
	// NOTE(review): assumes the C side copies and does not retain the Go
	// pointer past this call (cgo pointer-passing rule).
	cMsgs := (*C.uint16_t)(unsafe.Pointer(&msgs[0]))
	cLength := C.int(length)
	status := C.AddUInt16ToPayload(w.payloadWriterPtr, cMsgs, cLength)
	return HandleCStatus(&status, "AddUInt16ToPayload failed")
}
// AddUInt32ToPayload appends msgs to the payload column via the cgo
// payload writer. An empty slice is rejected up front because the
// &msgs[0] below would be invalid for length zero.
func (w *PayloadWriter) AddUInt32ToPayload(msgs []uint32) error {
	length := len(msgs)
	if length <= 0 {
		return errors.New("can't add empty msgs into payload")
	}
	// Hand the slice's backing array to C for the duration of the call.
	// NOTE(review): assumes the C side copies and does not retain the Go
	// pointer past this call (cgo pointer-passing rule).
	cMsgs := (*C.uint32_t)(unsafe.Pointer(&msgs[0]))
	cLength := C.int(length)
	status := C.AddUInt32ToPayload(w.payloadWriterPtr, cMsgs, cLength)
	return HandleCStatus(&status, "AddUInt32ToPayload failed")
}
// AddUInt64ToPayload appends msgs to the payload column via the cgo
// payload writer. An empty slice is rejected up front because the
// &msgs[0] below would be invalid for length zero.
func (w *PayloadWriter) AddUInt64ToPayload(msgs []uint64) error {
	length := len(msgs)
	if length <= 0 {
		return errors.New("can't add empty msgs into payload")
	}
	// Hand the slice's backing array to C for the duration of the call.
	// NOTE(review): assumes the C side copies and does not retain the Go
	// pointer past this call (cgo pointer-passing rule).
	cMsgs := (*C.uint64_t)(unsafe.Pointer(&msgs[0]))
	cLength := C.int(length)
	status := C.AddUInt64ToPayload(w.payloadWriterPtr, cMsgs, cLength)
	return HandleCStatus(&status, "AddUInt64ToPayload failed")
}
func (w *PayloadWriter) AddFloatToPayload(msgs []float32) error {
length := len(msgs)
if length <= 0 {

View File

@ -102,6 +102,61 @@ func BenchmarkPayloadReader_Int64(b *testing.B) {
})
}
// BenchmarkPayloadReader_UInt32 compares the cgo-backed and pure-Go
// payload readers on a column of random uint32 values.
//
// Fix: setup errors were silently discarded (`w, _ :=`, unchecked
// AddUInt32ToPayload/FinishPayloadWriter), unlike the sibling
// BenchmarkPayloadReader_Float32 which asserts NoError; a failed setup
// would make the timed loops benchmark garbage.
func BenchmarkPayloadReader_UInt32(b *testing.B) {
	w, err := NewPayloadWriter(schemapb.DataType_UInt32)
	assert.NoError(b, err)
	defer w.ReleasePayloadWriter()
	data := make([]uint32, 0, numElements)
	for i := 0; i < numElements; i++ {
		data = append(data, uint32(rand.Int31n(1000)))
	}
	err = w.AddUInt32ToPayload(data)
	assert.NoError(b, err)
	err = w.FinishPayloadWriter()
	assert.NoError(b, err)
	buffer, err := w.GetPayloadBufferFromWriter()
	assert.NoError(b, err)

	b.Run("cgo reader", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			// Errors are deliberately ignored inside the timed loop.
			r, _ := NewPayloadReaderCgo(schemapb.DataType_UInt32, buffer)
			r.GetUInt32FromPayload()
			r.ReleasePayloadReader()
		}
	})

	b.Run("go reader", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			r, _ := NewPayloadReader(schemapb.DataType_UInt32, buffer)
			r.GetUInt32FromPayload()
			r.ReleasePayloadReader()
		}
	})
}
// BenchmarkPayloadReader_UInt64 compares the cgo-backed and pure-Go
// payload readers on a column of random uint64 values.
//
// Fix: setup errors were silently discarded (`w, _ :=`, unchecked
// AddUInt64ToPayload/FinishPayloadWriter), unlike the sibling
// BenchmarkPayloadReader_Float32 which asserts NoError; a failed setup
// would make the timed loops benchmark garbage.
func BenchmarkPayloadReader_UInt64(b *testing.B) {
	w, err := NewPayloadWriter(schemapb.DataType_UInt64)
	assert.NoError(b, err)
	defer w.ReleasePayloadWriter()
	data := make([]uint64, 0, numElements)
	for i := 0; i < numElements; i++ {
		data = append(data, uint64(rand.Int63n(1000)))
	}
	err = w.AddUInt64ToPayload(data)
	assert.NoError(b, err)
	err = w.FinishPayloadWriter()
	assert.NoError(b, err)
	buffer, err := w.GetPayloadBufferFromWriter()
	assert.NoError(b, err)

	b.Run("cgo reader", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			// Errors are deliberately ignored inside the timed loop.
			r, _ := NewPayloadReaderCgo(schemapb.DataType_UInt64, buffer)
			r.GetUInt64FromPayload()
			r.ReleasePayloadReader()
		}
	})

	b.Run("go reader", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			r, _ := NewPayloadReader(schemapb.DataType_UInt64, buffer)
			r.GetUInt64FromPayload()
			r.ReleasePayloadReader()
		}
	})
}
func BenchmarkPayloadReader_Float32(b *testing.B) {
w, err := NewPayloadWriter(schemapb.DataType_Float)
assert.NoError(b, err)

View File

@ -212,6 +212,154 @@ func TestPayload_CGO_ReaderandWriter(t *testing.T) {
defer r.ReleasePayloadReader()
})
t.Run("TestUInt8", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt8)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt8ToPayload([]uint8{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint8{4, 5, 6})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt8, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint8s, err := r.GetUInt8FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint8{1, 2, 3, 4, 5, 6}, uint8s)
uiint8s, _, err := r.GetDataFromPayload()
uint8s = uiint8s.([]uint8)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint8{1, 2, 3, 4, 5, 6}, uint8s)
defer r.ReleasePayloadReader()
})
t.Run("TestUInt16", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt16)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt16ToPayload([]uint16{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint16{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt16, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint16s, err := r.GetUInt16FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint16{1, 2, 3, 1, 2, 3}, uint16s)
uiint16s, _, err := r.GetDataFromPayload()
uint16s = uiint16s.([]uint16)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint16{1, 2, 3, 1, 2, 3}, uint16s)
defer r.ReleasePayloadReader()
})
t.Run("TestUInt32", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt32)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt32ToPayload([]uint32{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint32{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt32, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint32s, err := r.GetUInt32FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint32{1, 2, 3, 1, 2, 3}, uint32s)
uiint32s, _, err := r.GetDataFromPayload()
uint32s = uiint32s.([]uint32)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint32{1, 2, 3, 1, 2, 3}, uint32s)
defer r.ReleasePayloadReader()
})
t.Run("TestUInt64", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt64)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt64ToPayload([]uint64{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint64{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt64, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint64s, err := r.GetUInt64FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint64{1, 2, 3, 1, 2, 3}, uint64s)
uiint64s, _, err := r.GetDataFromPayload()
uint64s = uiint64s.([]uint64)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint64{1, 2, 3, 1, 2, 3}, uint64s)
defer r.ReleasePayloadReader()
})
t.Run("TestFloat32", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Float)
require.Nil(t, err)
@ -525,6 +673,70 @@ func TestPayload_CGO_ReaderandWriter(t *testing.T) {
err = w.AddInt64ToPayload([]int64{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt8AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt8)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt8ToPayload([]uint8{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt8ToPayload([]uint8{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt16AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt16)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt16ToPayload([]uint16{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt16ToPayload([]uint16{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt32AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt32)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt32ToPayload([]uint32{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt32ToPayload([]uint32{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt64AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt64)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt64ToPayload([]uint64{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt64ToPayload([]uint64{0})
assert.NotNil(t, err)
})
t.Run("TestAddFloatAfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Float)
require.Nil(t, err)
@ -752,6 +964,103 @@ func TestPayload_CGO_ReaderandWriter(t *testing.T) {
_, err = r.GetInt64FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt8Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt8, buffer)
assert.Nil(t, err)
_, err = r.GetUInt8FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt8FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt16Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt16, buffer)
assert.Nil(t, err)
_, err = r.GetUInt16FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt16FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt32Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt32, buffer)
assert.Nil(t, err)
_, err = r.GetUInt32FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt32FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt64Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReaderCgo(schemapb.DataType_UInt64, buffer)
assert.Nil(t, err)
_, err = r.GetUInt64FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt64FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetFloatError", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)

View File

@ -52,6 +52,19 @@ func (r *PayloadReader) GetDataFromPayload() (interface{}, int, error) {
case schemapb.DataType_Int64:
val, err := r.GetInt64FromPayload()
return val, 0, err
case schemapb.DataType_UInt8:
val, err := r.GetUInt8FromPayload()
return val, 0, err
case schemapb.DataType_UInt16:
val, err := r.GetUInt16FromPayload()
return val, 0, err
case schemapb.DataType_UInt32:
val, err := r.GetUInt32FromPayload()
return val, 0, err
case schemapb.DataType_UInt64:
val, err := r.GetUInt64FromPayload()
return val, 0, err
case schemapb.DataType_Float:
val, err := r.GetFloatFromPayload()
return val, 0, err
@ -196,6 +209,105 @@ func (r *PayloadReader) GetInt64FromPayload() ([]int64, error) {
return values, nil
}
// GetUInt8FromPayload returns the payload column as a []uint8.
// UInt8 values are stored in a parquet INT32 column chunk, so they are read
// as int32 and narrowed to uint8.
func (r *PayloadReader) GetUInt8FromPayload() ([]uint8, error) {
	if r.colType != schemapb.DataType_UInt8 {
		return nil, fmt.Errorf("failed to get uint8 from datatype %v", r.colType.String())
	}
	// Fixed: the error text used to name a nonexistent type
	// (*file.UInt32(8)ColumnChunkReader); report the type actually asserted.
	reader, ok := r.reader.RowGroup(0).Column(0).(*file.Int32ColumnChunkReader)
	if !ok {
		return nil, fmt.Errorf("expect type *file.Int32ColumnChunkReader, but got %T", r.reader.RowGroup(0).Column(0))
	}
	values := make([]int32, r.numRows)
	total, valuesRead, err := reader.ReadBatch(r.numRows, values, nil, nil)
	if err != nil {
		return nil, err
	}
	if total != r.numRows || int64(valuesRead) != r.numRows {
		return nil, fmt.Errorf("expect %d rows, but got total = %d and valuesRead = %d", r.numRows, total, valuesRead)
	}
	ret := make([]uint8, r.numRows)
	for i := int64(0); i < r.numRows; i++ {
		ret[i] = uint8(values[i])
	}
	return ret, nil
}
// GetUInt16FromPayload returns the payload column as a []uint16.
// UInt16 values are stored in a parquet INT32 column chunk, so they are read
// as int32 and narrowed to uint16.
func (r *PayloadReader) GetUInt16FromPayload() ([]uint16, error) {
	if r.colType != schemapb.DataType_UInt16 {
		return nil, fmt.Errorf("failed to get uint16 from datatype %v", r.colType.String())
	}
	// Fixed: the error text used to name a nonexistent type
	// (*file.UInt32(16)ColumnChunkReader); report the type actually asserted.
	reader, ok := r.reader.RowGroup(0).Column(0).(*file.Int32ColumnChunkReader)
	if !ok {
		return nil, fmt.Errorf("expect type *file.Int32ColumnChunkReader, but got %T", r.reader.RowGroup(0).Column(0))
	}
	values := make([]int32, r.numRows)
	total, valuesRead, err := reader.ReadBatch(r.numRows, values, nil, nil)
	if err != nil {
		return nil, err
	}
	if total != r.numRows || int64(valuesRead) != r.numRows {
		return nil, fmt.Errorf("expect %d rows, but got total = %d and valuesRead = %d", r.numRows, total, valuesRead)
	}
	ret := make([]uint16, r.numRows)
	for i := int64(0); i < r.numRows; i++ {
		ret[i] = uint16(values[i])
	}
	return ret, nil
}
// GetUInt32FromPayload returns the payload column as a []uint32.
// UInt32 values are stored in a parquet INT32 column chunk and reinterpreted
// on the way out.
func (r *PayloadReader) GetUInt32FromPayload() ([]uint32, error) {
	if r.colType != schemapb.DataType_UInt32 {
		return nil, fmt.Errorf("failed to get uint32 from datatype %v", r.colType.String())
	}
	column := r.reader.RowGroup(0).Column(0)
	chunkReader, ok := column.(*file.Int32ColumnChunkReader)
	if !ok {
		return nil, fmt.Errorf("expect type *file.Int32ColumnChunkReader, but got %T", column)
	}
	raw := make([]int32, r.numRows)
	total, valuesRead, err := chunkReader.ReadBatch(r.numRows, raw, nil, nil)
	if err != nil {
		return nil, err
	}
	if total != r.numRows || int64(valuesRead) != r.numRows {
		return nil, fmt.Errorf("expect %d rows, but got total = %d and valuesRead = %d", r.numRows, total, valuesRead)
	}
	out := make([]uint32, r.numRows)
	for i, v := range raw {
		out[i] = uint32(v)
	}
	return out, nil
}
// GetUInt64FromPayload returns the payload column as a []uint64.
// UInt64 values are stored in a parquet INT64 column chunk and reinterpreted
// on the way out.
func (r *PayloadReader) GetUInt64FromPayload() ([]uint64, error) {
	if r.colType != schemapb.DataType_UInt64 {
		return nil, fmt.Errorf("failed to get uint64 from datatype %v", r.colType.String())
	}
	column := r.reader.RowGroup(0).Column(0)
	chunkReader, ok := column.(*file.Int64ColumnChunkReader)
	if !ok {
		return nil, fmt.Errorf("expect type *file.Int64ColumnChunkReader, but got %T", column)
	}
	raw := make([]int64, r.numRows)
	total, valuesRead, err := chunkReader.ReadBatch(r.numRows, raw, nil, nil)
	if err != nil {
		return nil, err
	}
	if total != r.numRows || int64(valuesRead) != r.numRows {
		return nil, fmt.Errorf("expect %d rows, but got total = %d and valuesRead = %d", r.numRows, total, valuesRead)
	}
	out := make([]uint64, r.numRows)
	for i, v := range raw {
		out[i] = uint64(v)
	}
	return out, nil
}
func (r *PayloadReader) GetFloatFromPayload() ([]float32, error) {
if r.colType != schemapb.DataType_Float {
return nil, fmt.Errorf("failed to get float32 from datatype %v", r.colType.String())

View File

@ -58,6 +58,18 @@ func (r *PayloadReaderCgo) GetDataFromPayload() (interface{}, int, error) {
case schemapb.DataType_Int64:
val, err := r.GetInt64FromPayload()
return val, 0, err
case schemapb.DataType_UInt8:
val, err := r.GetUInt8FromPayload()
return val, 0, err
case schemapb.DataType_UInt16:
val, err := r.GetUInt16FromPayload()
return val, 0, err
case schemapb.DataType_UInt32:
val, err := r.GetUInt32FromPayload()
return val, 0, err
case schemapb.DataType_UInt64:
val, err := r.GetUInt64FromPayload()
return val, 0, err
case schemapb.DataType_Float:
val, err := r.GetFloatFromPayload()
return val, 0, err
@ -189,6 +201,75 @@ func (r *PayloadReaderCgo) GetInt64FromPayload() ([]int64, error) {
return slice, nil
}
// GetUInt8FromPayload returns uint8 slice from payload
func (r *PayloadReaderCgo) GetUInt8FromPayload() ([]uint8, error) {
	if r.colType != schemapb.DataType_UInt8 {
		return nil, errors.New("incorrect data type")
	}
	var cMsg *C.uint8_t
	var cSize C.int
	status := C.GetUInt8FromPayload(r.payloadReaderPtr, &cMsg, &cSize)
	if err := HandleCStatus(&status, "GetUInt8FromPayload failed"); err != nil {
		return nil, err
	}
	// Reinterpret the C buffer as a Go slice without copying.
	// NOTE(review): the buffer appears to be owned by the C-side payload
	// reader — confirm its lifetime before retaining the slice past release.
	slice := (*[1 << 28]uint8)(unsafe.Pointer(cMsg))[:cSize:cSize]
	return slice, nil
}
// GetUInt16FromPayload returns uint16 slice from payload
func (r *PayloadReaderCgo) GetUInt16FromPayload() ([]uint16, error) {
	if r.colType != schemapb.DataType_UInt16 {
		return nil, errors.New("incorrect data type")
	}
	var cMsg *C.uint16_t
	var cSize C.int
	status := C.GetUInt16FromPayload(r.payloadReaderPtr, &cMsg, &cSize)
	if err := HandleCStatus(&status, "GetUInt16FromPayload failed"); err != nil {
		return nil, err
	}
	// Reinterpret the C buffer as a Go slice without copying.
	// NOTE(review): the buffer appears to be owned by the C-side payload
	// reader — confirm its lifetime before retaining the slice past release.
	slice := (*[1 << 28]uint16)(unsafe.Pointer(cMsg))[:cSize:cSize]
	return slice, nil
}
// GetUInt32FromPayload returns uint32 slice from payload
func (r *PayloadReaderCgo) GetUInt32FromPayload() ([]uint32, error) {
	if r.colType != schemapb.DataType_UInt32 {
		return nil, errors.New("incorrect data type")
	}
	var cMsg *C.uint32_t
	var cSize C.int
	status := C.GetUInt32FromPayload(r.payloadReaderPtr, &cMsg, &cSize)
	if err := HandleCStatus(&status, "GetUInt32FromPayload failed"); err != nil {
		return nil, err
	}
	// Reinterpret the C buffer as a Go slice without copying.
	// NOTE(review): the buffer appears to be owned by the C-side payload
	// reader — confirm its lifetime before retaining the slice past release.
	slice := (*[1 << 28]uint32)(unsafe.Pointer(cMsg))[:cSize:cSize]
	return slice, nil
}
// GetUInt64FromPayload returns uint64 slice from payload
func (r *PayloadReaderCgo) GetUInt64FromPayload() ([]uint64, error) {
	if r.colType != schemapb.DataType_UInt64 {
		return nil, errors.New("incorrect data type")
	}
	var cMsg *C.uint64_t
	var cSize C.int
	status := C.GetUInt64FromPayload(r.payloadReaderPtr, &cMsg, &cSize)
	if err := HandleCStatus(&status, "GetUInt64FromPayload failed"); err != nil {
		return nil, err
	}
	// Reinterpret the C buffer as a Go slice without copying.
	// NOTE(review): the buffer appears to be owned by the C-side payload
	// reader — confirm its lifetime before retaining the slice past release.
	slice := (*[1 << 28]uint64)(unsafe.Pointer(cMsg))[:cSize:cSize]
	return slice, nil
}
func (r *PayloadReaderCgo) GetFloatFromPayload() ([]float32, error) {
if r.colType != schemapb.DataType_Float {
return nil, errors.New("incorrect data type")

View File

@ -102,6 +102,44 @@ func TestPayload_ReaderAndWriter(t *testing.T) {
defer r.ReleasePayloadReader()
})
t.Run("TestUInt8", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt8)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt8ToPayload([]uint8{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint8{4, 5, 6})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt8, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint8s, err := r.GetUInt8FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint8{1, 2, 3, 4, 5, 6}, uint8s)
uiint8s, _, err := r.GetDataFromPayload()
uint8s = uiint8s.([]uint8)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint8{1, 2, 3, 4, 5, 6}, uint8s)
defer r.ReleasePayloadReader()
})
t.Run("TestInt16", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Int16)
require.Nil(t, err)
@ -138,6 +176,42 @@ func TestPayload_ReaderAndWriter(t *testing.T) {
defer r.ReleasePayloadReader()
})
t.Run("TestUInt16", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt16)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt16ToPayload([]uint16{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint16{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt16, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint16s, err := r.GetUInt16FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint16{1, 2, 3, 1, 2, 3}, uint16s)
uiint16s, _, err := r.GetDataFromPayload()
uint16s = uiint16s.([]uint16)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint16{1, 2, 3, 1, 2, 3}, uint16s)
defer r.ReleasePayloadReader()
})
t.Run("TestInt32", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Int32)
require.Nil(t, err)
@ -175,6 +249,43 @@ func TestPayload_ReaderAndWriter(t *testing.T) {
defer r.ReleasePayloadReader()
})
t.Run("TestUInt32", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt32)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt32ToPayload([]uint32{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint32{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt32, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint32s, err := r.GetUInt32FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint32{1, 2, 3, 1, 2, 3}, uint32s)
uiint32s, _, err := r.GetDataFromPayload()
uint32s = uiint32s.([]uint32)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint32{1, 2, 3, 1, 2, 3}, uint32s)
defer r.ReleasePayloadReader()
})
t.Run("TestInt64", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Int64)
require.Nil(t, err)
@ -212,6 +323,43 @@ func TestPayload_ReaderAndWriter(t *testing.T) {
defer r.ReleasePayloadReader()
})
t.Run("TestUInt64", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt64)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt64ToPayload([]uint64{1, 2, 3})
assert.Nil(t, err)
err = w.AddDataToPayload([]uint64{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
length, err := w.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.Equal(t, 6, length)
defer w.ReleasePayloadWriter()
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt64, buffer)
require.Nil(t, err)
length, err = r.GetPayloadLengthFromReader()
assert.Nil(t, err)
assert.Equal(t, length, 6)
uint64s, err := r.GetUInt64FromPayload()
assert.Nil(t, err)
assert.ElementsMatch(t, []uint64{1, 2, 3, 1, 2, 3}, uint64s)
uiint64s, _, err := r.GetDataFromPayload()
uint64s = uiint64s.([]uint64)
assert.Nil(t, err)
assert.ElementsMatch(t, []uint64{1, 2, 3, 1, 2, 3}, uint64s)
defer r.ReleasePayloadReader()
})
t.Run("TestFloat32", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Float)
require.Nil(t, err)
@ -525,6 +673,70 @@ func TestPayload_ReaderAndWriter(t *testing.T) {
err = w.AddInt64ToPayload([]int64{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt8AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt8)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt8ToPayload([]uint8{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt8ToPayload([]uint8{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt16AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt16)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt16ToPayload([]uint16{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt16ToPayload([]uint16{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt32AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt32)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt32ToPayload([]uint32{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt32ToPayload([]uint32{0})
assert.NotNil(t, err)
})
t.Run("TestAddUInt64AfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt64)
require.Nil(t, err)
require.NotNil(t, w)
defer w.Close()
_, err = w.GetPayloadBufferFromWriter()
assert.NotNil(t, err)
err = w.AddUInt64ToPayload([]uint64{})
assert.NotNil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
err = w.AddUInt64ToPayload([]uint64{0})
assert.NotNil(t, err)
})
t.Run("TestAddFloatAfterFinish", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Float)
require.Nil(t, err)
@ -857,6 +1069,187 @@ func TestPayload_ReaderAndWriter(t *testing.T) {
_, err = r.GetInt64FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt8Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt8, buffer)
assert.Nil(t, err)
_, err = r.GetUInt8FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt8FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt8Error2", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt8)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt8ToPayload([]uint8{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt8, buffer)
assert.Nil(t, err)
r.numRows = 99
_, err = r.GetUInt8FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt16Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt16, buffer)
assert.Nil(t, err)
_, err = r.GetUInt16FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt16FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt16Error2", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt16)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt16ToPayload([]uint16{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt16, buffer)
assert.Nil(t, err)
r.numRows = 99
_, err = r.GetUInt16FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt32Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt32, buffer)
assert.Nil(t, err)
_, err = r.GetUInt32FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt32FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt32Error2", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt32)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt32ToPayload([]uint32{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt32, buffer)
assert.Nil(t, err)
r.numRows = 99
_, err = r.GetUInt32FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt64Error", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddBoolToPayload([]bool{false, true, true})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt64, buffer)
assert.Nil(t, err)
_, err = r.GetUInt64FromPayload()
assert.NotNil(t, err)
r.colType = 999
_, err = r.GetUInt64FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetUInt64Error2", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_UInt64)
require.Nil(t, err)
require.NotNil(t, w)
err = w.AddUInt64ToPayload([]uint64{1, 2, 3})
assert.Nil(t, err)
err = w.FinishPayloadWriter()
assert.Nil(t, err)
buffer, err := w.GetPayloadBufferFromWriter()
assert.Nil(t, err)
r, err := NewPayloadReader(schemapb.DataType_UInt64, buffer)
assert.Nil(t, err)
r.numRows = 99
_, err = r.GetUInt64FromPayload()
assert.NotNil(t, err)
})
t.Run("TestGetFloatError", func(t *testing.T) {
w, err := NewPayloadWriter(schemapb.DataType_Bool)
require.Nil(t, err)

View File

@ -261,6 +261,40 @@ func printPayloadValues(colType schemapb.DataType, reader PayloadReaderInterface
for i, v := range val {
fmt.Printf("\t\t%d : %d\n", i, v)
}
case schemapb.DataType_UInt8:
val, err := reader.GetUInt8FromPayload()
if err != nil {
return err
}
for i, v := range val {
fmt.Printf("\t\t%d : %d\n", i, v)
}
case schemapb.DataType_UInt16:
val, err := reader.GetUInt16FromPayload()
if err != nil {
return err
}
for i, v := range val {
fmt.Printf("\t\t%d : %d\n", i, v)
}
case schemapb.DataType_UInt32:
val, err := reader.GetUInt32FromPayload()
if err != nil {
return err
}
for i, v := range val {
fmt.Printf("\t\t%d : %d\n", i, v)
}
case schemapb.DataType_UInt64:
val, err := reader.GetUInt64FromPayload()
if err != nil {
return err
}
for i, v := range val {
fmt.Printf("\t\t%d : %d\n", i, v)
}
case schemapb.DataType_Float:
val, err := reader.GetFloatFromPayload()
if err != nil {

View File

@ -178,6 +178,34 @@ func TestPrintBinlogFiles(t *testing.T) {
Description: "description_11",
DataType: schemapb.DataType_FloatVector,
},
{
FieldID: 110,
Name: "field_uint8",
IsPrimaryKey: false,
Description: "description_12",
DataType: schemapb.DataType_UInt8,
},
{
FieldID: 111,
Name: "field_uint16",
IsPrimaryKey: false,
Description: "description_13",
DataType: schemapb.DataType_UInt16,
},
{
FieldID: 112,
Name: "field_uint32",
IsPrimaryKey: false,
Description: "description_14",
DataType: schemapb.DataType_UInt32,
},
{
FieldID: 113,
Name: "field_uint64",
IsPrimaryKey: false,
Description: "description_15",
DataType: schemapb.DataType_UInt64,
},
},
},
}
@ -234,6 +262,22 @@ func TestPrintBinlogFiles(t *testing.T) {
Data: []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7},
Dim: 8,
},
110: &UInt8FieldData{
NumRows: []int64{2},
Data: []uint8{3, 4},
},
111: &UInt16FieldData{
NumRows: []int64{2},
Data: []uint16{3, 4},
},
112: &UInt32FieldData{
NumRows: []int64{2},
Data: []uint32{3, 4},
},
113: &UInt64FieldData{
NumRows: []int64{2},
Data: []uint64{3, 4},
},
},
}
@ -289,6 +333,22 @@ func TestPrintBinlogFiles(t *testing.T) {
Data: []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7},
Dim: 8,
},
110: &UInt8FieldData{
NumRows: []int64{2},
Data: []uint8{1, 2},
},
111: &UInt16FieldData{
NumRows: []int64{2},
Data: []uint16{1, 2},
},
112: &UInt32FieldData{
NumRows: []int64{2},
Data: []uint32{1, 2},
},
113: &UInt64FieldData{
NumRows: []int64{2},
Data: []uint64{1, 2},
},
},
}
firstBlobs, _, err := insertCodec.Serialize(1, 1, insertDataFirst)

View File

@ -48,6 +48,30 @@ func UnsafeReadInt64(buf []byte, idx int) int64 {
return *((*int64)(ptr))
}
/* #nosec G103 */
// UnsafeReadUInt8 reinterprets buf[idx] as a uint8 without copying.
func UnsafeReadUInt8(buf []byte, idx int) uint8 {
	return *(*uint8)(unsafe.Pointer(&buf[idx]))
}
/* #nosec G103 */
// UnsafeReadUInt16 reinterprets the bytes at buf[idx] as a native-endian uint16.
func UnsafeReadUInt16(buf []byte, idx int) uint16 {
	return *(*uint16)(unsafe.Pointer(&buf[idx]))
}
/* #nosec G103 */
// UnsafeReadUInt32 reinterprets the bytes at buf[idx] as a native-endian uint32.
func UnsafeReadUInt32(buf []byte, idx int) uint32 {
	return *(*uint32)(unsafe.Pointer(&buf[idx]))
}
/* #nosec G103 */
// UnsafeReadUInt64 reinterprets the bytes at buf[idx] as a native-endian uint64.
func UnsafeReadUInt64(buf []byte, idx int) uint64 {
	return *(*uint64)(unsafe.Pointer(&buf[idx]))
}
/* #nosec G103 */
func UnsafeReadFloat32(buf []byte, idx int) float32 {
ptr := unsafe.Pointer(&(buf[idx]))

View File

@ -39,6 +39,22 @@ func TestUnsafe(t *testing.T) {
int64Res := UnsafeReadInt64(buf, 0)
assert.Equal(t, int64Res, int64(1157442765409226768))
buf = []byte{16}
uint8Res := UnsafeReadUInt8(buf, 0)
assert.Equal(t, uint8Res, uint8(16))
buf = []byte{16, 16}
uint16Res := UnsafeReadUInt16(buf, 0)
assert.Equal(t, uint16Res, uint16(4112))
buf = []byte{16, 16, 16, 16}
uint32Res := UnsafeReadUInt32(buf, 0)
assert.Equal(t, uint32Res, uint32(269488144))
buf = []byte{16, 16, 16, 16, 16, 16, 16, 16}
uint64Res := UnsafeReadUInt64(buf, 0)
assert.Equal(t, uint64Res, uint64(1157442765409226768))
buf = []byte{16, 16, 16, 16}
float32Res := UnsafeReadFloat32(buf, 0)
assert.Equal(t, float32Res, float32(2.8411367e-29))

View File

@ -272,6 +272,46 @@ func readInt64Array(blobReaders []io.Reader) []int64 {
return ret
}
// readUInt8Array decodes one uint8 from each blob reader and collects the
// results in input order.
func readUInt8Array(blobReaders []io.Reader) []uint8 {
	// Pre-size the slice: exactly one value is appended per reader.
	ret := make([]uint8, 0, len(blobReaders))
	for _, reader := range blobReaders {
		var v uint8
		ReadBinary(reader, &v, schemapb.DataType_UInt8)
		ret = append(ret, v)
	}
	return ret
}
// readUInt16Array decodes one uint16 from each blob reader and collects the
// results in input order.
func readUInt16Array(blobReaders []io.Reader) []uint16 {
	// Pre-size the slice: exactly one value is appended per reader.
	ret := make([]uint16, 0, len(blobReaders))
	for _, reader := range blobReaders {
		var v uint16
		ReadBinary(reader, &v, schemapb.DataType_UInt16)
		ret = append(ret, v)
	}
	return ret
}
// readUInt32Array decodes one uint32 from each blob reader and collects the
// results in input order.
func readUInt32Array(blobReaders []io.Reader) []uint32 {
	// Pre-size the slice: exactly one value is appended per reader.
	ret := make([]uint32, 0, len(blobReaders))
	for _, reader := range blobReaders {
		var v uint32
		ReadBinary(reader, &v, schemapb.DataType_UInt32)
		ret = append(ret, v)
	}
	return ret
}
// readUInt64Array decodes one uint64 from each blob reader and collects the
// results in input order.
func readUInt64Array(blobReaders []io.Reader) []uint64 {
	// Pre-size the slice: exactly one value is appended per reader.
	ret := make([]uint64, 0, len(blobReaders))
	for _, reader := range blobReaders {
		var v uint64
		ReadBinary(reader, &v, schemapb.DataType_UInt64)
		ret = append(ret, v)
	}
	return ret
}
func readFloatArray(blobReaders []io.Reader) []float32 {
ret := make([]float32, 0)
for _, r := range blobReaders {
@ -377,6 +417,45 @@ func RowBasedInsertMsgToInsertData(msg *msgstream.InsertMsg, collSchema *schemap
fieldData.Data = readInt64Array(blobReaders)
}
case schemapb.DataType_UInt8:
idata.Data[field.FieldID] = &UInt8FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: readUInt8Array(blobReaders),
}
case schemapb.DataType_UInt16:
idata.Data[field.FieldID] = &UInt16FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: readUInt16Array(blobReaders),
}
case schemapb.DataType_UInt32:
idata.Data[field.FieldID] = &UInt32FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: readUInt32Array(blobReaders),
}
case schemapb.DataType_UInt64:
idata.Data[field.FieldID] = &UInt64FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: nil,
}
fieldData := idata.Data[field.FieldID].(*UInt64FieldData)
switch field.FieldID {
case 0: // rowIDs
//fieldData.Data = append(fieldData.Data, msg.RowIDs...)
for _, ids := range msg.RowIDs {
fieldData.Data = append(fieldData.Data, uint64(ids))
}
case 1: // Timestamps
//for _, ts := range msg.Timestamps {
fieldData.Data = append(fieldData.Data, msg.Timestamps...)
//}
default:
fieldData.Data = readUInt64Array(blobReaders)
}
case schemapb.DataType_Float:
idata.Data[field.FieldID] = &FloatFieldData{
NumRows: []int64{int64(msg.NRows())},
@ -519,6 +598,72 @@ func ColumnBasedInsertMsgToInsertData(msg *msgstream.InsertMsg, collSchema *sche
idata.Data[field.FieldID] = fieldData
case schemapb.DataType_UInt8:
srcData := srcFields[field.FieldID].GetScalars().GetUintData().GetData()
fieldData := &UInt8FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: make([]uint8, 0, len(srcData)),
}
uint8SrcData := make([]uint8, len(srcData))
for i := 0; i < len(srcData); i++ {
uint8SrcData[i] = uint8(srcData[i])
}
fieldData.Data = append(fieldData.Data, uint8SrcData...)
idata.Data[field.FieldID] = fieldData
case schemapb.DataType_UInt16:
srcData := srcFields[field.FieldID].GetScalars().GetUintData().GetData()
fieldData := &UInt16FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: make([]uint16, 0, len(srcData)),
}
uint16SrcData := make([]uint16, len(srcData))
for i := 0; i < len(srcData); i++ {
uint16SrcData[i] = uint16(srcData[i])
}
fieldData.Data = append(fieldData.Data, uint16SrcData...)
idata.Data[field.FieldID] = fieldData
case schemapb.DataType_UInt32:
srcData := srcFields[field.FieldID].GetScalars().GetUintData().GetData()
fieldData := &UInt32FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: make([]uint32, 0, len(srcData)),
}
fieldData.Data = append(fieldData.Data, srcData...)
idata.Data[field.FieldID] = fieldData
case schemapb.DataType_UInt64:
fieldData := &UInt64FieldData{
NumRows: []int64{int64(msg.NRows())},
Data: make([]uint64, 0),
}
switch field.FieldID {
case 0: // rowIDs
fieldData.Data = make([]uint64, 0, len(msg.RowIDs))
for _, IDs := range msg.RowIDs {
fieldData.Data = append(fieldData.Data, uint64(IDs))
}
case 1: // Timestamps
fieldData.Data = make([]uint64, 0, len(msg.Timestamps))
//for _, ts := range msg.Timestamps {
fieldData.Data = append(fieldData.Data, msg.Timestamps...)
//}
default:
srcData := srcFields[field.FieldID].GetScalars().GetUlongData().GetData()
fieldData.Data = make([]uint64, 0, len(srcData))
fieldData.Data = append(fieldData.Data, srcData...)
}
idata.Data[field.FieldID] = fieldData
case schemapb.DataType_Float:
srcData := srcFields[field.FieldID].GetScalars().GetFloatData().GetData()
@ -628,6 +773,58 @@ func mergeInt64Field(data *InsertData, fid FieldID, field *Int64FieldData) {
fieldData.NumRows[0] += int64(field.RowNum())
}
// mergeUInt8Field appends field's uint8 values and row count into data under
// fid, creating an empty entry first when fid is not present yet.
func mergeUInt8Field(data *InsertData, fid FieldID, field *UInt8FieldData) {
	if _, exists := data.Data[fid]; !exists {
		data.Data[fid] = &UInt8FieldData{
			NumRows: []int64{0},
			Data:    nil,
		}
	}
	dst := data.Data[fid].(*UInt8FieldData)
	dst.Data = append(dst.Data, field.Data...)
	dst.NumRows[0] += int64(field.RowNum())
}
// mergeUInt16Field appends field's uint16 values and row count into data under
// fid, creating an empty entry first when fid is not present yet.
func mergeUInt16Field(data *InsertData, fid FieldID, field *UInt16FieldData) {
	if _, exists := data.Data[fid]; !exists {
		data.Data[fid] = &UInt16FieldData{
			NumRows: []int64{0},
			Data:    nil,
		}
	}
	dst := data.Data[fid].(*UInt16FieldData)
	dst.Data = append(dst.Data, field.Data...)
	dst.NumRows[0] += int64(field.RowNum())
}
// mergeUInt32Field appends field's uint32 values and row count into data under
// fid, creating an empty entry first when fid is not present yet.
func mergeUInt32Field(data *InsertData, fid FieldID, field *UInt32FieldData) {
	if _, exists := data.Data[fid]; !exists {
		data.Data[fid] = &UInt32FieldData{
			NumRows: []int64{0},
			Data:    nil,
		}
	}
	dst := data.Data[fid].(*UInt32FieldData)
	dst.Data = append(dst.Data, field.Data...)
	dst.NumRows[0] += int64(field.RowNum())
}
// mergeUInt64Field appends field's uint64 values and row count into data under
// fid, creating an empty entry first when fid is not present yet.
func mergeUInt64Field(data *InsertData, fid FieldID, field *UInt64FieldData) {
	if _, exists := data.Data[fid]; !exists {
		data.Data[fid] = &UInt64FieldData{
			NumRows: []int64{0},
			Data:    nil,
		}
	}
	dst := data.Data[fid].(*UInt64FieldData)
	dst.Data = append(dst.Data, field.Data...)
	dst.NumRows[0] += int64(field.RowNum())
}
func mergeFloatField(data *InsertData, fid FieldID, field *FloatFieldData) {
if _, ok := data.Data[fid]; !ok {
fieldData := &FloatFieldData{
@ -711,6 +908,16 @@ func MergeFieldData(data *InsertData, fid FieldID, field FieldData) {
mergeInt32Field(data, fid, field)
case *Int64FieldData:
mergeInt64Field(data, fid, field)
case *UInt8FieldData:
mergeUInt8Field(data, fid, field)
case *UInt16FieldData:
mergeUInt16Field(data, fid, field)
case *UInt32FieldData:
mergeUInt32Field(data, fid, field)
case *UInt64FieldData:
mergeUInt64Field(data, fid, field)
case *FloatFieldData:
mergeFloatField(data, fid, field)
case *DoubleFieldData:
@ -842,6 +1049,16 @@ func FieldDataToBytes(endian binary.ByteOrder, fieldData FieldData) ([]byte, err
return binaryWrite(endian, field.Data)
case *Int64FieldData:
return binaryWrite(endian, field.Data)
case *UInt8FieldData:
return binaryWrite(endian, field.Data)
case *UInt16FieldData:
return binaryWrite(endian, field.Data)
case *UInt32FieldData:
return binaryWrite(endian, field.Data)
case *UInt64FieldData:
return binaryWrite(endian, field.Data)
case *FloatFieldData:
return binaryWrite(endian, field.Data)
case *DoubleFieldData:
@ -934,6 +1151,72 @@ func TransferInsertDataToInsertRecord(insertData *InsertData) (*segcorepb.Insert
},
},
}
case *UInt8FieldData:
uint32Data := make([]uint32, len(rawData.Data))
for index, v := range rawData.Data {
uint32Data[index] = uint32(v)
}
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_UInt8,
FieldId: fieldID,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: uint32Data,
},
},
},
},
}
case *UInt16FieldData:
uint32Data := make([]uint32, len(rawData.Data))
for index, v := range rawData.Data {
uint32Data[index] = uint32(v)
}
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_UInt16,
FieldId: fieldID,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: uint32Data,
},
},
},
},
}
case *UInt32FieldData:
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_UInt32,
FieldId: fieldID,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: rawData.Data,
},
},
},
},
}
case *UInt64FieldData:
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_UInt64,
FieldId: fieldID,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: rawData.Data,
},
},
},
},
}
case *FloatFieldData:
fieldData = &schemapb.FieldData{
Type: schemapb.DataType_Float,

View File

@ -100,15 +100,28 @@ func TestSortFieldDataList(t *testing.T) {
Data: []int64{7, 8, 9},
}
ls := fieldDataList{
IDs: []FieldID{1, 3, 2},
datas: []FieldData{f1, f3, f2},
f4 := &UInt16FieldData{
NumRows: nil,
Data: []uint16{1, 2, 3},
}
f5 := &UInt32FieldData{
NumRows: nil,
Data: []uint32{4, 5, 6},
}
f6 := &UInt64FieldData{
NumRows: nil,
Data: []uint64{7, 8, 9},
}
assert.Equal(t, 3, ls.Len())
ls := fieldDataList{
IDs: []FieldID{1, 3, 2, 4, 5, 6},
datas: []FieldData{f1, f3, f2, f4, f5, f6},
}
assert.Equal(t, 6, ls.Len())
sortFieldDataList(ls)
assert.ElementsMatch(t, []FieldID{1, 2, 3}, ls.IDs)
assert.ElementsMatch(t, []FieldData{f1, f2, f3}, ls.datas)
assert.ElementsMatch(t, []FieldID{1, 2, 3, 4, 5, 6}, ls.IDs)
assert.ElementsMatch(t, []FieldData{f1, f2, f3, f4, f5, f6}, ls.datas)
}
func TestTransferColumnBasedInsertDataToRowBased(t *testing.T) {
@ -178,6 +191,19 @@ func TestTransferColumnBasedInsertDataToRowBased(t *testing.T) {
Data: []float32{0, 0, 0},
}
f11 := &UInt8FieldData{
Data: []uint8{0, 0xf, 0x1f},
}
f12 := &UInt16FieldData{
Data: []uint16{0, 0xff, 0x1fff},
}
f13 := &UInt32FieldData{
Data: []uint32{0, 0xffff, 0x1fffffff},
}
f14 := &UInt64FieldData{
Data: []uint64{0, 0xffffffff, 0x1fffffffffffffff},
}
data.Data[101] = f1
data.Data[102] = f2
data.Data[103] = f3
@ -189,6 +215,10 @@ func TestTransferColumnBasedInsertDataToRowBased(t *testing.T) {
data.Data[109] = f9
data.Data[110] = f10
data.Data[111] = f11
data.Data[112] = f12
data.Data[113] = f13
data.Data[114] = f14
utss, rowIds, rows, err := TransferColumnBasedInsertDataToRowBased(data)
assert.NoError(t, err)
assert.ElementsMatch(t, []uint64{1, 2, 3}, utss)
@ -210,6 +240,11 @@ func TestTransferColumnBasedInsertDataToRowBased(t *testing.T) {
// b + 1, // "1"
1, // 1
0, 0, 0, 0, // 0
0, // 0
0, 0, // 0
0, 0, 0, 0, // 0
0, 0, 0, 0, 0, 0, 0, 0, // 0
},
rows[0].Value)
assert.ElementsMatch(t,
@ -224,6 +259,11 @@ func TestTransferColumnBasedInsertDataToRowBased(t *testing.T) {
// b + 2, // "2"
2, // 2
0, 0, 0, 0, // 0
0xf, // 0xf
0, 0xff, // 0xff
0, 0, 0xff, 0xff, // 0xffff
0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, // 0xffffffff
},
rows[1].Value)
assert.ElementsMatch(t,
@ -238,6 +278,11 @@ func TestTransferColumnBasedInsertDataToRowBased(t *testing.T) {
// b + 3, // "3"
3, // 3
0, 0, 0, 0, // 0
0x1f, // 0x1f
0xff, 0x1f, // 0x1fff
0xff, 0xff, 0xff, 0x1f, // 0x1fffffff
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, // 0x1fffffffffffffff
},
rows[2].Value)
}
@ -288,6 +333,12 @@ func TestReadBinary(t *testing.T) {
// b + 3, // "3"
3, // 3
// 0, 0, 0, 0, // 0
0x1f, // 0x1f
0xff, 0x1f, // 0x1fff
0xff, 0xff, 0xff, 0x1f, // 0x1fffffff
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, // 0x1fffffffffffffff
},
)
@ -316,6 +367,22 @@ func TestReadBinary(t *testing.T) {
ReadBinary(reader, &bvec, schemapb.DataType_BinaryVector)
assert.Equal(t, []byte{3}, bvec)
var u8 uint8
ReadBinary(reader, &u8, schemapb.DataType_UInt8)
assert.Equal(t, uint8(0x1f), u8)
var u16 uint16
ReadBinary(reader, &u16, schemapb.DataType_UInt16)
assert.Equal(t, uint16(0x1fff), u16)
var u32 uint32
ReadBinary(reader, &u32, schemapb.DataType_UInt32)
assert.Equal(t, uint32(0x1fffffff), u32)
var u64 uint64
ReadBinary(reader, &u64, schemapb.DataType_UInt64)
assert.Equal(t, uint64(0x1fffffffffffffff), u64)
// should print error here, no content in reader.
ReadBinary(reader, &bvec, schemapb.DataType_BinaryVector)
}
@ -343,6 +410,18 @@ func genAllFieldsSchema(fVecDim, bVecDim int) (schema *schemapb.CollectionSchema
{
DataType: schemapb.DataType_Int32,
},
{
DataType: schemapb.DataType_UInt8,
},
{
DataType: schemapb.DataType_UInt16,
},
{
DataType: schemapb.DataType_UInt32,
},
{
DataType: schemapb.DataType_UInt64,
},
{
DataType: schemapb.DataType_Float,
},
@ -438,6 +517,22 @@ func generateInt64Array(numRows int) []int64 {
return ret
}
// generateUInt32Array returns numRows pseudo-random uint32 values for use
// as test fixture data.
//
// It uses rand.Uint32 directly: the previous uint32(rand.Int()) form
// truncated a non-negative 63-bit value, which biases the distribution
// of the high bits; rand.Uint32 covers the full 32-bit range uniformly.
func generateUInt32Array(numRows int) []uint32 {
	// Pre-size the slice; the final length is known up front.
	ret := make([]uint32, numRows)
	for i := range ret {
		ret[i] = rand.Uint32()
	}
	return ret
}
// generateUInt64Array returns numRows pseudo-random uint64 values for use
// as test fixture data.
//
// It uses rand.Uint64 directly: the previous uint64(rand.Int()) form can
// never produce a value with the top bit set (rand.Int is non-negative),
// so a "uint64" generator silently missed half of its range. rand.Uint64
// covers all 64 bits.
func generateUInt64Array(numRows int) []uint64 {
	// Pre-size the slice; the final length is known up front.
	ret := make([]uint64, numRows)
	for i := range ret {
		ret[i] = rand.Uint64()
	}
	return ret
}
func generateFloat32Array(numRows int) []float32 {
ret := make([]float32, 0, numRows)
for i := 0; i < numRows; i++ {
@ -498,6 +593,28 @@ func genRowWithAllFields(fVecDim, bVecDim int) (blob *commonpb.Blob, pk int64, r
_ = binary.Write(&buffer, common.Endian, pk)
ret.Value = append(ret.Value, buffer.Bytes()...)
row = append(row, pk)
case schemapb.DataType_UInt8:
data := uint8(rand.Int())
_ = binary.Write(&buffer, common.Endian, data)
ret.Value = append(ret.Value, buffer.Bytes()...)
row = append(row, data)
case schemapb.DataType_UInt16:
data := uint16(rand.Int())
_ = binary.Write(&buffer, common.Endian, data)
ret.Value = append(ret.Value, buffer.Bytes()...)
row = append(row, data)
case schemapb.DataType_UInt32:
data := uint32(rand.Int())
_ = binary.Write(&buffer, common.Endian, data)
ret.Value = append(ret.Value, buffer.Bytes()...)
row = append(row, data)
case schemapb.DataType_UInt64:
//pk = int64(rand.Int())
data := uint64(rand.Int())
_ = binary.Write(&buffer, common.Endian, data)
ret.Value = append(ret.Value, buffer.Bytes()...)
row = append(row, data)
case schemapb.DataType_Float:
data := rand.Float32()
_ = binary.Write(&buffer, common.Endian, data)
@ -678,6 +795,88 @@ func genColumnBasedInsertMsg(schema *schemapb.CollectionSchema, numRows, fVecDim
columns[idx] = append(columns[idx], d)
}
pks = data
case schemapb.DataType_UInt8:
data := generateUInt32Array(numRows)
f := &schemapb.FieldData{
Type: field.DataType,
FieldName: field.Name,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: data,
},
},
},
},
FieldId: field.FieldID,
}
msg.FieldsData = append(msg.FieldsData, f)
for _, d := range data {
columns[idx] = append(columns[idx], uint8(d))
}
case schemapb.DataType_UInt16:
data := generateUInt32Array(numRows)
f := &schemapb.FieldData{
Type: field.DataType,
FieldName: field.Name,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: data,
},
},
},
},
FieldId: field.FieldID,
}
msg.FieldsData = append(msg.FieldsData, f)
for _, d := range data {
columns[idx] = append(columns[idx], uint16(d))
}
case schemapb.DataType_UInt32:
data := generateUInt32Array(numRows)
f := &schemapb.FieldData{
Type: field.DataType,
FieldName: field.Name,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: data,
},
},
},
},
FieldId: field.FieldID,
}
msg.FieldsData = append(msg.FieldsData, f)
for _, d := range data {
columns[idx] = append(columns[idx], d)
}
case schemapb.DataType_UInt64:
data := generateUInt64Array(numRows)
f := &schemapb.FieldData{
Type: field.DataType,
FieldName: field.Name,
Field: &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: data,
},
},
},
},
FieldId: field.FieldID,
}
msg.FieldsData = append(msg.FieldsData, f)
for _, d := range data {
columns[idx] = append(columns[idx], d)
}
//pks = data
case schemapb.DataType_Float:
data := generateFloat32Array(numRows)
f := &schemapb.FieldData{
@ -867,6 +1066,23 @@ func TestMergeInsertData(t *testing.T) {
NumRows: []int64{1},
Data: []int64{1},
},
UInt8Field: &UInt8FieldData{
NumRows: []int64{1},
Data: []uint8{1},
},
UInt16Field: &UInt16FieldData{
NumRows: []int64{1},
Data: []uint16{1},
},
UInt32Field: &UInt32FieldData{
NumRows: []int64{1},
Data: []uint32{1},
},
UInt64Field: &UInt64FieldData{
NumRows: []int64{1},
Data: []uint64{1},
},
FloatField: &FloatFieldData{
NumRows: []int64{1},
Data: []float32{0},
@ -922,6 +1138,23 @@ func TestMergeInsertData(t *testing.T) {
NumRows: []int64{1},
Data: []int64{2},
},
UInt8Field: &UInt8FieldData{
NumRows: []int64{1},
Data: []uint8{2},
},
UInt16Field: &UInt16FieldData{
NumRows: []int64{1},
Data: []uint16{2},
},
UInt32Field: &UInt32FieldData{
NumRows: []int64{1},
Data: []uint32{2},
},
UInt64Field: &UInt64FieldData{
NumRows: []int64{1},
Data: []uint64{2},
},
FloatField: &FloatFieldData{
NumRows: []int64{1},
Data: []float32{0},
@ -985,6 +1218,26 @@ func TestMergeInsertData(t *testing.T) {
assert.Equal(t, []int64{2}, f.(*Int64FieldData).NumRows)
assert.Equal(t, []int64{1, 2}, f.(*Int64FieldData).Data)
f, ok = merged.Data[UInt8Field]
assert.True(t, ok)
assert.Equal(t, []int64{2}, f.(*UInt8FieldData).NumRows)
assert.Equal(t, []uint8{1, 2}, f.(*UInt8FieldData).Data)
f, ok = merged.Data[UInt16Field]
assert.True(t, ok)
assert.Equal(t, []int64{2}, f.(*UInt16FieldData).NumRows)
assert.Equal(t, []uint16{1, 2}, f.(*UInt16FieldData).Data)
f, ok = merged.Data[UInt32Field]
assert.True(t, ok)
assert.Equal(t, []int64{2}, f.(*UInt32FieldData).NumRows)
assert.Equal(t, []uint32{1, 2}, f.(*UInt32FieldData).Data)
f, ok = merged.Data[UInt64Field]
assert.True(t, ok)
assert.Equal(t, []int64{2}, f.(*UInt64FieldData).NumRows)
assert.Equal(t, []uint64{1, 2}, f.(*UInt64FieldData).Data)
f, ok = merged.Data[FloatField]
assert.True(t, ok)
assert.Equal(t, []int64{2}, f.(*FloatFieldData).NumRows)
@ -1210,6 +1463,38 @@ func TestFieldDataToBytes(t *testing.T) {
assert.NoError(t, err)
assert.ElementsMatch(t, f6.Data, receiver)
f11 := &UInt8FieldData{Data: []uint8{0, 1}}
bs, err = FieldDataToBytes(endian, f11)
assert.NoError(t, err)
receiver = make([]uint8, 2)
err = binaryRead(endian, bs, receiver)
assert.NoError(t, err)
assert.ElementsMatch(t, f11.Data, receiver)
f12 := &UInt16FieldData{Data: []uint16{0, 1}}
bs, err = FieldDataToBytes(endian, f12)
assert.NoError(t, err)
receiver = make([]uint16, 2)
err = binaryRead(endian, bs, receiver)
assert.NoError(t, err)
assert.ElementsMatch(t, f12.Data, receiver)
f13 := &UInt32FieldData{Data: []uint32{0, 1}}
bs, err = FieldDataToBytes(endian, f13)
assert.NoError(t, err)
receiver = make([]uint32, 2)
err = binaryRead(endian, bs, receiver)
assert.NoError(t, err)
assert.ElementsMatch(t, f13.Data, receiver)
f14 := &UInt64FieldData{Data: []uint64{0, 1}}
bs, err = FieldDataToBytes(endian, f14)
assert.NoError(t, err)
receiver = make([]uint64, 2)
err = binaryRead(endian, bs, receiver)
assert.NoError(t, err)
assert.ElementsMatch(t, f14.Data, receiver)
// in fact, hard to compare float point value.
f7 := &FloatFieldData{Data: []float32{0, 1}}

View File

@ -76,6 +76,14 @@ func initMeta() *etcdpb.CollectionMeta {
Description: "description_11",
DataType: schemapb.DataType_FloatVector,
},
{
FieldID: 110,
Name: "field_uint8",
IsPrimaryKey: false,
Description: "description_6",
DataType: schemapb.DataType_UInt8,
},
},
},
}
@ -108,6 +116,11 @@ func initBinlogFile(schema *etcdpb.CollectionMeta) []*Blob {
Data: []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666},
Dim: 8,
},
110: &UInt8FieldData{
NumRows: []int64{2},
Data: []uint8{3, 4},
},
},
}

View File

@ -315,6 +315,10 @@ func GetNumRowOfFieldData(fieldData *schemapb.FieldData) (uint64, error) {
fieldNumRows = getNumRowsOfScalarField(scalarField.GetIntData().Data)
case *schemapb.ScalarField_LongData:
fieldNumRows = getNumRowsOfScalarField(scalarField.GetLongData().Data)
case *schemapb.ScalarField_UintData:
fieldNumRows = getNumRowsOfScalarField(scalarField.GetUintData().Data)
case *schemapb.ScalarField_UlongData:
fieldNumRows = getNumRowsOfScalarField(scalarField.GetUlongData().Data)
case *schemapb.ScalarField_FloatData:
fieldNumRows = getNumRowsOfScalarField(scalarField.GetFloatData().Data)
case *schemapb.ScalarField_DoubleData:

View File

@ -447,6 +447,36 @@ func (p *ImportWrapper) appendFunc(schema *schemapb.FieldSchema) func(src storag
arr.NumRows[0]++
return nil
}
case schemapb.DataType_UInt8:
return func(src storage.FieldData, n int, target storage.FieldData) error {
arr := target.(*storage.UInt8FieldData)
arr.Data = append(arr.Data, src.GetRow(n).(uint8))
arr.NumRows[0]++
return nil
}
case schemapb.DataType_UInt16:
return func(src storage.FieldData, n int, target storage.FieldData) error {
arr := target.(*storage.UInt16FieldData)
arr.Data = append(arr.Data, src.GetRow(n).(uint16))
arr.NumRows[0]++
return nil
}
case schemapb.DataType_UInt32:
return func(src storage.FieldData, n int, target storage.FieldData) error {
arr := target.(*storage.UInt32FieldData)
arr.Data = append(arr.Data, src.GetRow(n).(uint32))
arr.NumRows[0]++
return nil
}
case schemapb.DataType_UInt64:
return func(src storage.FieldData, n int, target storage.FieldData) error {
arr := target.(*storage.UInt64FieldData)
arr.Data = append(arr.Data, src.GetRow(n).(uint64))
arr.NumRows[0]++
return nil
}
case schemapb.DataType_BinaryVector:
return func(src storage.FieldData, n int, target storage.FieldData) error {
arr := target.(*storage.BinaryVectorFieldData)

View File

@ -137,11 +137,11 @@ func Test_ImportRowBased(t *testing.T) {
content := []byte(`{
"rows":[
{"field_bool": true, "field_int8": 10, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
{"field_bool": false, "field_int8": 11, "field_int16": 102, "field_int32": 1002, "field_int64": 10002, "field_float": 3.15, "field_double": 2.56, "field_string": "hello world", "field_binary_vector": [253, 0], "field_float_vector": [2.1, 2.2, 2.3, 2.4]},
{"field_bool": true, "field_int8": 12, "field_int16": 103, "field_int32": 1003, "field_int64": 10003, "field_float": 3.16, "field_double": 3.56, "field_string": "hello world", "field_binary_vector": [252, 0], "field_float_vector": [3.1, 3.2, 3.3, 3.4]},
{"field_bool": false, "field_int8": 13, "field_int16": 104, "field_int32": 1004, "field_int64": 10004, "field_float": 3.17, "field_double": 4.56, "field_string": "hello world", "field_binary_vector": [251, 0], "field_float_vector": [4.1, 4.2, 4.3, 4.4]},
{"field_bool": true, "field_int8": 14, "field_int16": 105, "field_int32": 1005, "field_int64": 10005, "field_float": 3.18, "field_double": 5.56, "field_string": "hello world", "field_binary_vector": [250, 0], "field_float_vector": [5.1, 5.2, 5.3, 5.4]}
{"field_bool": true, "field_int8": 10, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_uint8": 15, "field_uint16": 106, "field_uint32": 1006, "field_uint64": 10006,"field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
{"field_bool": false, "field_int8": 11, "field_int16": 102, "field_int32": 1002, "field_int64": 10002, "field_uint8": 16, "field_uint16": 107, "field_uint32": 1007, "field_uint64": 10007, "field_float": 3.15, "field_double": 2.56, "field_string": "hello world", "field_binary_vector": [253, 0], "field_float_vector": [2.1, 2.2, 2.3, 2.4]},
{"field_bool": true, "field_int8": 12, "field_int16": 103, "field_int32": 1003, "field_int64": 10003, "field_uint8": 17, "field_uint16": 108, "field_uint32": 1008, "field_uint64": 10008, "field_float": 3.16, "field_double": 3.56, "field_string": "hello world", "field_binary_vector": [252, 0], "field_float_vector": [3.1, 3.2, 3.3, 3.4]},
{"field_bool": false, "field_int8": 13, "field_int16": 104, "field_int32": 1004, "field_int64": 10004, "field_uint8": 18, "field_uint16": 109, "field_uint32": 1009, "field_uint64": 10009, "field_float": 3.17, "field_double": 4.56, "field_string": "hello world", "field_binary_vector": [251, 0], "field_float_vector": [4.1, 4.2, 4.3, 4.4]},
{"field_bool": true, "field_int8": 14, "field_int16": 105, "field_int32": 1005, "field_int64": 10005, "field_uint8": 19, "field_uint16": 110, "field_uint32": 1010, "field_uint64": 10010, "field_float": 3.18, "field_double": 5.56, "field_string": "hello world", "field_binary_vector": [250, 0], "field_float_vector": [5.1, 5.2, 5.3, 5.4]}
]
}`)
@ -191,7 +191,7 @@ func Test_ImportRowBased(t *testing.T) {
// parse error
content = []byte(`{
"rows":[
{"field_bool": true, "field_int8": false, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
{"field_bool": true, "field_int8": false, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_uint8": true, "field_uint16": 106, "field_uint32": 1006, "field_uint64": 10006, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
]
}`)
@ -245,7 +245,11 @@ func Test_ImportColumnBased_json(t *testing.T) {
[3.1, 3.2, 3.3, 3.4],
[4.1, 4.2, 4.3, 4.4],
[5.1, 5.2, 5.3, 5.4]
]
],
"field_uint8": [10, 11, 12, 13, 14],
"field_uint16": [100, 101, 102, 103, 104],
"field_uint32": [1000, 1001, 1002, 1003, 1004],
"field_uint64": [10000, 10001, 10002, 10003, 10004]
}`)
filePath := TempFilesPath + "columns_1.json"
@ -396,6 +400,10 @@ func Test_ImportColumnBased_numpy(t *testing.T) {
"field_int16": [100, 101, 102, 103, 104],
"field_int32": [1000, 1001, 1002, 1003, 1004],
"field_int64": [10000, 10001, 10002, 10003, 10004],
"field_uint8": [10, 11, 12, 13, 14],
"field_uint16": [100, 101, 102, 103, 104],
"field_uint32": [1000, 1001, 1002, 1003, 1004],
"field_uint64": [10000, 10001, 10002, 10003, 10004],
"field_float": [3.14, 3.15, 3.16, 3.17, 3.18],
"field_double": [5.1, 5.2, 5.3, 5.4, 5.5],
"field_string": ["a", "b", "c", "d", "e"]

View File

@ -147,6 +147,40 @@ func initValidators(collectionSchema *schemapb.CollectionSchema, validators map[
field.(*storage.Int64FieldData).NumRows[0]++
return nil
}
case schemapb.DataType_UInt8:
validators[schema.GetFieldID()].validateFunc = numericValidator
validators[schema.GetFieldID()].convertFunc = func(obj interface{}, field storage.FieldData) error {
value := uint8(obj.(float64))
field.(*storage.UInt8FieldData).Data = append(field.(*storage.UInt8FieldData).Data, value)
field.(*storage.UInt8FieldData).NumRows[0]++
return nil
}
case schemapb.DataType_UInt16:
validators[schema.GetFieldID()].validateFunc = numericValidator
validators[schema.GetFieldID()].convertFunc = func(obj interface{}, field storage.FieldData) error {
value := uint16(obj.(float64))
field.(*storage.UInt16FieldData).Data = append(field.(*storage.UInt16FieldData).Data, value)
field.(*storage.UInt16FieldData).NumRows[0]++
return nil
}
case schemapb.DataType_UInt32:
validators[schema.GetFieldID()].validateFunc = numericValidator
validators[schema.GetFieldID()].convertFunc = func(obj interface{}, field storage.FieldData) error {
value := uint32(obj.(float64))
field.(*storage.UInt32FieldData).Data = append(field.(*storage.UInt32FieldData).Data, value)
field.(*storage.UInt32FieldData).NumRows[0]++
return nil
}
case schemapb.DataType_UInt64:
validators[schema.GetFieldID()].validateFunc = numericValidator
validators[schema.GetFieldID()].convertFunc = func(obj interface{}, field storage.FieldData) error {
value := uint64(obj.(float64))
field.(*storage.UInt64FieldData).Data = append(field.(*storage.UInt64FieldData).Data, value)
field.(*storage.UInt64FieldData).NumRows[0]++
return nil
}
case schemapb.DataType_BinaryVector:
dim, err := getFieldDimension(schema)
if err != nil {
@ -460,6 +494,28 @@ func initSegmentData(collectionSchema *schemapb.CollectionSchema) map[storage.Fi
Data: make([]int64, 0),
NumRows: []int64{0},
}
case schemapb.DataType_UInt8:
segmentData[schema.GetFieldID()] = &storage.UInt8FieldData{
Data: make([]uint8, 0),
NumRows: []int64{0},
}
case schemapb.DataType_UInt16:
segmentData[schema.GetFieldID()] = &storage.UInt16FieldData{
Data: make([]uint16, 0),
NumRows: []int64{0},
}
case schemapb.DataType_UInt32:
segmentData[schema.GetFieldID()] = &storage.UInt32FieldData{
Data: make([]uint32, 0),
NumRows: []int64{0},
}
case schemapb.DataType_UInt64:
segmentData[schema.GetFieldID()] = &storage.UInt64FieldData{
Data: make([]uint64, 0),
NumRows: []int64{0},
}
case schemapb.DataType_BinaryVector:
dim, _ := getFieldDimension(schema)
segmentData[schema.GetFieldID()] = &storage.BinaryVectorFieldData{

View File

@ -105,6 +105,10 @@ func Test_InitValidators(t *testing.T) {
checkFunc("field_int16", validVal, invalidVal)
checkFunc("field_int32", validVal, invalidVal)
checkFunc("field_int64", validVal, invalidVal)
checkFunc("field_uint8", validVal, invalidVal)
checkFunc("field_uint16", validVal, invalidVal)
checkFunc("field_uint32", validVal, invalidVal)
checkFunc("field_uint64", validVal, invalidVal)
checkFunc("field_float", validVal, invalidVal)
checkFunc("field_double", validVal, invalidVal)
@ -206,7 +210,7 @@ func Test_JSONRowValidator(t *testing.T) {
// invalid dimension
reader = strings.NewReader(`{
"rows":[
{"field_bool": true, "field_int8": true, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0, 1, 66, 128, 0, 1, 66], "field_float_vector": [1.1, 1.2, 1.3, 1.4]}
{"field_bool": true, "field_int8": true, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_uint8": true, "field_uint16": 105, "field_uint32": 1005, "field_uint64": 10005,"field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0, 1, 66, 128, 0, 1, 66], "field_float_vector": [1.1, 1.2, 1.3, 1.4]}
]
}`)
err = parser.ParseRows(reader, validator)
@ -215,7 +219,7 @@ func Test_JSONRowValidator(t *testing.T) {
// invalid value type
reader = strings.NewReader(`{
"rows":[
{"field_bool": true, "field_int8": true, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]}
{"field_bool": true, "field_int8": true, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_uint8": true, "field_uint16": 105, "field_uint32": 1005, "field_uint64": 10005,"field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]}
]
}`)
err = parser.ParseRows(reader, validator)
@ -242,6 +246,10 @@ func Test_JSONColumnValidator(t *testing.T) {
"field_int16": [],
"field_int32": [],
"field_int64": [],
"field_uint8": [],
"field_uint16": [],
"field_uint32": [],
"field_uint64": [],
"field_float": [],
"field_double": [],
"field_string": [],
@ -266,6 +274,10 @@ func Test_JSONColumnValidator(t *testing.T) {
"field_int16": [],
"field_int32": [1, 2, 3],
"field_int64": [],
"field_uint8": [],
"field_uint16": [],
"field_uint32": [1, 2, 3],
"field_uint64": [],
"field_float": [],
"field_double": [],
"field_string": [],
@ -288,6 +300,10 @@ func Test_JSONColumnValidator(t *testing.T) {
"field_int16": [2],
"field_int32": [3],
"field_int64": [4],
"field_uint8": [5],
"field_uint16": [6],
"field_uint32": [7],
"field_uint64": [8],
"field_float": [1],
"field_double": [1],
"field_string": [9],
@ -318,11 +334,11 @@ func Test_JSONRowConsumer(t *testing.T) {
reader := strings.NewReader(`{
"rows":[
{"field_bool": true, "field_int8": 10, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
{"field_bool": false, "field_int8": 11, "field_int16": 102, "field_int32": 1002, "field_int64": 10002, "field_float": 3.15, "field_double": 2.56, "field_string": "hello world", "field_binary_vector": [253, 0], "field_float_vector": [2.1, 2.2, 2.3, 2.4]},
{"field_bool": true, "field_int8": 12, "field_int16": 103, "field_int32": 1003, "field_int64": 10003, "field_float": 3.16, "field_double": 3.56, "field_string": "hello world", "field_binary_vector": [252, 0], "field_float_vector": [3.1, 3.2, 3.3, 3.4]},
{"field_bool": false, "field_int8": 13, "field_int16": 104, "field_int32": 1004, "field_int64": 10004, "field_float": 3.17, "field_double": 4.56, "field_string": "hello world", "field_binary_vector": [251, 0], "field_float_vector": [4.1, 4.2, 4.3, 4.4]},
{"field_bool": true, "field_int8": 14, "field_int16": 105, "field_int32": 1005, "field_int64": 10005, "field_float": 3.18, "field_double": 5.56, "field_string": "hello world", "field_binary_vector": [250, 0], "field_float_vector": [5.1, 5.2, 5.3, 5.4]}
{"field_bool": true, "field_int8": 10, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_uint8": 15, "field_uint16": 106, "field_uint32": 1006, "field_uint64": 10006, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
{"field_bool": false, "field_int8": 11, "field_int16": 102, "field_int32": 1002, "field_int64": 10002, "field_uint8": 16, "field_uint16": 107, "field_uint32": 1007, "field_uint64": 10007, "field_float": 3.15, "field_double": 2.56, "field_string": "hello world", "field_binary_vector": [253, 0], "field_float_vector": [2.1, 2.2, 2.3, 2.4]},
{"field_bool": true, "field_int8": 12, "field_int16": 103, "field_int32": 1003, "field_int64": 10003, "field_uint8": 17, "field_uint16": 108, "field_uint32": 1008, "field_uint64": 10008, "field_float": 3.16, "field_double": 3.56, "field_string": "hello world", "field_binary_vector": [252, 0], "field_float_vector": [3.1, 3.2, 3.3, 3.4]},
{"field_bool": false, "field_int8": 13, "field_int16": 104, "field_int32": 1004, "field_int64": 10004, "field_uint8": 18, "field_uint16": 109, "field_uint32": 1009, "field_uint64": 10009, "field_float": 3.17, "field_double": 4.56, "field_string": "hello world", "field_binary_vector": [251, 0], "field_float_vector": [4.1, 4.2, 4.3, 4.4]},
{"field_bool": true, "field_int8": 14, "field_int16": 105, "field_int32": 1005, "field_int64": 10005, "field_uint8": 19, "field_uint16": 110, "field_uint32": 1010, "field_uint64": 10010, "field_float": 3.18, "field_double": 5.56, "field_string": "hello world", "field_binary_vector": [250, 0], "field_float_vector": [5.1, 5.2, 5.3, 5.4]}
]
}`)
@ -500,6 +516,10 @@ func Test_JSONColumnConsumer(t *testing.T) {
"field_int16": [100, 101, 102, 103, 104],
"field_int32": [1000, 1001, 1002, 1003, 1004],
"field_int64": [10000, 10001, 10002, 10003, 10004],
"field_uint8": [10, 11, 12, 13, 14],
"field_uint16": [100, 101, 102, 103, 104],
"field_uint32": [1000, 1001, 1002, 1003, 1004],
"field_uint64": [10000, 10001, 10002, 10003, 10004],
"field_float": [3.14, 3.15, 3.16, 3.17, 3.18],
"field_double": [5.1, 5.2, 5.3, 5.4, 5.5],
"field_string": ["a", "b", "c", "d", "e"],

View File

@ -94,6 +94,34 @@ func sampleSchema() *schemapb.CollectionSchema {
{Key: "dim", Value: "4"},
},
},
{
FieldID: 112,
Name: "field_uint8",
IsPrimaryKey: false,
Description: "uint8",
DataType: schemapb.DataType_UInt8,
},
{
FieldID: 113,
Name: "field_uint16",
IsPrimaryKey: false,
Description: "uint16",
DataType: schemapb.DataType_UInt16,
},
{
FieldID: 114,
Name: "field_uint32",
IsPrimaryKey: false,
Description: "uint32",
DataType: schemapb.DataType_UInt32,
},
{
FieldID: 115,
Name: "field_uint64",
IsPrimaryKey: false,
Description: "uint64",
DataType: schemapb.DataType_UInt64,
},
},
}
return schema
@ -205,11 +233,11 @@ func Test_ParserRows(t *testing.T) {
reader := strings.NewReader(`{
"rows":[
{"field_bool": true, "field_int8": 10, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
{"field_bool": false, "field_int8": 11, "field_int16": 102, "field_int32": 1002, "field_int64": 10002, "field_float": 3.15, "field_double": 2.56, "field_string": "hello world", "field_binary_vector": [253, 0], "field_float_vector": [2.1, 2.2, 2.3, 2.4]},
{"field_bool": true, "field_int8": 12, "field_int16": 103, "field_int32": 1003, "field_int64": 10003, "field_float": 3.16, "field_double": 3.56, "field_string": "hello world", "field_binary_vector": [252, 0], "field_float_vector": [3.1, 3.2, 3.3, 3.4]},
{"field_bool": false, "field_int8": 13, "field_int16": 104, "field_int32": 1004, "field_int64": 10004, "field_float": 3.17, "field_double": 4.56, "field_string": "hello world", "field_binary_vector": [251, 0], "field_float_vector": [4.1, 4.2, 4.3, 4.4]},
{"field_bool": true, "field_int8": 14, "field_int16": 105, "field_int32": 1005, "field_int64": 10005, "field_float": 3.18, "field_double": 5.56, "field_string": "hello world", "field_binary_vector": [250, 0], "field_float_vector": [5.1, 5.2, 5.3, 5.4]}
{"field_bool": true, "field_int8": 10, "field_int16": 101, "field_int32": 1001, "field_int64": 10001, "field_uint8": 15, "field_uint16": 106, "field_uint32": 1006, "field_uint64": 10006, "field_float": 3.14, "field_double": 1.56, "field_string": "hello world", "field_binary_vector": [254, 0], "field_float_vector": [1.1, 1.2, 1.3, 1.4]},
{"field_bool": false, "field_int8": 11, "field_int16": 102, "field_int32": 1002, "field_int64": 10002, "field_uint8": 16, "field_uint16": 107, "field_uint32": 1007, "field_uint64": 10007, "field_float": 3.15, "field_double": 2.56, "field_string": "hello world", "field_binary_vector": [253, 0], "field_float_vector": [2.1, 2.2, 2.3, 2.4]},
{"field_bool": true, "field_int8": 12, "field_int16": 103, "field_int32": 1003, "field_int64": 10003, "field_uint8": 17, "field_uint16": 108, "field_uint32": 1008, "field_uint64": 10008, "field_float": 3.16, "field_double": 3.56, "field_string": "hello world", "field_binary_vector": [252, 0], "field_float_vector": [3.1, 3.2, 3.3, 3.4]},
{"field_bool": false, "field_int8": 13, "field_int16": 104, "field_int32": 1004, "field_int64": 10004, "field_uint8": 18, "field_uint16": 109, "field_uint32": 1009, "field_uint64": 10009, "field_float": 3.17, "field_double": 4.56, "field_string": "hello world", "field_binary_vector": [251, 0], "field_float_vector": [4.1, 4.2, 4.3, 4.4]},
{"field_bool": true, "field_int8": 14, "field_int16": 105, "field_int32": 1005, "field_int64": 10005, "field_uint8": 19, "field_uint16": 110, "field_uint32": 1010, "field_uint64": 10010, "field_float": 3.18, "field_double": 5.56, "field_string": "hello world", "field_binary_vector": [250, 0], "field_float_vector": [5.1, 5.2, 5.3, 5.4]}
]
}`)
@ -314,6 +342,10 @@ func Test_ParserColumns(t *testing.T) {
"field_int16": [100, 101, 102, 103, 104],
"field_int32": [1000, 1001, 1002, 1003, 1004],
"field_int64": [10000, 10001, 10002, 10003, 10004],
"field_uint8": [10, 11, 12, 13, 14],
"field_uint16": [100, 101, 102, 103, 104],
"field_uint32": [1000, 1001, 1002, 1003, 1004],
"field_uint64": [10000, 10001, 10002, 10003, 10004],
"field_float": [3.14, 3.15, 3.16, 3.17, 3.18],
"field_double": [5.1, 5.2, 5.3, 5.4, 5.5],
"field_string": ["a", "b", "c", "d", "e"],

View File

@ -216,6 +216,36 @@ func (n *NumpyAdapter) ReadInt8(size int) ([]int8, error) {
return data, nil
}
// ReadUint16 decodes up to size uint16 elements from the numpy payload.
// It verifies the numpy header declares a uint16 dtype, clamps the request
// to the number of remaining elements, and advances the read position only
// after a fully successful read.
func (n *NumpyAdapter) ReadUint16(size int) ([]uint16, error) {
	if n.npyReader == nil {
		return nil, errors.New("reader is not initialized")
	}

	// Reject payloads whose declared dtype is not a 2-byte unsigned integer.
	dtype := n.npyReader.Header.Descr.Type
	if dtype != "u2" && dtype != "<u2" && dtype != "|u2" && dtype != ">u2" && dtype != "uint16" {
		return nil, errors.New("numpy data is not uint16 type")
	}

	// Clamp so we never read past the end of the payload.
	count := n.checkSize(size)
	if count <= 0 {
		return nil, errors.New("nothing to read")
	}

	out := make([]uint16, count)
	if err := binary.Read(n.reader, n.order, &out); err != nil {
		return nil, err
	}

	// Advance the cursor only after the read succeeded in full.
	n.readPosition += count
	return out, nil
}
func (n *NumpyAdapter) ReadInt16(size int) ([]int16, error) {
if n.npyReader == nil {
return nil, errors.New("reader is not initialized")
@ -246,6 +276,36 @@ func (n *NumpyAdapter) ReadInt16(size int) ([]int16, error) {
return data, nil
}
// ReadUint32 decodes up to size uint32 elements from the numpy payload.
// It verifies the numpy header declares a uint32 dtype, clamps the request
// to the number of remaining elements, and advances the read position only
// after a fully successful read.
func (n *NumpyAdapter) ReadUint32(size int) ([]uint32, error) {
	if n.npyReader == nil {
		return nil, errors.New("reader is not initialized")
	}

	// Reject payloads whose declared dtype is not a 4-byte unsigned integer.
	dtype := n.npyReader.Header.Descr.Type
	if dtype != "u4" && dtype != "<u4" && dtype != "|u4" && dtype != ">u4" && dtype != "uint32" {
		return nil, errors.New("numpy data is not uint32 type")
	}

	// Clamp so we never read past the end of the payload.
	count := n.checkSize(size)
	if count <= 0 {
		return nil, errors.New("nothing to read")
	}

	out := make([]uint32, count)
	if err := binary.Read(n.reader, n.order, &out); err != nil {
		return nil, err
	}

	// Advance the cursor only after the read succeeded in full.
	n.readPosition += count
	return out, nil
}
func (n *NumpyAdapter) ReadInt32(size int) ([]int32, error) {
if n.npyReader == nil {
return nil, errors.New("reader is not initialized")
@ -276,6 +336,36 @@ func (n *NumpyAdapter) ReadInt32(size int) ([]int32, error) {
return data, nil
}
// ReadUint64 decodes up to size uint64 elements from the numpy payload.
// It verifies the numpy header declares a uint64 dtype, clamps the request
// to the number of remaining elements, and advances the read position only
// after a fully successful read.
func (n *NumpyAdapter) ReadUint64(size int) ([]uint64, error) {
	if n.npyReader == nil {
		return nil, errors.New("reader is not initialized")
	}

	// Reject payloads whose declared dtype is not an 8-byte unsigned integer.
	dtype := n.npyReader.Header.Descr.Type
	if dtype != "u8" && dtype != "<u8" && dtype != "|u8" && dtype != ">u8" && dtype != "uint64" {
		return nil, errors.New("numpy data is not uint64 type")
	}

	// Clamp so we never read past the end of the payload.
	count := n.checkSize(size)
	if count <= 0 {
		return nil, errors.New("nothing to read")
	}

	out := make([]uint64, count)
	if err := binary.Read(n.reader, n.order, &out); err != nil {
		return nil, err
	}

	// Advance the cursor only after the read succeeded in full.
	n.readPosition += count
	return out, nil
}
func (n *NumpyAdapter) ReadInt64(size int) ([]int64, error) {
if n.npyReader == nil {
return nil, errors.New("reader is not initialized")

View File

@ -66,6 +66,13 @@ func Test_ReadError(t *testing.T) {
assert.NotNil(t, err)
_, err = adapter.ReadInt64(1)
assert.NotNil(t, err)
_, err = adapter.ReadUint16(1)
assert.NotNil(t, err)
_, err = adapter.ReadUint32(1)
assert.NotNil(t, err)
_, err = adapter.ReadUint64(1)
assert.NotNil(t, err)
_, err = adapter.ReadFloat32(1)
assert.NotNil(t, err)
_, err = adapter.ReadFloat64(1)
@ -149,6 +156,42 @@ func Test_ReadError(t *testing.T) {
assert.NotNil(t, err)
}
{
adapter.npyReader.Header.Descr.Type = "u2"
data, err := adapter.ReadUint16(1)
assert.Nil(t, data)
assert.NotNil(t, err)
adapter.npyReader.Header.Descr.Type = "dummy"
data, err = adapter.ReadUint16(1)
assert.Nil(t, data)
assert.NotNil(t, err)
}
{
adapter.npyReader.Header.Descr.Type = "u4"
data, err := adapter.ReadUint32(1)
assert.Nil(t, data)
assert.NotNil(t, err)
adapter.npyReader.Header.Descr.Type = "dummy"
data, err = adapter.ReadUint32(1)
assert.Nil(t, data)
assert.NotNil(t, err)
}
{
adapter.npyReader.Header.Descr.Type = "u8"
data, err := adapter.ReadUint64(1)
assert.Nil(t, data)
assert.NotNil(t, err)
adapter.npyReader.Header.Descr.Type = "dummy"
data, err = adapter.ReadUint64(1)
assert.Nil(t, data)
assert.NotNil(t, err)
}
{
adapter.npyReader.Header.Descr.Type = "f4"
data, err := adapter.ReadFloat32(1)
@ -230,6 +273,18 @@ func Test_Read(t *testing.T) {
assert.NotNil(t, err)
assert.Nil(t, resi8)
resu2, err := adapter.ReadUint16(len(data))
assert.NotNil(t, err)
assert.Nil(t, resu2)
resu4, err := adapter.ReadUint32(len(data))
assert.NotNil(t, err)
assert.Nil(t, resu4)
resu8, err := adapter.ReadUint64(len(data))
assert.NotNil(t, err)
assert.Nil(t, resu8)
resf4, err := adapter.ReadFloat32(len(data))
assert.NotNil(t, err)
assert.Nil(t, resf4)
@ -399,6 +454,96 @@ func Test_Read(t *testing.T) {
assert.Nil(t, res)
}
{
filePath := TempFilesPath + "uint16.npy"
data := []uint16{1, 2, 3, 4, 5, 6}
CreateNumpyFile(filePath, data)
file, err := os.Open(filePath)
assert.Nil(t, err)
defer file.Close()
adapter, err := NewNumpyAdapter(file)
assert.Nil(t, err)
res, err := adapter.ReadUint16(len(data) - 1)
assert.Nil(t, err)
assert.Equal(t, len(data)-1, len(res))
for i := 0; i < len(res); i++ {
assert.Equal(t, data[i], res[i])
}
res, err = adapter.ReadUint16(len(data))
assert.Nil(t, err)
assert.Equal(t, 1, len(res))
assert.Equal(t, data[len(data)-1], res[0])
res, err = adapter.ReadUint16(len(data))
assert.NotNil(t, err)
assert.Nil(t, res)
}
{
filePath := TempFilesPath + "uint32.npy"
data := []uint32{1, 2, 3, 4, 5, 6}
CreateNumpyFile(filePath, data)
file, err := os.Open(filePath)
assert.Nil(t, err)
defer file.Close()
adapter, err := NewNumpyAdapter(file)
assert.Nil(t, err)
res, err := adapter.ReadUint32(len(data) - 1)
assert.Nil(t, err)
assert.Equal(t, len(data)-1, len(res))
for i := 0; i < len(res); i++ {
assert.Equal(t, data[i], res[i])
}
res, err = adapter.ReadUint32(len(data))
assert.Nil(t, err)
assert.Equal(t, 1, len(res))
assert.Equal(t, data[len(data)-1], res[0])
res, err = adapter.ReadUint32(len(data))
assert.NotNil(t, err)
assert.Nil(t, res)
}
{
filePath := TempFilesPath + "uint64.npy"
data := []uint64{1, 2, 3, 4, 5, 6}
CreateNumpyFile(filePath, data)
file, err := os.Open(filePath)
assert.Nil(t, err)
defer file.Close()
adapter, err := NewNumpyAdapter(file)
assert.Nil(t, err)
res, err := adapter.ReadUint64(len(data) - 1)
assert.Nil(t, err)
assert.Equal(t, len(data)-1, len(res))
for i := 0; i < len(res); i++ {
assert.Equal(t, data[i], res[i])
}
res, err = adapter.ReadUint64(len(data))
assert.Nil(t, err)
assert.Equal(t, 1, len(res))
assert.Equal(t, data[len(data)-1], res[0])
res, err = adapter.ReadUint64(len(data))
assert.NotNil(t, err)
assert.Nil(t, res)
}
{
filePath := TempFilesPath + "float.npy"
data := []float32{1, 2, 3, 4, 5, 6}

View File

@ -64,6 +64,12 @@ func convertNumpyType(str string) (schemapb.DataType, error) {
return schemapb.DataType_Int32, nil
case "i8", "<i8", "|i8", ">i8", "int64":
return schemapb.DataType_Int64, nil
case "u2", "<u2", "|u2", ">u2", "uint16":
return schemapb.DataType_UInt16, nil
case "u4", "<u4", "|u4", ">u4", "uint32":
return schemapb.DataType_UInt32, nil
case "u8", "<u8", "|u8", ">u8", "uint64":
return schemapb.DataType_UInt64, nil
case "f4", "<f4", "|f4", ">f4", "float32":
return schemapb.DataType_Float, nil
case "f8", "<f8", "|f8", ">f8", "float64":
@ -215,6 +221,46 @@ func (p *NumpyParser) consume(adapter *NumpyAdapter) error {
NumRows: []int64{int64(p.columnDesc.elementCount)},
Data: data,
}
case schemapb.DataType_UInt8:
data, err := adapter.ReadUint8(p.columnDesc.elementCount)
if err != nil {
return err
}
p.columnData = &storage.UInt8FieldData{
NumRows: []int64{int64(p.columnDesc.elementCount)},
Data: data,
}
case schemapb.DataType_UInt16:
data, err := adapter.ReadUint16(p.columnDesc.elementCount)
if err != nil {
return err
}
p.columnData = &storage.UInt16FieldData{
NumRows: []int64{int64(p.columnDesc.elementCount)},
Data: data,
}
case schemapb.DataType_UInt32:
data, err := adapter.ReadUint32(p.columnDesc.elementCount)
if err != nil {
return err
}
p.columnData = &storage.UInt32FieldData{
NumRows: []int64{int64(p.columnDesc.elementCount)},
Data: data,
}
case schemapb.DataType_UInt64:
data, err := adapter.ReadUint64(p.columnDesc.elementCount)
if err != nil {
return err
}
p.columnData = &storage.UInt64FieldData{
NumRows: []int64{int64(p.columnDesc.elementCount)},
Data: data,
}
case schemapb.DataType_Float:
data, err := adapter.ReadFloat32(p.columnDesc.elementCount)
if err != nil {

View File

@ -34,6 +34,12 @@ func Test_ConvertNumpyType(t *testing.T) {
checkFunc([]string{"i2", "<i2", "|i2", ">i2", "int16"}, schemapb.DataType_Int16)
checkFunc([]string{"i4", "<i4", "|i4", ">i4", "int32"}, schemapb.DataType_Int32)
checkFunc([]string{"i8", "<i8", "|i8", ">i8", "int64"}, schemapb.DataType_Int64)
//checkFunc([]string{"u1", "<u1", "|u1","uint8"}, schemapb.DataType_UInt8)
checkFunc([]string{"u2", "<u2", "|u2", ">u2", "uint16"}, schemapb.DataType_UInt16)
checkFunc([]string{"u4", "<u4", "|u4", ">u4", "uint32"}, schemapb.DataType_UInt32)
checkFunc([]string{"u8", "<u8", "|u8", ">u8", "uint64"}, schemapb.DataType_UInt64)
checkFunc([]string{"f4", "<f4", "|f4", ">f4", "float32"}, schemapb.DataType_Float)
checkFunc([]string{"f8", "<f8", "|f8", ">f8", "float64"}, schemapb.DataType_Double)
@ -409,6 +415,62 @@ func Test_Parse(t *testing.T) {
}
checkFunc(data5, "field_int64", flushFunc)
// scalar uint8
/*data11 := []uint8{1, 2, 3, 4, 5}
flushFunc = func(field storage.FieldData) error {
assert.NotNil(t, field)
assert.Equal(t, len(data11), field.RowNum())
for i := 0; i < len(data11); i++ {
assert.Equal(t, data11[i], field.GetRow(i))
}
return nil
}
checkFunc(data11, "field_uint8", flushFunc)*/
// scalar uint16
data12 := []uint16{1, 2, 3, 4, 5}
flushFunc = func(field storage.FieldData) error {
assert.NotNil(t, field)
assert.Equal(t, len(data12), field.RowNum())
for i := 0; i < len(data12); i++ {
assert.Equal(t, data12[i], field.GetRow(i))
}
return nil
}
checkFunc(data12, "field_uint16", flushFunc)
// scalar uint32
data13 := []uint32{1, 2, 3, 4, 5}
flushFunc = func(field storage.FieldData) error {
assert.NotNil(t, field)
assert.Equal(t, len(data13), field.RowNum())
for i := 0; i < len(data13); i++ {
assert.Equal(t, data13[i], field.GetRow(i))
}
return nil
}
checkFunc(data13, "field_uint32", flushFunc)
// scalar uint64
data14 := []uint64{1, 2, 3, 4, 5}
flushFunc = func(field storage.FieldData) error {
assert.NotNil(t, field)
assert.Equal(t, len(data14), field.RowNum())
for i := 0; i < len(data14); i++ {
assert.Equal(t, data14[i], field.GetRow(i))
}
return nil
}
checkFunc(data14, "field_uint64", flushFunc)
// scalar float
data6 := []float32{1, 2, 3, 4, 5}
flushFunc = func(field storage.FieldData) error {

View File

@ -63,6 +63,38 @@ func generateInt64Array(numRows int) []int64 {
return ret
}
// generateUInt8Array returns numRows pseudo-random uint8 values.
func generateUInt8Array(numRows int) []uint8 {
	out := make([]uint8, numRows)
	for i := range out {
		out[i] = uint8(rand.Int())
	}
	return out
}
// generateUInt16Array returns numRows pseudo-random uint16 values.
func generateUInt16Array(numRows int) []uint16 {
	out := make([]uint16, numRows)
	for i := range out {
		out[i] = uint16(rand.Int())
	}
	return out
}
// generateUInt32Array returns numRows pseudo-random uint32 values.
func generateUInt32Array(numRows int) []uint32 {
	out := make([]uint32, numRows)
	for i := range out {
		out[i] = uint32(rand.Int())
	}
	return out
}
// generateUInt64Array returns numRows pseudo-random uint64 values.
func generateUInt64Array(numRows int) []uint64 {
	out := make([]uint64, numRows)
	for i := range out {
		out[i] = uint64(rand.Int())
	}
	return out
}
func generateFloat32Array(numRows int) []float32 {
ret := make([]float32, 0, numRows)
for i := 0; i < numRows; i++ {
@ -128,6 +160,22 @@ func genFieldData(dtype schemapb.DataType, numRows, dim int) storage.FieldData {
return &storage.Int64FieldData{
Data: generateInt64Array(numRows),
}
case schemapb.DataType_UInt8:
return &storage.UInt8FieldData{
Data: generateUInt8Array(numRows),
}
case schemapb.DataType_UInt16:
return &storage.UInt16FieldData{
Data: generateUInt16Array(numRows),
}
case schemapb.DataType_UInt32:
return &storage.UInt32FieldData{
Data: generateUInt32Array(numRows),
}
case schemapb.DataType_UInt64:
return &storage.UInt64FieldData{
Data: generateUInt64Array(numRows),
}
case schemapb.DataType_Float:
return &storage.FloatFieldData{
Data: generateFloat32Array(numRows),
@ -255,6 +303,14 @@ func genTypedIndexCase(dtype schemapb.DataType) []indexTestCase {
return genScalarIndexCases(dtype)
case schemapb.DataType_Int64:
return genScalarIndexCases(dtype)
case schemapb.DataType_UInt8:
return genScalarIndexCases(dtype)
case schemapb.DataType_UInt16:
return genScalarIndexCases(dtype)
case schemapb.DataType_UInt32:
return genScalarIndexCases(dtype)
case schemapb.DataType_UInt64:
return genScalarIndexCases(dtype)
case schemapb.DataType_Float:
return genScalarIndexCases(dtype)
case schemapb.DataType_Double:
@ -279,6 +335,10 @@ func genIndexCase() []indexTestCase {
schemapb.DataType_Int16,
schemapb.DataType_Int32,
schemapb.DataType_Int64,
schemapb.DataType_UInt8,
schemapb.DataType_UInt16,
schemapb.DataType_UInt32,
schemapb.DataType_UInt64,
schemapb.DataType_Float,
schemapb.DataType_Double,
schemapb.DataType_String,

View File

@ -69,6 +69,35 @@ func GenDataset(data storage.FieldData) *Dataset {
keyRawArr: f.Data,
},
}
case *storage.UInt8FieldData:
return &Dataset{
DType: schemapb.DataType_UInt8,
Data: map[string]interface{}{
keyRawArr: f.Data,
},
}
case *storage.UInt16FieldData:
return &Dataset{
DType: schemapb.DataType_UInt16,
Data: map[string]interface{}{
keyRawArr: f.Data,
},
}
case *storage.UInt32FieldData:
return &Dataset{
DType: schemapb.DataType_UInt32,
Data: map[string]interface{}{
keyRawArr: f.Data,
},
}
case *storage.UInt64FieldData:
return &Dataset{
DType: schemapb.DataType_UInt64,
Data: map[string]interface{}{
keyRawArr: f.Data,
},
}
case *storage.FloatFieldData:
return &Dataset{
DType: schemapb.DataType_Float,

View File

@ -105,6 +105,14 @@ func (index *CgoIndex) Build(dataset *Dataset) error {
return index.buildInt32Index(dataset)
case schemapb.DataType_Int64:
return index.buildInt64Index(dataset)
case schemapb.DataType_UInt8:
return index.buildUInt8Index(dataset)
case schemapb.DataType_UInt16:
return index.buildUInt16Index(dataset)
case schemapb.DataType_UInt32:
return index.buildUInt32Index(dataset)
case schemapb.DataType_UInt64:
return index.buildUInt64Index(dataset)
case schemapb.DataType_Float:
return index.buildFloatIndex(dataset)
case schemapb.DataType_Double:
@ -170,6 +178,30 @@ func (index *CgoIndex) buildInt64Index(dataset *Dataset) error {
return HandleCStatus(&status, "failed to build scalar index")
}
// buildUInt8Index hands the raw uint8 column to the C scalar-index builder.
// NOTE(review): the type assertion on dataset.Data[keyRawArr] is unchecked and
// &data[0] panics when the slice is empty — confirm callers always supply a
// non-empty, correctly-typed payload.
func (index *CgoIndex) buildUInt8Index(dataset *Dataset) error {
	data := dataset.Data[keyRawArr].([]uint8)
	status := C.BuildScalarIndex(index.indexPtr, (C.int64_t)(len(data)), unsafe.Pointer(&data[0]))
	return HandleCStatus(&status, "failed to build scalar index")
}
// buildUInt16Index hands the raw uint16 column to the C scalar-index builder.
// NOTE(review): the type assertion on dataset.Data[keyRawArr] is unchecked and
// &data[0] panics when the slice is empty — confirm callers always supply a
// non-empty, correctly-typed payload.
func (index *CgoIndex) buildUInt16Index(dataset *Dataset) error {
	data := dataset.Data[keyRawArr].([]uint16)
	status := C.BuildScalarIndex(index.indexPtr, (C.int64_t)(len(data)), unsafe.Pointer(&data[0]))
	return HandleCStatus(&status, "failed to build scalar index")
}
// buildUInt32Index hands the raw uint32 column to the C scalar-index builder.
// NOTE(review): the type assertion on dataset.Data[keyRawArr] is unchecked and
// &data[0] panics when the slice is empty — confirm callers always supply a
// non-empty, correctly-typed payload.
func (index *CgoIndex) buildUInt32Index(dataset *Dataset) error {
	data := dataset.Data[keyRawArr].([]uint32)
	status := C.BuildScalarIndex(index.indexPtr, (C.int64_t)(len(data)), unsafe.Pointer(&data[0]))
	return HandleCStatus(&status, "failed to build scalar index")
}
// buildUInt64Index hands the raw uint64 column to the C scalar-index builder.
// NOTE(review): the type assertion on dataset.Data[keyRawArr] is unchecked and
// &data[0] panics when the slice is empty — confirm callers always supply a
// non-empty, correctly-typed payload.
func (index *CgoIndex) buildUInt64Index(dataset *Dataset) error {
	data := dataset.Data[keyRawArr].([]uint64)
	status := C.BuildScalarIndex(index.indexPtr, (C.int64_t)(len(data)), unsafe.Pointer(&data[0]))
	return HandleCStatus(&status, "failed to build scalar index")
}
func (index *CgoIndex) buildFloatIndex(dataset *Dataset) error {
data := dataset.Data[keyRawArr].([]float32)
status := C.BuildScalarIndex(index.indexPtr, (C.int64_t)(len(data)), unsafe.Pointer(&data[0]))

View File

@ -143,6 +143,20 @@ func TransferColumnBasedDataToRowBasedData(schema *schemapb.CollectionSchema, co
if err != nil {
return nil, err
}
case *schemapb.ScalarField_UintData:
err := appendScalarField(&data, &rowNum, func() interface{} {
return scalarField.GetUintData().Data
})
if err != nil {
return nil, err
}
case *schemapb.ScalarField_UlongData:
err := appendScalarField(&data, &rowNum, func() interface{} {
return scalarField.GetUlongData().Data
})
if err != nil {
return nil, err
}
case *schemapb.ScalarField_FloatData:
err := appendScalarField(&data, &rowNum, func() interface{} {
return scalarField.GetFloatData().Data
@ -229,6 +243,18 @@ func parseToRowData(data [][]any, dTypes []schemapb.DataType, rowNum int) ([]*co
case schemapb.DataType_Int64:
d := data[j][i].(int64)
err = binary.Write(&buffer, endian, d)
case schemapb.DataType_UInt8:
d := uint8(data[j][i].(uint32))
err = binary.Write(&buffer, endian, d)
case schemapb.DataType_UInt16:
d := uint16(data[j][i].(uint32))
err = binary.Write(&buffer, endian, d)
case schemapb.DataType_UInt32:
d := data[j][i].(uint32)
err = binary.Write(&buffer, endian, d)
case schemapb.DataType_UInt64:
d := data[j][i].(uint64)
err = binary.Write(&buffer, endian, d)
case schemapb.DataType_Float:
d := data[j][i].(float32)
err = binary.Write(&buffer, endian, d)

View File

@ -65,13 +65,13 @@ func EstimateSizePerRecord(schema *schemapb.CollectionSchema) (int, error) {
res := 0
for _, fs := range schema.Fields {
switch fs.DataType {
case schemapb.DataType_Bool, schemapb.DataType_Int8:
case schemapb.DataType_Bool, schemapb.DataType_Int8, schemapb.DataType_UInt8:
res++
case schemapb.DataType_Int16:
case schemapb.DataType_Int16, schemapb.DataType_UInt16:
res += 2
case schemapb.DataType_Int32, schemapb.DataType_Float:
case schemapb.DataType_Int32, schemapb.DataType_Float, schemapb.DataType_UInt32:
res += 4
case schemapb.DataType_Int64, schemapb.DataType_Double:
case schemapb.DataType_Int64, schemapb.DataType_Double, schemapb.DataType_UInt64:
res += 8
case schemapb.DataType_VarChar:
maxLengthPerRow, err := GetAvgLengthOfVarLengthField(fs)
@ -110,13 +110,13 @@ func EstimateEntitySize(fieldsData []*schemapb.FieldData, rowOffset int) (int, e
res := 0
for _, fs := range fieldsData {
switch fs.GetType() {
case schemapb.DataType_Bool, schemapb.DataType_Int8:
case schemapb.DataType_Bool, schemapb.DataType_Int8, schemapb.DataType_UInt8:
res++
case schemapb.DataType_Int16:
case schemapb.DataType_Int16, schemapb.DataType_UInt16:
res += 2
case schemapb.DataType_Int32, schemapb.DataType_Float:
case schemapb.DataType_Int32, schemapb.DataType_Float, schemapb.DataType_UInt32:
res += 4
case schemapb.DataType_Int64, schemapb.DataType_Double:
case schemapb.DataType_Int64, schemapb.DataType_Double, schemapb.DataType_UInt64:
res += 8
case schemapb.DataType_VarChar:
if rowOffset >= len(fs.GetScalars().GetStringData().GetData()) {
@ -227,7 +227,9 @@ func IsVectorType(dataType schemapb.DataType) bool {
func IsIntegerType(dataType schemapb.DataType) bool {
switch dataType {
case schemapb.DataType_Int8, schemapb.DataType_Int16,
schemapb.DataType_Int32, schemapb.DataType_Int64:
schemapb.DataType_Int32, schemapb.DataType_Int64,
schemapb.DataType_UInt8, schemapb.DataType_UInt16,
schemapb.DataType_UInt32, schemapb.DataType_UInt64:
return true
default:
return false
@ -316,6 +318,26 @@ func AppendFieldData(dst []*schemapb.FieldData, src []*schemapb.FieldData, idx i
} else {
dstScalar.GetLongData().Data = append(dstScalar.GetLongData().Data, srcScalar.LongData.Data[idx])
}
case *schemapb.ScalarField_UintData:
if dstScalar.GetUintData() == nil {
dstScalar.Data = &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: []uint32{srcScalar.UintData.Data[idx]},
},
}
} else {
dstScalar.GetUintData().Data = append(dstScalar.GetUintData().Data, srcScalar.UintData.Data[idx])
}
case *schemapb.ScalarField_UlongData:
if dstScalar.GetUlongData() == nil {
dstScalar.Data = &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: []uint64{srcScalar.UlongData.Data[idx]},
},
}
} else {
dstScalar.GetUlongData().Data = append(dstScalar.GetUlongData().Data, srcScalar.UlongData.Data[idx])
}
case *schemapb.ScalarField_FloatData:
if dstScalar.GetFloatData() == nil {
dstScalar.Data = &schemapb.ScalarField_FloatData{
@ -448,6 +470,26 @@ func MergeFieldData(dst []*schemapb.FieldData, src []*schemapb.FieldData) {
} else {
dstScalar.GetLongData().Data = append(dstScalar.GetLongData().Data, srcScalar.LongData.Data...)
}
case *schemapb.ScalarField_UintData:
if dstScalar.GetUintData() == nil {
dstScalar.Data = &schemapb.ScalarField_UintData{
UintData: &schemapb.UIntArray{
Data: srcScalar.UintData.Data,
},
}
} else {
dstScalar.GetUintData().Data = append(dstScalar.GetUintData().Data, srcScalar.UintData.Data...)
}
case *schemapb.ScalarField_UlongData:
if dstScalar.GetUlongData() == nil {
dstScalar.Data = &schemapb.ScalarField_UlongData{
UlongData: &schemapb.ULongArray{
Data: srcScalar.UlongData.Data,
},
}
} else {
dstScalar.GetUlongData().Data = append(dstScalar.GetUlongData().Data, srcScalar.UlongData.Data...)
}
case *schemapb.ScalarField_FloatData:
if dstScalar.GetFloatData() == nil {
dstScalar.Data = &schemapb.ScalarField_FloatData{

View File

@ -88,6 +88,29 @@ def gen_int64_field(name=ct.default_int64_field_name, description=ct.default_des
is_primary=is_primary, **kwargs)
return int64_field
def gen_uint8_field(name=ct.default_uint8_field_name, description=ct.default_desc, is_primary=False, **kwargs):
    """Build and return a UINT8 field schema via the API wrapper."""
    field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.UINT8,
                                                         description=description,
                                                         is_primary=is_primary, **kwargs)
    return field
def gen_uint16_field(name=ct.default_uint16_field_name, description=ct.default_desc, is_primary=False, **kwargs):
    """Build and return a UINT16 field schema via the API wrapper."""
    field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.UINT16,
                                                         description=description,
                                                         is_primary=is_primary, **kwargs)
    return field
def gen_uint32_field(name=ct.default_uint32_field_name, description=ct.default_desc, is_primary=False, **kwargs):
    """Build and return a UINT32 field schema via the API wrapper."""
    field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.UINT32,
                                                         description=description,
                                                         is_primary=is_primary, **kwargs)
    return field
def gen_uint64_field(name=ct.default_uint64_field_name, description=ct.default_desc, is_primary=False, **kwargs):
    """Build and return a UINT64 field schema via the API wrapper."""
    field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.UINT64,
                                                         description=description,
                                                         is_primary=is_primary, **kwargs)
    return field
def gen_float_field(name=ct.default_float_field_name, is_primary=False, description=ct.default_desc):
float_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.FLOAT, description=description,
@ -148,6 +171,7 @@ def gen_collection_schema_all_datatype(description=ct.default_desc,
primary_field=ct.default_int64_field_name,
auto_id=False, dim=ct.default_dim):
fields = [gen_int64_field(), gen_int32_field(), gen_int16_field(), gen_int8_field(),
gen_uint64_field(), gen_uint32_field(), gen_uint16_field(), gen_uint8_field(),
gen_bool_field(), gen_float_field(), gen_double_field(), gen_string_field(), gen_float_vec_field(dim=dim)]
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field, auto_id=auto_id)
@ -277,6 +301,10 @@ def gen_dataframe_all_data_type(nb=ct.default_nb, dim=ct.default_dim, start=0):
int32_values = pd.Series(data=[np.int32(i) for i in range(start, start + nb)], dtype="int32")
int16_values = pd.Series(data=[np.int16(i) for i in range(start, start + nb)], dtype="int16")
int8_values = pd.Series(data=[np.int8(i) for i in range(start, start + nb)], dtype="int8")
uint64_values = pd.Series(data=[np.uint64(i) for i in range(start, start + nb)], dtype="uint64")
uint32_values = pd.Series(data=[np.uint32(i) for i in range(start, start + nb)], dtype="uint32")
uint16_values = pd.Series(data=[np.uint16(i) for i in range(start, start + nb)], dtype="uint16")
uint8_values = pd.Series(data=[np.uint8(i) for i in range(start, start + nb)], dtype="uint8")
bool_values = pd.Series(data=[np.bool(i) for i in range(start, start + nb)], dtype="bool")
float_values = pd.Series(data=[np.float32(i) for i in range(start, start + nb)], dtype="float32")
double_values = pd.Series(data=[np.double(i) for i in range(start, start + nb)], dtype="double")
@ -287,6 +315,10 @@ def gen_dataframe_all_data_type(nb=ct.default_nb, dim=ct.default_dim, start=0):
ct.default_int32_field_name: int32_values,
ct.default_int16_field_name: int16_values,
ct.default_int8_field_name: int8_values,
ct.default_uint64_field_name: uint64_values,
ct.default_uint32_field_name: uint32_values,
ct.default_uint16_field_name: uint16_values,
ct.default_uint8_field_name: uint8_values,
ct.default_bool_field_name: bool_values,
ct.default_float_field_name: float_values,
ct.default_double_field_name: double_values,
@ -368,7 +400,7 @@ def gen_simple_index():
def gen_invalid_field_types():
field_types = [
6,
12,
1.0,
[[]],
{},

View File

@ -28,6 +28,10 @@ default_int8_field_name = "int8"
default_int16_field_name = "int16"
default_int32_field_name = "int32"
default_int64_field_name = "int64"
default_uint8_field_name = "uint8"
default_uint16_field_name = "uint16"
default_uint32_field_name = "uint32"
default_uint64_field_name = "uint64"
default_float_field_name = "float"
default_double_field_name = "double"
default_string_field_name = "varchar"

View File

@ -357,7 +357,7 @@ class TestCollectionParams(TestcaseBase):
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
@pytest.mark.parametrize("dtype", [12, [[]], {}, (), "", "a"])
def test_collection_invalid_field_type(self, dtype):
"""
target: test collection with invalid field type

View File

@ -242,6 +242,9 @@ class TestQueryParams(TestcaseBase):
ct.default_int64_field_name: pd.Series(data=[i for i in range(ct.default_nb)]),
ct.default_int32_field_name: pd.Series(data=[np.int32(i) for i in range(ct.default_nb)], dtype="int32"),
ct.default_int16_field_name: pd.Series(data=[np.int16(i) for i in range(ct.default_nb)], dtype="int16"),
ct.default_uint64_field_name: pd.Series(data=[np.uint64(i) for i in range(ct.default_nb)], dtype="uint64"),
ct.default_uint32_field_name: pd.Series(data=[np.uint32(i) for i in range(ct.default_nb)], dtype="uint32"),
ct.default_uint16_field_name: pd.Series(data=[np.uint16(i) for i in range(ct.default_nb)], dtype="uint16"),
ct.default_float_field_name: pd.Series(data=[np.float32(i) for i in range(ct.default_nb)], dtype="float32"),
ct.default_double_field_name: pd.Series(data=[np.double(i) for i in range(ct.default_nb)], dtype="double"),
ct.default_string_field_name: pd.Series(data=[str(i) for i in range(ct.default_nb)], dtype="string"),
@ -254,6 +257,7 @@ class TestQueryParams(TestcaseBase):
# query by non_primary non_vector scalar field
non_primary_field = [ct.default_int32_field_name, ct.default_int16_field_name,
ct.default_uint64_field_name, ct.default_uint32_field_name, ct.default_uint16_field_name,
ct.default_float_field_name, ct.default_double_field_name, ct.default_string_field_name]
# exp res: first two rows and all fields expect last vec field
@ -553,8 +557,9 @@ class TestQueryParams(TestcaseBase):
collection_w, df, _, insert_ids = self.init_collection_general(prefix, True, nb=10,
is_all_data_type=True)[0:4]
all_fields = [ct.default_int64_field_name, ct.default_int32_field_name, ct.default_int16_field_name,
ct.default_int8_field_name, ct.default_bool_field_name, ct.default_float_field_name,
ct.default_double_field_name, ct.default_string_field_name, ct.default_float_vec_field_name]
ct.default_int8_field_name, ct.default_uint64_field_name, ct.default_uint32_field_name,
ct.default_uint16_field_name, ct.default_uint8_field_name, ct.default_bool_field_name,
ct.default_float_field_name, ct.default_double_field_name, ct.default_string_field_name, ct.default_float_vec_field_name]
res = df[0].iloc[:2].to_dict('records')
collection_w.load()
actual_res, _ = collection_w.query(default_term_expr, output_fields=all_fields,

View File

@ -2241,6 +2241,7 @@ class TestCollectionSearch(TestcaseBase):
log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \
"uint64 >= 0 && uint32 >= 0 && uint16 >= 0 uint8 >= 0" \
"&& int8 >= 0 && float >= 0 && double >= 0"
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,

View File

@ -221,7 +221,7 @@ def gen_primary_field():
def gen_single_filter_fields():
fields = []
for data_type in DataType:
if data_type in [DataType.INT32, DataType.INT64, DataType.FLOAT, DataType.DOUBLE]:
if data_type in [DataType.INT32, DataType.INT64,DataType.UINT32, DataType.UINT64, DataType.FLOAT, DataType.DOUBLE]:
fields.append({"name": data_type.name, "type": data_type})
return fields
@ -350,7 +350,7 @@ def gen_entities_by_fields(fields, nb, dim, ids=None):
for field in fields:
if field.get("is_primary", False) and ids:
field_value = ids
elif field["type"] in [DataType.INT32, DataType.INT64]:
elif field["type"] in [DataType.INT32, DataType.INT64,DataType.UINT32, DataType.UINT64]:
field_value = [1 for i in range(nb)]
elif field["type"] in [DataType.FLOAT, DataType.DOUBLE]:
field_value = [3.0 for i in range(nb)]