2021-12-23 04:05:21 +00:00
|
|
|
// Licensed to the LF AI & Data foundation under one
|
|
|
|
// or more contributor license agreements. See the NOTICE file
|
|
|
|
// distributed with this work for additional information
|
|
|
|
// regarding copyright ownership. The ASF licenses this file
|
|
|
|
// to you under the Apache License, Version 2.0 (the
|
|
|
|
// "License"); you may not use this file except in compliance
|
2021-04-19 03:32:24 +00:00
|
|
|
// with the License. You may obtain a copy of the License at
|
|
|
|
//
|
2021-12-23 04:05:21 +00:00
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
2021-04-19 03:32:24 +00:00
|
|
|
//
|
2021-12-23 04:05:21 +00:00
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
2021-04-19 03:32:24 +00:00
|
|
|
|
2020-12-09 01:55:56 +00:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/binary"
|
2021-09-23 09:23:54 +00:00
|
|
|
"encoding/json"
|
2021-10-11 09:28:30 +00:00
|
|
|
"fmt"
|
2021-07-07 11:10:07 +00:00
|
|
|
"io"
|
2021-10-11 13:02:37 +00:00
|
|
|
"strconv"
|
2021-03-05 02:15:27 +00:00
|
|
|
|
2023-02-26 03:31:49 +00:00
|
|
|
"github.com/cockroachdb/errors"
|
|
|
|
|
2023-06-08 17:28:37 +00:00
|
|
|
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
2023-04-06 11:14:32 +00:00
|
|
|
"github.com/milvus-io/milvus/pkg/common"
|
2024-07-01 08:38:06 +00:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/merr"
|
2023-04-06 11:14:32 +00:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
2020-12-09 01:55:56 +00:00
|
|
|
)
|
|
|
|
|
2024-07-03 11:42:10 +00:00
|
|
|
const (
	// originalSizeKey is the extras key recording a binlog file's original
	// size; the value is stored as a string to avoid JSON scientific
	// notation for large ints (see FinishExtra).
	originalSizeKey = "original_size"
	// nullableKey is the extras key recording whether the payload field is
	// nullable (a bool; see GetNullable/FinishExtra).
	nullableKey = "nullable"
)
|
2021-10-11 09:28:30 +00:00
|
|
|
|
enhance: add delta log stream new format reader and writer (#34116)
issue: #34123
Benchmark case: The benchmark run the go benchmark function
`BenchmarkDeltalogFormat` which is put in the Files changed. It tests
the performance of serializing and deserializing from two different data
formats under a 10 million delete log dataset.
Metrics: The benchmarks measure the average time taken per operation
(ns/op), memory allocated per operation (MB/op), and the number of
memory allocations per operation (allocs/op).
| Test Name | Avg Time (ns/op) | Time Comparison | Memory Allocation
(MB/op) | Memory Comparison | Allocation Count (allocs/op) | Allocation
Comparison |
|---------------------------------|------------------|-----------------|---------------------------|-------------------|------------------------------|------------------------|
| one_string_format_reader | 2,781,990,000 | Baseline | 2,422 | Baseline
| 20,336,539 | Baseline |
| pk_ts_separate_format_reader | 480,682,639 | -82.72% | 1,765 | -27.14%
| 20,396,958 | +0.30% |
| one_string_format_writer | 5,483,436,041 | Baseline | 13,900 |
Baseline | 70,057,473 | Baseline |
| pk_and_ts_separate_format_writer| 798,591,584 | -85.43% | 2,178 |
-84.34% | 30,270,488 | -56.78% |
Both read and write operations show significant improvements in both
speed and memory allocation.
Signed-off-by: shaoting-huang <shaoting.huang@zilliz.com>
2024-07-06 01:08:09 +00:00
|
|
|
// version is the extras key recording the binlog format version.
// NOTE(review): no reader/writer of this key is visible in this chunk — confirm usage with callers.
const version = "version"
|
|
|
|
|
|
|
|
// MultiField marks useMultiFieldFormat: it flags that a single log file
// contains data for multiple fields.
const MultiField = "MULTI_FIELD"
|
enhance: add delta log stream new format reader and writer (#34116)
issue: #34123
Benchmark case: The benchmark run the go benchmark function
`BenchmarkDeltalogFormat` which is put in the Files changed. It tests
the performance of serializing and deserializing from two different data
formats under a 10 million delete log dataset.
Metrics: The benchmarks measure the average time taken per operation
(ns/op), memory allocated per operation (MB/op), and the number of
memory allocations per operation (allocs/op).
| Test Name | Avg Time (ns/op) | Time Comparison | Memory Allocation
(MB/op) | Memory Comparison | Allocation Count (allocs/op) | Allocation
Comparison |
|---------------------------------|------------------|-----------------|---------------------------|-------------------|------------------------------|------------------------|
| one_string_format_reader | 2,781,990,000 | Baseline | 2,422 | Baseline
| 20,336,539 | Baseline |
| pk_ts_separate_format_reader | 480,682,639 | -82.72% | 1,765 | -27.14%
| 20,396,958 | +0.30% |
| one_string_format_writer | 5,483,436,041 | Baseline | 13,900 |
Baseline | 70,057,473 | Baseline |
| pk_and_ts_separate_format_writer| 798,591,584 | -85.43% | 2,178 |
-84.34% | 30,270,488 | -56.78% |
Both read and write operations show significant improvements in both
speed and memory allocation.
Signed-off-by: shaoting-huang <shaoting.huang@zilliz.com>
2024-07-06 01:08:09 +00:00
|
|
|
|
2020-12-09 01:55:56 +00:00
|
|
|
// descriptorEventData is the payload of a descriptor event: a fixed-length
// part followed by a variable-length, JSON-encoded extras section.
type descriptorEventData struct {
	DescriptorEventDataFixPart
	// ExtraLength is the byte length of ExtraBytes.
	ExtraLength int32
	// ExtraBytes is the JSON serialization of Extras, produced by FinishExtra.
	ExtraBytes []byte
	// Extras holds arbitrary key/value metadata (e.g. original_size, nullable).
	Extras map[string]interface{}
	// PostHeaderLengths records the fixed-part size of every event type,
	// indexed by event type code (filled in by newDescriptorEventData).
	PostHeaderLengths []uint8
}
|
|
|
|
|
2021-12-23 14:03:56 +00:00
|
|
|
// DescriptorEventDataFixPart is the fixed-length part of a descriptor event.
// Field order defines the on-disk binary layout (serialized via binary.Write).
type DescriptorEventDataFixPart struct {
	CollectionID    int64
	PartitionID     int64
	SegmentID       int64
	FieldID         int64
	StartTimestamp  typeutil.Timestamp
	EndTimestamp    typeutil.Timestamp
	PayloadDataType schemapb.DataType
}
|
|
|
|
|
2021-09-18 10:37:56 +00:00
|
|
|
// SetEventTimeStamp set the timestamp value of DescriptorEventDataFixPart.
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *descriptorEventData) SetEventTimeStamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2021-09-23 09:23:54 +00:00
|
|
|
// GetEventDataFixPartSize returns the memory size of DescriptorEventDataFixPart.
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *descriptorEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data.DescriptorEventDataFixPart))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2024-07-01 08:38:06 +00:00
|
|
|
func (data *descriptorEventData) GetNullable() (bool, error) {
|
|
|
|
nullableStore, ok := data.Extras[nullableKey]
|
|
|
|
// previous descriptorEventData not store nullable
|
|
|
|
if !ok {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
nullable, ok := nullableStore.(bool)
|
2024-07-03 11:42:10 +00:00
|
|
|
// will not happen, has checked bool format when FinishExtra
|
2024-07-01 08:38:06 +00:00
|
|
|
if !ok {
|
|
|
|
return false, merr.WrapErrParameterInvalidMsg(fmt.Sprintf("value of %v must in bool format", nullableKey))
|
|
|
|
}
|
|
|
|
return nullable, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 09:23:54 +00:00
|
|
|
// GetMemoryUsageInBytes returns the memory size of DescriptorEventDataFixPart.
|
2020-12-09 01:55:56 +00:00
|
|
|
func (data *descriptorEventData) GetMemoryUsageInBytes() int32 {
|
2021-09-23 09:23:54 +00:00
|
|
|
return data.GetEventDataFixPartSize() + int32(binary.Size(data.PostHeaderLengths)) + int32(binary.Size(data.ExtraLength)) + data.ExtraLength
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddExtra add extra params to description event.
|
|
|
|
func (data *descriptorEventData) AddExtra(k string, v interface{}) {
|
|
|
|
data.Extras[k] = v
|
|
|
|
}
|
|
|
|
|
|
|
|
// FinishExtra marshal extras to json format.
|
2021-12-15 04:03:58 +00:00
|
|
|
// Call before GetMemoryUsageInBytes to get an accurate length of description event.
|
2021-09-23 09:23:54 +00:00
|
|
|
func (data *descriptorEventData) FinishExtra() error {
|
|
|
|
var err error
|
2021-10-11 09:28:30 +00:00
|
|
|
|
|
|
|
// keep all binlog file records the original size
|
2021-10-11 13:02:37 +00:00
|
|
|
sizeStored, ok := data.Extras[originalSizeKey]
|
2021-10-11 09:28:30 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("%v not in extra", originalSizeKey)
|
|
|
|
}
|
2021-10-11 13:02:37 +00:00
|
|
|
// if we store a large int directly, golang will use scientific notation, we then will get a float value.
|
|
|
|
// so it's better to store the original size in string format.
|
|
|
|
sizeStr, ok := sizeStored.(string)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("value of %v must in string format", originalSizeKey)
|
|
|
|
}
|
|
|
|
_, err = strconv.Atoi(sizeStr)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("value of %v must be able to be converted into int format", originalSizeKey)
|
|
|
|
}
|
2021-10-11 09:28:30 +00:00
|
|
|
|
2024-07-01 08:38:06 +00:00
|
|
|
nullableStore, existed := data.Extras[nullableKey]
|
|
|
|
if existed {
|
|
|
|
_, ok := nullableStore.(bool)
|
|
|
|
if !ok {
|
|
|
|
return merr.WrapErrParameterInvalidMsg(fmt.Sprintf("value of %v must in bool format", nullableKey))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-23 09:23:54 +00:00
|
|
|
data.ExtraBytes, err = json.Marshal(data.Extras)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
data.ExtraLength = int32(len(data.ExtraBytes))
|
|
|
|
return nil
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2021-09-18 10:37:56 +00:00
|
|
|
// Write transfer DescriptorEventDataFixPart to binary buffer.
|
2020-12-09 01:55:56 +00:00
|
|
|
func (data *descriptorEventData) Write(buffer io.Writer) error {
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Write(buffer, common.Endian, data.DescriptorEventDataFixPart); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return err
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Write(buffer, common.Endian, data.PostHeaderLengths); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return err
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Write(buffer, common.Endian, data.ExtraLength); err != nil {
|
2021-09-23 09:23:54 +00:00
|
|
|
return err
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Write(buffer, common.Endian, data.ExtraBytes); err != nil {
|
2021-09-23 09:23:54 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func readDescriptorEventData(buffer io.Reader) (*descriptorEventData, error) {
|
2021-04-19 02:36:19 +00:00
|
|
|
event := newDescriptorEventData()
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, &event.DescriptorEventDataFixPart); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, &event.PostHeaderLengths); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2021-09-23 09:23:54 +00:00
|
|
|
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, &event.ExtraLength); err != nil {
|
2021-09-23 09:23:54 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
event.ExtraBytes = make([]byte, event.ExtraLength)
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, &event.ExtraBytes); err != nil {
|
2021-09-23 09:23:54 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if err := json.Unmarshal(event.ExtraBytes, &event.Extras); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
return event, nil
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// eventData is implemented by every event payload type in this file.
type eventData interface {
	// GetEventDataFixPartSize returns the serialized size in bytes of the fixed part.
	GetEventDataFixPartSize() int32
	// WriteEventData serializes the event payload into buffer.
	WriteEventData(buffer io.Writer) error
}
|
|
|
|
|
|
|
|
// all event types' fixed part only have start Timestamp and end Timestamp yet, but maybe different events will
|
2021-12-20 14:49:00 +00:00
|
|
|
// have different fields later, so we just create an event data struct per event type.
|
2020-12-09 01:55:56 +00:00
|
|
|
type insertEventData struct {
|
|
|
|
StartTimestamp typeutil.Timestamp
|
|
|
|
EndTimestamp typeutil.Timestamp
|
|
|
|
}
|
|
|
|
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *insertEventData) SetEventTimestamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func (data *insertEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (data *insertEventData) WriteEventData(buffer io.Writer) error {
|
2020-12-11 03:29:07 +00:00
|
|
|
if data.StartTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set start time stamp")
|
|
|
|
}
|
|
|
|
if data.EndTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set end time stamp")
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
return binary.Write(buffer, common.Endian, data)
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type deleteEventData struct {
|
|
|
|
StartTimestamp typeutil.Timestamp
|
|
|
|
EndTimestamp typeutil.Timestamp
|
|
|
|
}
|
|
|
|
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *deleteEventData) SetEventTimestamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func (data *deleteEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (data *deleteEventData) WriteEventData(buffer io.Writer) error {
|
2020-12-11 03:29:07 +00:00
|
|
|
if data.StartTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set start time stamp")
|
|
|
|
}
|
|
|
|
if data.EndTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set end time stamp")
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
return binary.Write(buffer, common.Endian, data)
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type createCollectionEventData struct {
|
|
|
|
StartTimestamp typeutil.Timestamp
|
|
|
|
EndTimestamp typeutil.Timestamp
|
|
|
|
}
|
|
|
|
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *createCollectionEventData) SetEventTimestamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func (data *createCollectionEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (data *createCollectionEventData) WriteEventData(buffer io.Writer) error {
|
2020-12-11 03:29:07 +00:00
|
|
|
if data.StartTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set start time stamp")
|
|
|
|
}
|
|
|
|
if data.EndTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set end time stamp")
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
return binary.Write(buffer, common.Endian, data)
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type dropCollectionEventData struct {
|
|
|
|
StartTimestamp typeutil.Timestamp
|
|
|
|
EndTimestamp typeutil.Timestamp
|
|
|
|
}
|
|
|
|
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *dropCollectionEventData) SetEventTimestamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func (data *dropCollectionEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (data *dropCollectionEventData) WriteEventData(buffer io.Writer) error {
|
2020-12-11 03:29:07 +00:00
|
|
|
if data.StartTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set start time stamp")
|
|
|
|
}
|
|
|
|
if data.EndTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set end time stamp")
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
return binary.Write(buffer, common.Endian, data)
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type createPartitionEventData struct {
|
|
|
|
StartTimestamp typeutil.Timestamp
|
|
|
|
EndTimestamp typeutil.Timestamp
|
|
|
|
}
|
|
|
|
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *createPartitionEventData) SetEventTimestamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func (data *createPartitionEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (data *createPartitionEventData) WriteEventData(buffer io.Writer) error {
|
2020-12-11 03:29:07 +00:00
|
|
|
if data.StartTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set start time stamp")
|
|
|
|
}
|
|
|
|
if data.EndTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set end time stamp")
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
return binary.Write(buffer, common.Endian, data)
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type dropPartitionEventData struct {
|
|
|
|
StartTimestamp typeutil.Timestamp
|
|
|
|
EndTimestamp typeutil.Timestamp
|
|
|
|
}
|
|
|
|
|
2021-07-07 11:10:07 +00:00
|
|
|
func (data *dropPartitionEventData) SetEventTimestamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func (data *dropPartitionEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (data *dropPartitionEventData) WriteEventData(buffer io.Writer) error {
|
2020-12-11 03:29:07 +00:00
|
|
|
if data.StartTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set start time stamp")
|
|
|
|
}
|
|
|
|
if data.EndTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set end time stamp")
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
return binary.Write(buffer, common.Endian, data)
|
2020-12-09 03:18:13 +00:00
|
|
|
}
|
|
|
|
|
2021-09-29 01:52:12 +00:00
|
|
|
type indexFileEventData struct {
|
|
|
|
StartTimestamp typeutil.Timestamp
|
|
|
|
EndTimestamp typeutil.Timestamp
|
|
|
|
}
|
|
|
|
|
|
|
|
func (data *indexFileEventData) SetEventTimestamp(start typeutil.Timestamp, end typeutil.Timestamp) {
|
|
|
|
data.StartTimestamp = start
|
|
|
|
data.EndTimestamp = end
|
|
|
|
}
|
|
|
|
|
|
|
|
func (data *indexFileEventData) GetEventDataFixPartSize() int32 {
|
|
|
|
return int32(binary.Size(data))
|
|
|
|
}
|
|
|
|
|
|
|
|
func (data *indexFileEventData) WriteEventData(buffer io.Writer) error {
|
|
|
|
if data.StartTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set start time stamp")
|
|
|
|
}
|
|
|
|
if data.EndTimestamp == 0 {
|
|
|
|
return errors.New("hasn't set end time stamp")
|
|
|
|
}
|
2021-11-02 10:16:32 +00:00
|
|
|
return binary.Write(buffer, common.Endian, data)
|
2021-09-29 01:52:12 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func getEventFixPartSize(code EventTypeCode) int32 {
|
|
|
|
switch code {
|
|
|
|
case DescriptorEventType:
|
2021-07-07 11:10:07 +00:00
|
|
|
return (&descriptorEventData{}).GetEventDataFixPartSize()
|
2020-12-09 03:18:13 +00:00
|
|
|
case InsertEventType:
|
|
|
|
return (&insertEventData{}).GetEventDataFixPartSize()
|
|
|
|
case DeleteEventType:
|
|
|
|
return (&deleteEventData{}).GetEventDataFixPartSize()
|
|
|
|
case CreateCollectionEventType:
|
|
|
|
return (&createCollectionEventData{}).GetEventDataFixPartSize()
|
|
|
|
case DropCollectionEventType:
|
|
|
|
return (&dropCollectionEventData{}).GetEventDataFixPartSize()
|
|
|
|
case CreatePartitionEventType:
|
2021-04-19 02:36:19 +00:00
|
|
|
return (&createPartitionEventData{}).GetEventDataFixPartSize()
|
2020-12-09 03:18:13 +00:00
|
|
|
case DropPartitionEventType:
|
|
|
|
return (&dropPartitionEventData{}).GetEventDataFixPartSize()
|
2021-09-29 01:52:12 +00:00
|
|
|
case IndexFileEventType:
|
|
|
|
return (&indexFileEventData{}).GetEventDataFixPartSize()
|
2020-12-09 03:18:13 +00:00
|
|
|
default:
|
|
|
|
return -1
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-19 02:36:19 +00:00
|
|
|
func newDescriptorEventData() *descriptorEventData {
|
2020-12-09 01:55:56 +00:00
|
|
|
data := descriptorEventData{
|
|
|
|
DescriptorEventDataFixPart: DescriptorEventDataFixPart{
|
|
|
|
CollectionID: -1,
|
|
|
|
PartitionID: -1,
|
|
|
|
SegmentID: -1,
|
2020-12-11 03:29:07 +00:00
|
|
|
FieldID: -1,
|
2020-12-09 01:55:56 +00:00
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
|
|
|
PayloadDataType: -1,
|
|
|
|
},
|
2020-12-09 03:18:13 +00:00
|
|
|
PostHeaderLengths: []uint8{},
|
2021-09-23 09:23:54 +00:00
|
|
|
Extras: make(map[string]interface{}),
|
2020-12-09 03:18:13 +00:00
|
|
|
}
|
|
|
|
for i := DescriptorEventType; i < EventTypeEnd; i++ {
|
|
|
|
size := getEventFixPartSize(i)
|
|
|
|
data.PostHeaderLengths = append(data.PostHeaderLengths, uint8(size))
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
2021-04-19 02:36:19 +00:00
|
|
|
return &data
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
|
|
|
|
2021-04-19 02:36:19 +00:00
|
|
|
func newInsertEventData() *insertEventData {
|
2020-12-09 03:18:13 +00:00
|
|
|
return &insertEventData{
|
2020-12-09 01:55:56 +00:00
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
2021-04-19 02:36:19 +00:00
|
|
|
}
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
2023-09-21 01:45:27 +00:00
|
|
|
|
2021-04-19 02:36:19 +00:00
|
|
|
func newDeleteEventData() *deleteEventData {
|
2020-12-09 03:18:13 +00:00
|
|
|
return &deleteEventData{
|
2020-12-09 01:55:56 +00:00
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
2021-04-19 02:36:19 +00:00
|
|
|
}
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
2023-09-21 01:45:27 +00:00
|
|
|
|
2021-04-19 02:36:19 +00:00
|
|
|
func newCreateCollectionEventData() *createCollectionEventData {
|
2020-12-09 03:18:13 +00:00
|
|
|
return &createCollectionEventData{
|
2020-12-09 01:55:56 +00:00
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
2021-04-19 02:36:19 +00:00
|
|
|
}
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
2023-09-21 01:45:27 +00:00
|
|
|
|
2021-04-19 02:36:19 +00:00
|
|
|
func newDropCollectionEventData() *dropCollectionEventData {
|
2020-12-09 03:18:13 +00:00
|
|
|
return &dropCollectionEventData{
|
2020-12-09 01:55:56 +00:00
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
2021-04-19 02:36:19 +00:00
|
|
|
}
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
2023-09-21 01:45:27 +00:00
|
|
|
|
2021-04-19 02:36:19 +00:00
|
|
|
func newCreatePartitionEventData() *createPartitionEventData {
|
2020-12-09 03:18:13 +00:00
|
|
|
return &createPartitionEventData{
|
2020-12-09 01:55:56 +00:00
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
2021-04-19 02:36:19 +00:00
|
|
|
}
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
2023-09-21 01:45:27 +00:00
|
|
|
|
2021-04-19 02:36:19 +00:00
|
|
|
func newDropPartitionEventData() *dropPartitionEventData {
|
2020-12-09 03:18:13 +00:00
|
|
|
return &dropPartitionEventData{
|
2020-12-09 01:55:56 +00:00
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
2021-04-19 02:36:19 +00:00
|
|
|
}
|
2020-12-09 01:55:56 +00:00
|
|
|
}
|
2023-09-21 01:45:27 +00:00
|
|
|
|
2021-09-29 01:52:12 +00:00
|
|
|
func newIndexFileEventData() *indexFileEventData {
|
|
|
|
return &indexFileEventData{
|
|
|
|
StartTimestamp: 0,
|
|
|
|
EndTimestamp: 0,
|
|
|
|
}
|
|
|
|
}
|
2020-12-09 01:55:56 +00:00
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func readInsertEventDataFixPart(buffer io.Reader) (*insertEventData, error) {
|
2020-12-09 01:55:56 +00:00
|
|
|
data := &insertEventData{}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, data); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func readDeleteEventDataFixPart(buffer io.Reader) (*deleteEventData, error) {
|
2020-12-09 01:55:56 +00:00
|
|
|
data := &deleteEventData{}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, data); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func readCreateCollectionEventDataFixPart(buffer io.Reader) (*createCollectionEventData, error) {
|
2020-12-09 01:55:56 +00:00
|
|
|
data := &createCollectionEventData{}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, data); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func readDropCollectionEventDataFixPart(buffer io.Reader) (*dropCollectionEventData, error) {
|
2020-12-09 01:55:56 +00:00
|
|
|
data := &dropCollectionEventData{}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, data); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func readCreatePartitionEventDataFixPart(buffer io.Reader) (*createPartitionEventData, error) {
|
2020-12-09 01:55:56 +00:00
|
|
|
data := &createPartitionEventData{}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, data); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
2020-12-09 03:18:13 +00:00
|
|
|
func readDropPartitionEventDataFixPart(buffer io.Reader) (*dropPartitionEventData, error) {
|
2020-12-09 01:55:56 +00:00
|
|
|
data := &dropPartitionEventData{}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, data); err != nil {
|
2020-12-09 01:55:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return data, nil
|
|
|
|
}
|
2021-09-29 01:52:12 +00:00
|
|
|
|
|
|
|
func readIndexFileEventDataFixPart(buffer io.Reader) (*indexFileEventData, error) {
|
|
|
|
data := &indexFileEventData{}
|
2021-11-02 10:16:32 +00:00
|
|
|
if err := binary.Read(buffer, common.Endian, data); err != nil {
|
2021-09-29 01:52:12 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return data, nil
|
|
|
|
}
|