mirror of https://github.com/milvus-io/milvus.git
Add segment threshold factor
Signed-off-by: sunby <bingyi.sun@zilliz.com>pull/4973/head^2
parent
1d1692055d
commit
578e952c60
|
@ -29,26 +29,27 @@ func main() {
|
|||
statsChannel := master.Params.StatsChannels()
|
||||
|
||||
opt := master.Option{
|
||||
KVRootPath: etcdRootPath,
|
||||
MetaRootPath: etcdRootPath,
|
||||
EtcdAddr: []string{etcdAddress},
|
||||
PulsarAddr: pulsarAddr,
|
||||
ProxyIDs: master.Params.ProxyIDList(),
|
||||
PulsarProxyChannels: master.Params.ProxyTimeSyncChannels(),
|
||||
PulsarProxySubName: master.Params.ProxyTimeSyncSubName(),
|
||||
SoftTTBInterval: master.Params.SoftTimeTickBarrierInterval(),
|
||||
WriteIDs: master.Params.WriteIDList(),
|
||||
PulsarWriteChannels: master.Params.WriteTimeSyncChannels(),
|
||||
PulsarWriteSubName: master.Params.WriteTimeSyncSubName(),
|
||||
PulsarDMChannels: master.Params.DMTimeSyncChannels(),
|
||||
PulsarK2SChannels: master.Params.K2STimeSyncChannels(),
|
||||
DefaultRecordSize: defaultRecordSize,
|
||||
MinimumAssignSize: minimumAssignSize,
|
||||
SegmentThreshold: segmentThreshold,
|
||||
SegmentExpireDuration: segmentExpireDuration,
|
||||
NumOfChannel: numOfChannel,
|
||||
NumOfQueryNode: nodeNum,
|
||||
StatsChannels: statsChannel,
|
||||
KVRootPath: etcdRootPath,
|
||||
MetaRootPath: etcdRootPath,
|
||||
EtcdAddr: []string{etcdAddress},
|
||||
PulsarAddr: pulsarAddr,
|
||||
ProxyIDs: master.Params.ProxyIDList(),
|
||||
PulsarProxyChannels: master.Params.ProxyTimeSyncChannels(),
|
||||
PulsarProxySubName: master.Params.ProxyTimeSyncSubName(),
|
||||
SoftTTBInterval: master.Params.SoftTimeTickBarrierInterval(),
|
||||
WriteIDs: master.Params.WriteIDList(),
|
||||
PulsarWriteChannels: master.Params.WriteTimeSyncChannels(),
|
||||
PulsarWriteSubName: master.Params.WriteTimeSyncSubName(),
|
||||
PulsarDMChannels: master.Params.DMTimeSyncChannels(),
|
||||
PulsarK2SChannels: master.Params.K2STimeSyncChannels(),
|
||||
DefaultRecordSize: defaultRecordSize,
|
||||
MinimumAssignSize: minimumAssignSize,
|
||||
SegmentThreshold: segmentThreshold,
|
||||
SegmentThresholdFactor: master.Params.SegmentThresholdFactor(),
|
||||
SegmentExpireDuration: segmentExpireDuration,
|
||||
NumOfChannel: numOfChannel,
|
||||
NumOfQueryNode: nodeNum,
|
||||
StatsChannels: statsChannel,
|
||||
}
|
||||
|
||||
svr, err := master.CreateServer(ctx, &opt)
|
||||
|
|
|
@ -31,6 +31,7 @@ master:
|
|||
minimumAssignSize: 1048576
|
||||
segmentThreshold: 536870912
|
||||
segmentExpireDuration: 2000
|
||||
segmentThresholdFactor: 0.75
|
||||
querynodenum: 1
|
||||
writenodenum: 1
|
||||
statsChannels: "statistic"
|
||||
|
|
|
@ -23,444 +23,21 @@ In this section, we introduce the RPCs of milvus service. A brief description of
|
|||
|
||||
#### 3.1 Definition Requests
|
||||
|
||||
###### 3.1.1 CreateCollection
|
||||
###### 3.2.1 Collection
|
||||
|
||||
**Interface:**
|
||||
* CreateCollection
|
||||
* DropCollection
|
||||
* HasCollection
|
||||
* DescribeCollection
|
||||
* ShowCollections
|
||||
|
||||
```
|
||||
rpc CreateCollection(schema.CollectionSchema) returns (common.Status){}
|
||||
```
|
||||
###### 3.2.2 Partition
|
||||
|
||||
**Description:**
|
||||
|
||||
Create a collection through collection schema.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **schema.CollectionSchema**
|
||||
|
||||
CollectionSchema struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message CollectionSchema {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
bool autoID = 3;
|
||||
repeated FieldSchema fields = 4;
|
||||
}
|
||||
```
|
||||
|
||||
Collection schema contains all the base information of a collection including **collection name**, **description**, **autoID** and **fields**. Collection description is defined by the database manager to describe the collection. **autoID** determines whether the ID of each row of data is user-defined. If **autoID** is true, the system will generate a unique ID for each row of data. If **autoID** is false, users need to give each entity an ID when inserting.
|
||||
|
||||
**Fields** is a list of **FieldSchema**. Each schema should include Field **name**, **description**, **dataType**, **type_params** and **index_params**.
|
||||
|
||||
FieldSchema struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message FieldSchema {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
DataType data_type = 3;
|
||||
repeated common.KeyValuePair type_params = 4;
|
||||
repeated common.KeyValuePair index_params = 5;
|
||||
}
|
||||
```
|
||||
|
||||
**Field schema** contains all the base information of a field including field **name**, **description**, **data_type**, **type_params** and **index_params**. **data_type** is an enum type to distinguish different data types. The complete enum is listed at the end of this doc.
|
||||
|
||||
**type_params** contains the detailed information of data_type. For example, vector data type should include dimension information. You can give a pair of <dim, 8> to let the field store 8-dimension vector.
|
||||
|
||||
**index_params**: for fast search, you build an index for a field. You specify detailed index information for the field. Detailed information about indexes can be found in chapter 2.2.3.
|
||||
|
||||
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **common.Status**
|
||||
|
||||
```protobuf
|
||||
message Status {
|
||||
ErrorCode error_code = 1;
|
||||
string reason = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**Status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
|
||||
|
||||
###### 3.1.2 DropCollection
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc DropCollection(CollectionName) returns (common.Status) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to delete collection.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **CollectionName**
|
||||
|
||||
CollectionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message CollectionName {
|
||||
string collection_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**CollectionName** contains only a string named **collection_name**. Collection with the same collection_name is going to be deleted.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **common.Status**
|
||||
|
||||
```protobuf
|
||||
message Status {
|
||||
ErrorCode error_code = 1;
|
||||
string reason = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**Status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
|
||||
|
||||
###### 3.1.3 HasCollection
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc HasCollection(CollectionName) returns (BoolResponse) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to test collection existence.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **CollectionName**
|
||||
|
||||
CollectionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message CollectionName {
|
||||
string collection_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**CollectionName** contains only a string named **collection_name**. The server finds the collection through collection_name and judge whether the collection exists.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **BoolResponse**
|
||||
|
||||
```protobuf
|
||||
message BoolResponse {
|
||||
common.Status status = 1;
|
||||
bool value = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
**value** represents whether the collection exists. If collection exists, value will be true. If collection doesn't exist, value will be false.
|
||||
|
||||
|
||||
|
||||
###### 3.1.4 DescribeCollection
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc DescribeCollection(CollectionName) returns (CollectionDescription) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to get collection schema.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **CollectionName**
|
||||
|
||||
CollectionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message CollectionName {
|
||||
string collection_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**CollectionName** contains only a string named **collection_name**. The server finds the collection through collection_name and get detailed collection information
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **CollectionDescription**
|
||||
|
||||
```protobuf
|
||||
message CollectionDescription {
|
||||
common.Status status = 1;
|
||||
schema.CollectionSchema schema = 2;
|
||||
repeated common.KeyValuePair statistics = 3;
|
||||
}
|
||||
```
|
||||
|
||||
**status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
**schema** is collection schema same as the collection schema in [CreateCollection](#311-createcollection).
|
||||
|
||||
**statistics** is a set of statistics used to count various information, such as the number of segments, how many rows there are, the number of visits in the last hour, etc.
|
||||
|
||||
|
||||
|
||||
###### 3.1.5 ShowCollections
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc ShowCollections(common.Empty) returns (StringListResponse) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to list the names of all collections.
|
||||
|
||||
**Parameters:** None
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **StringListResponse**
|
||||
|
||||
```protobuf
|
||||
message StringListResponse {
|
||||
common.Status status = 1;
|
||||
repeated string values = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
**values** is a list containing all collection names.
|
||||
|
||||
|
||||
|
||||
###### 3.1.6 CreatePartition
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc CreatePartition(PartitionName) returns (common.Status) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to create partition
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **PartitionName**
|
||||
|
||||
PartitionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message PartitionName {
|
||||
string partition_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**PartitionName** contains only a string named **partition_name**. The server creates partition with the partition_name
|
||||
|
||||
- **Returns:**
|
||||
|
||||
- **common.Status**
|
||||
|
||||
```protobuf
|
||||
message Status {
|
||||
ErrorCode error_code = 1;
|
||||
string reason = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**Status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
|
||||
|
||||
###### 3.1.7 DropPartition
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc DropPartition(PartitionName) returns (common.Status) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to drop partition.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **PartitionName**
|
||||
|
||||
PartitionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message PartitionName {
|
||||
string partition_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**PartitionName** contains only a string named **partition_name**. Partition with the same partition_name is going to be deleted.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **common.Status**
|
||||
|
||||
```protobuf
|
||||
message Status {
|
||||
ErrorCode error_code = 1;
|
||||
string reason = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**Status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
|
||||
|
||||
###### 3.1.8 HasPartition
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc HasPartition(PartitionName) returns (BoolResponse) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to test partition existence.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **PartitionName**
|
||||
|
||||
PartitionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message PartitionName {
|
||||
string partition_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**PartitionName** contains only a string named **partition_name**. Partition with the same partition_name is going to be tested.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **BoolResponse**
|
||||
|
||||
```protobuf
|
||||
message BoolResponse {
|
||||
common.Status status = 1;
|
||||
bool value = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
**value** represents whether the partition exists. If partition exists, value will be true. If partition doesn't exist, value will be false.
|
||||
|
||||
|
||||
|
||||
###### 3.1.9 DescribePartition
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc DescribePartition(PartitionName) returns (PartitionDescription) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to show partition information
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **PartitionName**
|
||||
|
||||
PartitionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message PartitionName {
|
||||
string partition_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**PartitionName** contains only a string named **partition_name**. The server finds the partition through partition_name and get detailed partition information
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **PartitionDescription**
|
||||
|
||||
```protobuf
|
||||
message PartitionDescription {
|
||||
common.Status status = 1;
|
||||
PartitionName name = 2;
|
||||
repeated common.KeyValuePair statistics = 3;
|
||||
}
|
||||
```
|
||||
|
||||
**status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
**name** is partition_name same as the PartitionName in [CreatePartition](#316-createpartition).
|
||||
|
||||
**statistics** is a set of statistics used to count various information, such as the number of segments, how many rows there are, the number of visits in the last hour, etc.
|
||||
|
||||
|
||||
|
||||
###### 3.1.10 ShowPartitions
|
||||
|
||||
**Interface:**
|
||||
|
||||
```
|
||||
rpc ShowPartitions(CollectionName) returns (StringListResponse) {}
|
||||
```
|
||||
|
||||
**Description:**
|
||||
|
||||
This method is used to list the names of all partitions in a collection.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **CollectionName**
|
||||
|
||||
CollectionName struct is shown as follows:
|
||||
|
||||
```protobuf
|
||||
message CollectionName {
|
||||
string collection_name = 1;
|
||||
}
|
||||
```
|
||||
|
||||
**CollectionName** contains only a string named **collection_name**. The partitions in the collection with this collection_name are going to be listed.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- **StringListResponse**
|
||||
|
||||
```protobuf
|
||||
message StringListResponse {
|
||||
common.Status status = 1;
|
||||
repeated string values = 2;
|
||||
}
|
||||
```
|
||||
|
||||
**status** represents the server error code. It doesn't contain the gRPC error but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the execution error type. The complete ErrorCode list is shown at the end of this document, and the **reason** field is a string that describes the detailed error.
|
||||
|
||||
**values** is a list containing all partition names.
|
||||
* CreatePartition
|
||||
* DropPartition
|
||||
* HasPartition
|
||||
* DescribePartition
|
||||
* ShowPartitions
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -1,36 +1,2 @@
|
|||
## Appendix D. Error Code
|
||||
|
||||
**ErrorCode**
|
||||
|
||||
```protobuf
|
||||
enum ErrorCode {
|
||||
SUCCESS = 0;
|
||||
UNEXPECTED_ERROR = 1;
|
||||
CONNECT_FAILED = 2;
|
||||
PERMISSION_DENIED = 3;
|
||||
COLLECTION_NOT_EXISTS = 4;
|
||||
ILLEGAL_ARGUMENT = 5;
|
||||
ILLEGAL_DIMENSION = 7;
|
||||
ILLEGAL_INDEX_TYPE = 8;
|
||||
ILLEGAL_COLLECTION_NAME = 9;
|
||||
ILLEGAL_TOPK = 10;
|
||||
ILLEGAL_ROWRECORD = 11;
|
||||
ILLEGAL_VECTOR_ID = 12;
|
||||
ILLEGAL_SEARCH_RESULT = 13;
|
||||
FILE_NOT_FOUND = 14;
|
||||
META_FAILED = 15;
|
||||
CACHE_FAILED = 16;
|
||||
CANNOT_CREATE_FOLDER = 17;
|
||||
CANNOT_CREATE_FILE = 18;
|
||||
CANNOT_DELETE_FOLDER = 19;
|
||||
CANNOT_DELETE_FILE = 20;
|
||||
BUILD_INDEX_ERROR = 21;
|
||||
ILLEGAL_NLIST = 22;
|
||||
ILLEGAL_METRIC_TYPE = 23;
|
||||
OUT_OF_MEMORY = 24;
|
||||
|
||||
// internal error code.
|
||||
DD_REQUEST_RACE = 1000;
|
||||
}
|
||||
```
|
||||
|
||||
|
|
|
@ -27,487 +27,7 @@ type FieldSchema struct {
|
|||
|
||||
###### 2.2.1 Data Types
|
||||
|
||||
**DataType**
|
||||
|
||||
```protobuf
|
||||
enum DataType {
|
||||
NONE = 0;
|
||||
BOOL = 1;
|
||||
INT8 = 2;
|
||||
INT16 = 3;
|
||||
INT32 = 4;
|
||||
INT64 = 5;
|
||||
|
||||
FLOAT = 10;
|
||||
DOUBLE = 11;
|
||||
|
||||
STRING = 20;
|
||||
|
||||
VECTOR_BINARY = 100;
|
||||
VECTOR_FLOAT = 101;
|
||||
}
|
||||
```
|
||||
|
||||
###### 2.2.2 Type Params
|
||||
|
||||
###### 2.2.3 Index Params
|
||||
|
||||
|
||||
|
||||
# Intro to Index
|
||||
|
||||
For more detailed information about indexes, please refer to [Milvus documentation index chapter.](https://milvus.io/docs/v0.11.0/index.md)
|
||||
|
||||
To learn how to choose an appropriate index for your application scenarios, please read [How to Select an Index in Milvus](https://medium.com/@milvusio/how-to-choose-an-index-in-milvus-4f3d15259212).
|
||||
|
||||
To learn how to choose an appropriate index for a metric, see [Distance Metrics](https://www.milvus.io/docs/v0.11.0/metric.md).
|
||||
|
||||
Different index types use different index params in construction and query. All index params are represented as a map structure. This doc shows the map code in Python.
|
||||
|
||||
|
||||
|
||||
[IVF_FLAT](#IVF_FLAT)
|
||||
[BIN_IVF_FLAT](#BIN_IVF_FLAT)
|
||||
[IVF_PQ](#IVF_PQ)
|
||||
[IVF_SQ8](#IVF_SQ8)
|
||||
[IVF_SQ8_HYBRID](#IVF_SQ8_HYBRID)
|
||||
[ANNOY](#ANNOY)
|
||||
[HNSW](#HNSW)
|
||||
[RHNSW_PQ](#RHNSW_PQ)
|
||||
[RHNSW_SQ](#RHNSW_SQ)
|
||||
[NSG](#NSG)
|
||||
|
||||
|
||||
|
||||
## IVF_FLAT
|
||||
|
||||
**IVF** (*Inverted File*) is an index type based on quantization. It divides the points in space into `nlist`
|
||||
units by clustering method. During searching vectors, it compares the distances between the target vector
|
||||
and the center of all the units, and then select the `nprobe` nearest unit. Then, it compares all the vectors
|
||||
in these selected cells to get the final result.
|
||||
|
||||
IVF_FLAT is the most basic IVF index, and the encoded data stored in each unit is consistent with the original data.
|
||||
|
||||
|
||||
|
||||
- building parameters:
|
||||
|
||||
**nlist**: Number of cluster units.
|
||||
|
||||
```python
|
||||
# IVF_FLAT
|
||||
{
|
||||
"index_type": "IVF_FLAT",
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_FLAT
|
||||
"nlist": 100 # int. 1~65536
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**nprobe**: Number of inverted file cell to probe.
|
||||
|
||||
```python
|
||||
# IVF_FLAT
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_FLAT
|
||||
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
|
||||
}
|
||||
```
|
||||
|
||||
## BIN_IVF_FLAT
|
||||
|
||||
**BIN_IVF_FLAT** is a binary variant of IVF_FLAT.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**nlist**: Number of cluster units.
|
||||
|
||||
```python
|
||||
# BIN_IVF_FLAT
|
||||
{
|
||||
"index_type": "BIN_IVF_FLAT",
|
||||
"metric_type": "jaccard", # one of jaccard, hamming, tanimoto
|
||||
|
||||
#Special for BIN_IVF_FLAT
|
||||
"nlist": 100 # int. 1~65536
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**nprobe**: Number of inverted file cell to probe.
|
||||
|
||||
```python
|
||||
# BIN_IVF_FLAT
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
|
||||
#Special for BIN_IVF_FLAT
|
||||
"metric_type": "jaccard", # one of jaccard, hamming, tanimoto
|
||||
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## IVF_PQ
|
||||
|
||||
**PQ** (*Product Quantization*) uniformly decomposes the original high-dimensional vector space into
|
||||
Cartesian products of `m` low-dimensional vector spaces, and then quantizes the decomposed low-dimensional
|
||||
vector spaces. Instead of calculating the distances between the target vector and the center of all the units,
|
||||
product quantization enables the calculation of distances between the target vector and the clustering center
|
||||
of each low-dimensional space and greatly reduces the time complexity and space complexity of the algorithm.
|
||||
|
||||
IVF_PQ performs IVF index clustering, and then quantizes the product of vectors. Its index file is even
|
||||
smaller than IVF_SQ8, but it also causes a loss of accuracy during searching.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**nlist**: Number of cluster units.
|
||||
|
||||
**m**: Number of factors of product quantization. **CPU-only** Milvus: `m ≡ dim (mod m)`; **GPU-enabled** Milvus: `m` ∈ {1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 96}, and (dim / m) ∈ {1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32}. (`m` x 1024) ≥ `MaxSharedMemPerBlock` of your graphics card.
|
||||
|
||||
```python
|
||||
# IVF_PQ
|
||||
{
|
||||
"index_type": "IVF_PQ",
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_PQ
|
||||
"nlist": 100, # int. 1~65536
|
||||
"m": 8
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**nprobe**: Number of inverted file cell to probe.
|
||||
|
||||
```python
|
||||
# IVF_PQ
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_PQ
|
||||
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
|
||||
}
|
||||
```
|
||||
|
||||
## IVF_SQ8
|
||||
|
||||
**IVF_SQ8** does scalar quantization for each vector placed in the unit based on IVF. Scalar quantization
|
||||
converts each dimension of the original vector from a 4-byte floating-point number to a 1-byte unsigned integer,
|
||||
so the IVF_SQ8 index file occupies much less space than the IVF_FLAT index file.
|
||||
However, scalar quantization results in a loss of accuracy during searching vectors.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**nlist**: Number of cluster units.
|
||||
|
||||
```python
|
||||
# IVF_SQ8
|
||||
{
|
||||
"index_type": "IVF_SQ8",
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_SQ8
|
||||
"nlist": 100 # int. 1~65536
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**nprobe**: Number of inverted file cell to probe.
|
||||
|
||||
```python
|
||||
# IVF_SQ8
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_SQ8
|
||||
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
|
||||
}
|
||||
```
|
||||
|
||||
## IVF_SQ8_HYBRID
|
||||
|
||||
Optimized version of IVF_SQ8 that requires both CPU and GPU to work. Unlike IVF_SQ8, IVF_SQ8H uses a GPU-based
|
||||
coarse quantizer, which greatly reduces time to quantize.
|
||||
|
||||
IVF_SQ8H is an IVF_SQ8 index that optimizes query execution.
|
||||
|
||||
The query method is as follows:
|
||||
|
||||
- If `nq` ≥ `gpu_search_threshold`, GPU handles the entire query task.
|
||||
- If `nq` < `gpu_search_threshold`, GPU handles the task of retrieving the `nprobe` nearest unit in the IVF
|
||||
index file, and CPU handles the rest.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**nlist**: Number of cluster units.
|
||||
|
||||
```python
|
||||
# IVF_SQ8_HYBRID
|
||||
{
|
||||
"index_type": "IVF_SQ8_HYBRID",
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_SQ8_HYBRID
|
||||
"nlist": 100 # int. 1~65536
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**nprobe**: Number of inverted file cell to probe.
|
||||
|
||||
```python
|
||||
# IVF_SQ8_HYBRID
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for IVF_SQ8_HYBRID
|
||||
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
|
||||
}
|
||||
```
|
||||
|
||||
## ANNOY
|
||||
|
||||
**ANNOY** (*Approximate Nearest Neighbors Oh Yeah*) is an index that uses a hyperplane to divide a
|
||||
high-dimensional space into multiple subspaces, and then stores them in a tree structure.
|
||||
|
||||
When searching for vectors, ANNOY follows the tree structure to find subspaces closer to the target vector,
|
||||
and then compares all the vectors in these subspaces (The number of vectors being compared should not be
|
||||
less than `search_k`) to obtain the final result. Obviously, when the target vector is close to the edge of
|
||||
a certain subspace, sometimes it is necessary to greatly increase the number of searched subspaces to obtain
|
||||
a high recall rate. Therefore, ANNOY uses `n_trees` different methods to divide the whole space, and searches
|
||||
all the dividing methods simultaneously to reduce the probability that the target vector is always at the edge of the subspace.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**n_trees**: The number of methods of space division.
|
||||
|
||||
```python
|
||||
# ANNOY
|
||||
{
|
||||
"index_type": "ANNOY",
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for ANNOY
|
||||
"n_trees": 8 # int. 1~1024
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**search_k**: The number of nodes to search. -1 means 5% of the whole data.
|
||||
|
||||
```python
|
||||
# ANNOY
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for ANNOY
|
||||
"search_k": -1 # int. {-1} U [top_k, n*n_trees], n represents vectors count.
|
||||
}
|
||||
```
|
||||
|
||||
## HNSW
|
||||
|
||||
**HNSW** (*Hierarchical Navigable Small World Graph*) is a graph-based indexing algorithm. It builds a
|
||||
multi-layer navigation structure for an image according to certain rules. In this structure, the upper
|
||||
layers are more sparse and the distances between nodes are farther; the lower layers are denser and
|
||||
the distances between nodes are closer. The search starts from the uppermost layer, finds the node closest
|
||||
to the target in this layer, and then enters the next layer to begin another search. After multiple iterations,
|
||||
it can quickly approach the target position.
|
||||
|
||||
In order to improve performance, HNSW limits the maximum degree of nodes on each layer of the graph to `M`.
|
||||
In addition, you can use `efConstruction` (when building index) or `ef` (when searching targets) to specify a search range.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**M**: Maximum degree of the node.
|
||||
|
||||
**efConstruction**: Take the effect in stage of index construction.
|
||||
|
||||
```python
|
||||
# HNSW
|
||||
{
|
||||
"index_type": "HNSW",
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for HNSW
|
||||
"M": 16, # int. 4~64
|
||||
"efConstruction": 40 # int. 8~512
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**ef**: Take the effect in stage of search scope, should be larger than `top_k`.
|
||||
|
||||
```python
|
||||
# HNSW
|
||||
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for HNSW
|
||||
"ef": 64 # int. top_k~32768
|
||||
}
|
||||
```
|
||||
|
||||
## RHNSW_PQ
|
||||
|
||||
**RHNSW_PQ** is a variant index type combining PQ and HNSW. It first uses PQ to quantize the vector,
|
||||
then uses HNSW to quantize the PQ quantization result to get the index.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**M**: Maximum degree of the node.
|
||||
|
||||
**efConstruction**: Take effect in stage of index construction.
|
||||
|
||||
**PQM**: m for PQ.
|
||||
|
||||
```python
|
||||
# RHNSW_PQ
|
||||
{
|
||||
"index_type": "RHNSW_PQ",
|
||||
"metric_type": "L2",
|
||||
|
||||
#Special for RHNSW_PQ
|
||||
"M": 16, # int. 4~64
|
||||
"efConstruction": 40, # int. 8~512
|
||||
"PQM": 8, # int. CPU only. PQM = dim (mod m)
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**ef**: Take the effect in stage of search scope, should be larger than `top_k`.
|
||||
|
||||
```python
|
||||
# RHNSW_PQ
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
|
||||
#Special for RHNSW_PQ
|
||||
"ef": 64 # int. top_k~32768
|
||||
}
|
||||
```
|
||||
|
||||
## RHNSW_SQ
|
||||
|
||||
**RHNSW_SQ** is a variant index type combining SQ and HNSW. It first uses SQ to quantize the vector, then uses HNSW to quantize the SQ quantization result to get the index.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**M**: Maximum degree of the node.
|
||||
|
||||
**efConstruction**: Take effect in stage of index construction, search scope.
|
||||
|
||||
```python
|
||||
# RHNSW_SQ
|
||||
{
|
||||
"index_type": "RHNSW_SQ",
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for RHNSW_SQ
|
||||
"M": 16, # int. 4~64
|
||||
"efConstruction": 40 # int. 8~512
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
**ef**: Take the effect in stage of search scope, should be larger than `top_k`.
|
||||
|
||||
```python
|
||||
# RHNSW_SQ
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
#Special for RHNSW_SQ
|
||||
"ef": 64 # int. top_k~32768
|
||||
}
|
||||
```
|
||||
|
||||
## NSG
|
||||
|
||||
**NSG** (*Refined Navigating Spreading-out Graph*) is a graph-based indexing algorithm. It sets the center
|
||||
position of the whole graph as a navigation point, and then uses a specific edge selection strategy to control
|
||||
the out-degree of each point (less than or equal to `out_degree`). Therefore, it can reduce memory usage and
|
||||
quickly locate the target position nearby during searching vectors.
|
||||
|
||||
The graph construction process of NSG is as follows:
|
||||
|
||||
1. Find `knng` nearest neighbors for each point.
|
||||
2. Iterate at least `search_length` times based on `knng` nearest neighbor nodes to select `candidate_pool_size` possible nearest neighbor nodes.
|
||||
3. Construct the out-edge of each point in the selected `candidate_pool_size` nodes according to the edge selection strategy.
|
||||
|
||||
The query process is similar to the graph building process. It starts from the navigation point and iterates at least `search_length` times to get the final result.
|
||||
|
||||
- building parameters:
|
||||
|
||||
**search_length**: Number of query iterations.
|
||||
|
||||
**out_degree**: Maximum out-degree of the node.
|
||||
|
||||
**candidate_pool_size**: Candidate pool size of the node.
|
||||
|
||||
  **knng**: Number of nearest neighbors.
|
||||
|
||||
```python
|
||||
# NSG
|
||||
{
|
||||
"index_type": "NSG",
|
||||
"metric_type": "L2",
|
||||
|
||||
    #Special for NSG
|
||||
"search_length": 60, # int. 10~300
|
||||
"out_degree": 30, # int. 5~300
|
||||
"candidate_pool_size": 300, # int. 50~1000
|
||||
"knng": 50 # int. 5~300
|
||||
}
|
||||
```
|
||||
|
||||
- search parameters:
|
||||
|
||||
  **search_length**: Number of query iterations.
|
||||
|
||||
```python
|
||||
# NSG
|
||||
{
|
||||
"topk": top_k,
|
||||
"query": queries,
|
||||
"metric_type": "L2", # one of L2, IP
|
||||
|
||||
    #Special for NSG
|
||||
"search_length": 100 # int. 10~300
|
||||
}
|
||||
```
|
|
@ -48,13 +48,14 @@ type Option struct {
|
|||
PulsarDMChannels []string
|
||||
PulsarK2SChannels []string
|
||||
|
||||
DefaultRecordSize int64
|
||||
MinimumAssignSize int64
|
||||
SegmentThreshold float64
|
||||
SegmentExpireDuration int64
|
||||
NumOfChannel int
|
||||
NumOfQueryNode int
|
||||
StatsChannels string
|
||||
DefaultRecordSize int64
|
||||
MinimumAssignSize int64
|
||||
SegmentThreshold float64
|
||||
SegmentThresholdFactor float64
|
||||
SegmentExpireDuration int64
|
||||
NumOfChannel int
|
||||
NumOfQueryNode int
|
||||
StatsChannels string
|
||||
}
|
||||
|
||||
type Master struct {
|
||||
|
|
|
@ -214,3 +214,15 @@ func (p *ParamTable) TopicNum() int {
|
|||
}
|
||||
return num
|
||||
}
|
||||
|
||||
func (p *ParamTable) SegmentThresholdFactor() float64 {
|
||||
factor, err := p.Load("master.segmentThresholdFactor")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
res, err := strconv.ParseFloat(factor, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
|
|
@ -29,20 +29,21 @@ type segmentStatus struct {
|
|||
}
|
||||
|
||||
type SegmentManager struct {
|
||||
metaTable *metaTable
|
||||
statsStream msgstream.MsgStream
|
||||
channelRanges []*channelRange
|
||||
segmentStatus map[UniqueID]*segmentStatus // segment id to segment status
|
||||
collStatus map[UniqueID]*collectionStatus // collection id to collection status
|
||||
defaultSizePerRecord int64
|
||||
minimumAssignSize int64
|
||||
segmentThreshold int64
|
||||
segmentExpireDuration int64
|
||||
numOfChannels int
|
||||
numOfQueryNodes int
|
||||
globalIDAllocator func() (UniqueID, error)
|
||||
globalTSOAllocator func() (Timestamp, error)
|
||||
mu sync.RWMutex
|
||||
metaTable *metaTable
|
||||
statsStream msgstream.MsgStream
|
||||
channelRanges []*channelRange
|
||||
segmentStatus map[UniqueID]*segmentStatus // segment id to segment status
|
||||
collStatus map[UniqueID]*collectionStatus // collection id to collection status
|
||||
defaultSizePerRecord int64
|
||||
minimumAssignSize int64
|
||||
segmentThreshold float64
|
||||
segmentThresholdFactor float64
|
||||
segmentExpireDuration int64
|
||||
numOfChannels int
|
||||
numOfQueryNodes int
|
||||
globalIDAllocator func() (UniqueID, error)
|
||||
globalTSOAllocator func() (Timestamp, error)
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (segMgr *SegmentManager) HandleQueryNodeMsgPack(msgPack *msgstream.MsgPack) error {
|
||||
|
@ -76,7 +77,7 @@ func (segMgr *SegmentManager) handleSegmentStat(segStats *internalpb.SegmentStat
|
|||
segMeta.NumRows = segStats.NumRows
|
||||
segMeta.MemSize = segStats.MemorySize
|
||||
|
||||
if segStats.MemorySize > segMgr.segmentThreshold {
|
||||
if segStats.MemorySize > int64(segMgr.segmentThresholdFactor*segMgr.segmentThreshold) {
|
||||
return segMgr.closeSegment(segMeta)
|
||||
}
|
||||
return segMgr.metaTable.UpdateSegment(segMeta)
|
||||
|
@ -150,6 +151,7 @@ func (segMgr *SegmentManager) AssignSegmentID(segIDReq []*internalpb.SegIDReques
|
|||
|
||||
func (segMgr *SegmentManager) assignSegment(collName string, collID UniqueID, partitionTag string, count uint32, channelID int32,
|
||||
collStatus *collectionStatus) (*internalpb.SegIDAssignment, error) {
|
||||
segmentThreshold := int64(segMgr.segmentThreshold)
|
||||
for _, segID := range collStatus.openedSegments {
|
||||
segMeta, _ := segMgr.metaTable.GetSegmentByID(segID)
|
||||
if segMeta.GetCloseTime() != 0 || channelID < segMeta.GetChannelStart() ||
|
||||
|
@ -160,8 +162,8 @@ func (segMgr *SegmentManager) assignSegment(collName string, collID UniqueID, pa
|
|||
assignedMem := segMgr.checkAssignedSegExpire(segID)
|
||||
memSize := segMeta.MemSize
|
||||
neededMemSize := segMgr.calNeededSize(memSize, segMeta.NumRows, int64(count))
|
||||
if memSize+assignedMem+neededMemSize <= segMgr.segmentThreshold {
|
||||
remainingSize := segMgr.segmentThreshold - memSize - assignedMem
|
||||
if memSize+assignedMem+neededMemSize <= segmentThreshold {
|
||||
remainingSize := segmentThreshold - memSize - assignedMem
|
||||
allocMemSize := segMgr.calAllocMemSize(neededMemSize, remainingSize)
|
||||
segMgr.addAssignment(segID, allocMemSize)
|
||||
return &internalpb.SegIDAssignment{
|
||||
|
@ -174,7 +176,7 @@ func (segMgr *SegmentManager) assignSegment(collName string, collID UniqueID, pa
|
|||
}
|
||||
}
|
||||
neededMemSize := segMgr.defaultSizePerRecord * int64(count)
|
||||
if neededMemSize > segMgr.segmentThreshold {
|
||||
if neededMemSize > segmentThreshold {
|
||||
return nil, errors.Errorf("request with count %d need about %d mem size which is larger than segment threshold",
|
||||
count, neededMemSize)
|
||||
}
|
||||
|
@ -184,7 +186,7 @@ func (segMgr *SegmentManager) assignSegment(collName string, collID UniqueID, pa
|
|||
return nil, err
|
||||
}
|
||||
|
||||
allocMemSize := segMgr.calAllocMemSize(neededMemSize, segMgr.segmentThreshold)
|
||||
allocMemSize := segMgr.calAllocMemSize(neededMemSize, segmentThreshold)
|
||||
segMgr.addAssignment(segMeta.SegmentID, allocMemSize)
|
||||
return &internalpb.SegIDAssignment{
|
||||
SegID: segMeta.SegmentID,
|
||||
|
@ -327,18 +329,19 @@ func NewSegmentManager(meta *metaTable,
|
|||
globalTSOAllocator func() (Timestamp, error),
|
||||
) *SegmentManager {
|
||||
segMgr := &SegmentManager{
|
||||
metaTable: meta,
|
||||
channelRanges: make([]*channelRange, 0),
|
||||
segmentStatus: make(map[UniqueID]*segmentStatus),
|
||||
collStatus: make(map[UniqueID]*collectionStatus),
|
||||
segmentThreshold: int64(opt.SegmentThreshold),
|
||||
segmentExpireDuration: opt.SegmentExpireDuration,
|
||||
minimumAssignSize: opt.MinimumAssignSize,
|
||||
defaultSizePerRecord: opt.DefaultRecordSize,
|
||||
numOfChannels: opt.NumOfChannel,
|
||||
numOfQueryNodes: opt.NumOfQueryNode,
|
||||
globalIDAllocator: globalIDAllocator,
|
||||
globalTSOAllocator: globalTSOAllocator,
|
||||
metaTable: meta,
|
||||
channelRanges: make([]*channelRange, 0),
|
||||
segmentStatus: make(map[UniqueID]*segmentStatus),
|
||||
collStatus: make(map[UniqueID]*collectionStatus),
|
||||
segmentThreshold: opt.SegmentThreshold,
|
||||
segmentThresholdFactor: opt.SegmentThresholdFactor,
|
||||
segmentExpireDuration: opt.SegmentExpireDuration,
|
||||
minimumAssignSize: opt.MinimumAssignSize,
|
||||
defaultSizePerRecord: opt.DefaultRecordSize,
|
||||
numOfChannels: opt.NumOfChannel,
|
||||
numOfQueryNodes: opt.NumOfQueryNode,
|
||||
globalIDAllocator: globalIDAllocator,
|
||||
globalTSOAllocator: globalTSOAllocator,
|
||||
}
|
||||
segMgr.createChannelRanges()
|
||||
return segMgr
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
|
||||
|
@ -78,12 +78,13 @@ func setup() {
|
|||
panic(err)
|
||||
}
|
||||
opt := &Option{
|
||||
SegmentThreshold: 536870912,
|
||||
SegmentExpireDuration: 2000,
|
||||
MinimumAssignSize: 1048576,
|
||||
DefaultRecordSize: 1024,
|
||||
NumOfQueryNode: 3,
|
||||
NumOfChannel: 5,
|
||||
SegmentThreshold: 536870912,
|
||||
SegmentExpireDuration: 2000,
|
||||
MinimumAssignSize: 1048576,
|
||||
DefaultRecordSize: 1024,
|
||||
NumOfQueryNode: 3,
|
||||
NumOfChannel: 5,
|
||||
SegmentThresholdFactor: 0.75,
|
||||
}
|
||||
|
||||
var cnt int64
|
||||
|
@ -209,7 +210,7 @@ func TestSegmentManager_SegmentStats(t *testing.T) {
|
|||
|
||||
// close segment
|
||||
stats.SegStats[0].NumRows = 600000
|
||||
stats.SegStats[0].MemorySize = 600000000
|
||||
stats.SegStats[0].MemorySize = int64(0.8 * segMgr.segmentThreshold)
|
||||
err = segMgr.HandleQueryNodeMsgPack(&msgPack)
|
||||
assert.Nil(t, err)
|
||||
segMeta, _ = mt.GetSegmentByID(100)
|
||||
|
|
Loading…
Reference in New Issue