// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package segments

import (
	"context"
	"fmt"
	"sync"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/internal/proto/segcorepb"
	"github.com/milvus-io/milvus/pkg/metrics"
	"github.com/milvus-io/milvus/pkg/util/paramtable"
	"github.com/milvus-io/milvus/pkg/util/timerecord"
	. "github.com/milvus-io/milvus/pkg/util/typeutil"
)

// retrieveOnSegments performs retrieve on the listed segments.
// All segment IDs must have been validated before this function is called.
func retrieveOnSegments(ctx context.Context, segments []Segment, segType SegmentType, plan *RetrievePlan) ([]*segcorepb.RetrieveResults, error) {
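	// Fan out one goroutine per segment: each goroutine writes any error to its own
	// slot in errs (no locking needed) and sends a successful result into the
	// buffered resultCh.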
	var (
		resultCh = make(chan *segcorepb.RetrieveResults, len(segments))
		errs     = make([]error, len(segments))
		wg       sync.WaitGroup
	)

	label := metrics.SealedSegmentLabel
	if segType == commonpb.SegmentState_Growing {
		label = metrics.GrowingSegmentLabel
	}

	for i, segment := range segments {
		wg.Add(1)
		go func(seg Segment, i int) {
			defer wg.Done()
			tr := timerecord.NewTimeRecorder("retrieveOnSegments")
			result, err := seg.Retrieve(ctx, plan)
			if err != nil {
				errs[i] = err
				return
			}
			if err = seg.ValidateIndexedFieldsData(ctx, result); err != nil {
				errs[i] = err
				return
			}
			errs[i] = nil
			resultCh <- result
			metrics.QueryNodeSQSegmentLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
				metrics.QueryLabel, label).Observe(float64(tr.ElapseSpan().Milliseconds()))
		}(segment, i)
	}
	wg.Wait()
	close(resultCh)

	for _, err := range errs {
		if err != nil {
			return nil, err
		}
	}

	var retrieveResults []*segcorepb.RetrieveResults
	for result := range resultCh {
		retrieveResults = append(retrieveResults, result)
	}

	return retrieveResults, nil
}

// RetrieveHistorical retrieves from all the target segments in historical (sealed segments).
func RetrieveHistorical(ctx context.Context, manager *Manager, plan *RetrievePlan, collID UniqueID, partIDs []UniqueID, segIDs []UniqueID) ([]*segcorepb.RetrieveResults, []Segment, error) {
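	// Resolve and validate the target sealed segments before running the retrieve plan on them.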
	segments, err := validateOnHistorical(ctx, manager, collID, partIDs, segIDs)
	if err != nil {
		return nil, nil, err
	}

	retrieveResults, err := retrieveOnSegments(ctx, segments, SegmentTypeSealed, plan)
	return retrieveResults, segments, err
}

// RetrieveStreaming retrieves from all the target segments in streaming (growing segments).
func RetrieveStreaming(ctx context.Context, manager *Manager, plan *RetrievePlan, collID UniqueID, partIDs []UniqueID, segIDs []UniqueID) ([]*segcorepb.RetrieveResults, []Segment, error) {
	segments, err := validateOnStream(ctx, manager, collID, partIDs, segIDs)
	if err != nil {
		return nil, nil, err
	}
	retrieveResults, err := retrieveOnSegments(ctx, segments, SegmentTypeGrowing, plan)
	return retrieveResults, segments, err
}