Rename proxynode to proxy (#5961)

* rename package name from proxynode to proxy

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* move distributed/proxynode to distributed/proxy

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* move internal/proxynode to internal/proxy

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* rename proxynode to proxy

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>
Cai Yudong 2021-06-22 14:40:07 +08:00 committed by GitHub
parent 22b2b7dbbe
commit 2926a78968
44 changed files with 320 additions and 277 deletions
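
For code that imported the old package, the change is mechanical: internal/distributed/proxynode becomes internal/distributed/proxy, internal/proxynode becomes internal/proxy, the ProxyNode type becomes Proxy, and typeutil.ProxyNodeRole becomes typeutil.ProxyRole. Below is a minimal Go sketch of the caller-side update, using only the NewServer signature and the SimpleMsgStreamFactory that appear in this diff; the standalone main wrapper is illustrative only (Go's internal/ visibility means such code would live inside the milvus module, as the components package does):

package main

import (
	"context"
	"log"

	// before this commit: "github.com/milvus-io/milvus/internal/distributed/proxynode"
	grpcproxy "github.com/milvus-io/milvus/internal/distributed/proxy"
	"github.com/milvus-io/milvus/internal/msgstream"
)

func main() {
	// the simple in-memory msgstream factory is borrowed from the tests in this
	// diff purely for illustration; the real roles runner wires its own factory
	factory := msgstream.NewSimpleMsgStreamFactory()

	// before this commit: grpcproxynode.NewServer(ctx, factory)
	svr, err := grpcproxy.NewServer(context.Background(), factory)
	if err != nil {
		log.Fatal(err)
	}
	_ = svr
}

The same substitution repeats through every file below, including role names (typeutil.ProxyRole), log prefixes, and user-facing error strings such as "proxy not healthy".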

@ -14,12 +14,12 @@ package components
import (
"context"
grpcproxynode "github.com/milvus-io/milvus/internal/distributed/proxynode"
grpcproxy "github.com/milvus-io/milvus/internal/distributed/proxy"
"github.com/milvus-io/milvus/internal/msgstream"
)
type Proxy struct {
svr *grpcproxynode.Server
svr *grpcproxy.Server
}
// NewProxy creates a new Proxy
@ -27,7 +27,7 @@ func NewProxy(ctx context.Context, factory msgstream.Factory) (*Proxy, error) {
var err error
n := &Proxy{}
svr, err := grpcproxynode.NewServer(ctx, factory)
svr, err := grpcproxy.NewServer(ctx, factory)
if err != nil {
return nil, err
}

@ -30,7 +30,7 @@ import (
"github.com/milvus-io/milvus/internal/logutil"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/msgstream"
"github.com/milvus-io/milvus/internal/proxynode"
"github.com/milvus-io/milvus/internal/proxy"
"github.com/milvus-io/milvus/internal/querynode"
"github.com/milvus-io/milvus/internal/queryservice"
"github.com/milvus-io/milvus/internal/rootcoord"
@ -108,11 +108,11 @@ func (mr *MilvusRoles) runProxy(ctx context.Context, localMsg bool, alias string
wg.Add(1)
go func() {
proxynode.Params.InitAlias(alias)
proxynode.Params.Init()
proxy.Params.InitAlias(alias)
proxy.Params.Init()
if !localMsg {
logutil.SetupLogger(&proxynode.Params.Log)
logutil.SetupLogger(&proxy.Params.Log)
defer log.Sync()
}

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package grpcproxynodeclient
package grpcproxyclient
import (
"context"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package grpcproxynode
package grpcproxy
import (
"sync"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package grpcproxynode
package grpcproxy
import (
"context"
@ -36,7 +36,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proxynode"
"github.com/milvus-io/milvus/internal/proxy"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/opentracing/opentracing-go"
@ -49,7 +49,7 @@ const (
type Server struct {
ctx context.Context
wg sync.WaitGroup
proxynode *proxynode.ProxyNode
proxy *proxy.Proxy
grpcServer *grpc.Server
grpcErrChan chan error
@ -71,7 +71,7 @@ func NewServer(ctx context.Context, factory msgstream.Factory) (*Server, error)
grpcErrChan: make(chan error),
}
server.proxynode, err = proxynode.NewProxyNode(server.ctx, factory)
server.proxy, err = proxy.NewProxy(server.ctx, factory)
if err != nil {
return nil, err
}
@ -82,10 +82,10 @@ func (s *Server) startGrpcLoop(grpcPort int) {
defer s.wg.Done()
log.Debug("proxynode", zap.Int("network port", grpcPort))
log.Debug("proxy", zap.Int("network port", grpcPort))
lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
if err != nil {
log.Warn("proxynode", zap.String("Server:failed to listen:", err.Error()))
log.Warn("proxy", zap.String("Server:failed to listen:", err.Error()))
s.grpcErrChan <- err
return
}
@ -131,30 +131,30 @@ func (s *Server) init() error {
Params.Init()
if !funcutil.CheckPortAvailable(Params.Port) {
Params.Port = funcutil.GetAvailablePort()
log.Warn("ProxyNode init", zap.Any("Port", Params.Port))
log.Warn("Proxy init", zap.Any("Port", Params.Port))
}
Params.LoadFromEnv()
Params.LoadFromArgs()
Params.Address = Params.IP + ":" + strconv.FormatInt(int64(Params.Port), 10)
proxynode.Params.Init()
proxy.Params.Init()
log.Debug("init params done ...")
proxynode.Params.NetworkPort = Params.Port
proxynode.Params.IP = Params.IP
proxynode.Params.NetworkAddress = Params.Address
proxy.Params.NetworkPort = Params.Port
proxy.Params.IP = Params.IP
proxy.Params.NetworkAddress = Params.Address
// for purpose of ID Allocator
proxynode.Params.MasterAddress = Params.MasterAddress
proxy.Params.MasterAddress = Params.MasterAddress
closer := trace.InitTracing(fmt.Sprintf("proxy_node ip: %s, port: %d", Params.IP, Params.Port))
s.closer = closer
log.Debug("proxynode", zap.String("proxy host", Params.IP))
log.Debug("proxynode", zap.Int("proxy port", Params.Port))
log.Debug("proxynode", zap.String("proxy address", Params.Address))
log.Debug("proxy", zap.String("proxy host", Params.IP))
log.Debug("proxy", zap.Int("proxy port", Params.Port))
log.Debug("proxy", zap.String("proxy address", Params.Address))
err = s.proxynode.Register()
err = s.proxy.Register()
if err != nil {
log.Debug("ProxyNode Register etcd failed ", zap.Error(err))
log.Debug("Proxy Register etcd failed ", zap.Error(err))
return err
}
@ -168,51 +168,51 @@ func (s *Server) init() error {
}
rootCoordAddr := Params.MasterAddress
log.Debug("ProxyNode", zap.String("RootCoord address", rootCoordAddr))
log.Debug("Proxy", zap.String("RootCoord address", rootCoordAddr))
timeout := 3 * time.Second
s.rootCoordClient, err = rcc.NewClient(s.ctx, proxynode.Params.MetaRootPath, proxynode.Params.EtcdEndpoints, timeout)
s.rootCoordClient, err = rcc.NewClient(s.ctx, proxy.Params.MetaRootPath, proxy.Params.EtcdEndpoints, timeout)
if err != nil {
log.Debug("ProxyNode new rootCoordClient failed ", zap.Error(err))
log.Debug("Proxy new rootCoordClient failed ", zap.Error(err))
return err
}
err = s.rootCoordClient.Init()
if err != nil {
log.Debug("ProxyNode new rootCoordClient Init ", zap.Error(err))
log.Debug("Proxy new rootCoordClient Init ", zap.Error(err))
return err
}
err = funcutil.WaitForComponentHealthy(s.ctx, s.rootCoordClient, "RootCoord", 1000000, time.Millisecond*200)
if err != nil {
log.Debug("ProxyNode WaitForComponentHealthy RootCoord failed ", zap.Error(err))
log.Debug("Proxy WaitForComponentHealthy RootCoord failed ", zap.Error(err))
panic(err)
}
s.proxynode.SetRootCoordClient(s.rootCoordClient)
s.proxy.SetRootCoordClient(s.rootCoordClient)
log.Debug("set rootcoord client ...")
dataCoordAddr := Params.DataCoordAddress
log.Debug("ProxyNode", zap.String("data service address", dataCoordAddr))
s.dataCoordClient = grpcdatacoordclient.NewClient(proxynode.Params.MetaRootPath, proxynode.Params.EtcdEndpoints, timeout)
log.Debug("Proxy", zap.String("datacoord address", dataCoordAddr))
s.dataCoordClient = grpcdatacoordclient.NewClient(proxy.Params.MetaRootPath, proxy.Params.EtcdEndpoints, timeout)
err = s.dataCoordClient.Init()
if err != nil {
log.Debug("ProxyNode dataCoordClient init failed ", zap.Error(err))
log.Debug("Proxy dataCoordClient init failed ", zap.Error(err))
return err
}
s.proxynode.SetDataCoordClient(s.dataCoordClient)
log.Debug("set data service address ...")
s.proxy.SetDataCoordClient(s.dataCoordClient)
log.Debug("set datacoord address ...")
indexServiceAddr := Params.IndexServerAddress
log.Debug("ProxyNode", zap.String("index server address", indexServiceAddr))
s.indexCoordClient = grpcindexcoordclient.NewClient(proxynode.Params.MetaRootPath, proxynode.Params.EtcdEndpoints, timeout)
log.Debug("Proxy", zap.String("indexcoord address", indexServiceAddr))
s.indexCoordClient = grpcindexcoordclient.NewClient(proxy.Params.MetaRootPath, proxy.Params.EtcdEndpoints, timeout)
err = s.indexCoordClient.Init()
if err != nil {
log.Debug("ProxyNode indexCoordClient init failed ", zap.Error(err))
log.Debug("Proxy indexCoordClient init failed ", zap.Error(err))
return err
}
s.proxynode.SetIndexCoordClient(s.indexCoordClient)
log.Debug("set index service client ...")
s.proxy.SetIndexCoordClient(s.indexCoordClient)
log.Debug("set indexcoord client ...")
queryServiceAddr := Params.QueryServiceAddress
log.Debug("ProxyNode", zap.String("query server address", queryServiceAddr))
s.queryServiceClient, err = grpcqueryserviceclient.NewClient(proxynode.Params.MetaRootPath, proxynode.Params.EtcdEndpoints, timeout)
log.Debug("Proxy", zap.String("querycoord address", queryServiceAddr))
s.queryServiceClient, err = grpcqueryserviceclient.NewClient(proxy.Params.MetaRootPath, proxy.Params.EtcdEndpoints, timeout)
if err != nil {
return err
}
@ -220,15 +220,14 @@ func (s *Server) init() error {
if err != nil {
return err
}
s.proxynode.SetQueryServiceClient(s.queryServiceClient)
s.proxy.SetQueryServiceClient(s.queryServiceClient)
log.Debug("set query service client ...")
s.proxynode.UpdateStateCode(internalpb.StateCode_Initializing)
log.Debug("proxynode",
zap.Any("state of proxynode", internalpb.StateCode_Initializing))
s.proxy.UpdateStateCode(internalpb.StateCode_Initializing)
log.Debug("proxy", zap.Any("state of proxy", internalpb.StateCode_Initializing))
if err := s.proxynode.Init(); err != nil {
log.Debug("proxynode", zap.String("proxynode init error", err.Error()))
if err := s.proxy.Init(); err != nil {
log.Debug("proxy", zap.String("proxy init error", err.Error()))
return err
}
@ -236,7 +235,7 @@ func (s *Server) init() error {
}
func (s *Server) start() error {
return s.proxynode.Start()
return s.proxy.Start()
}
func (s *Server) Stop() error {
@ -251,7 +250,7 @@ func (s *Server) Stop() error {
s.grpcServer.GracefulStop()
}
err = s.proxynode.Stop()
err = s.proxy.Stop()
if err != nil {
return err
}
@ -262,140 +261,140 @@ func (s *Server) Stop() error {
}
func (s *Server) GetComponentStates(ctx context.Context, request *internalpb.GetComponentStatesRequest) (*internalpb.ComponentStates, error) {
return s.proxynode.GetComponentStates(ctx)
return s.proxy.GetComponentStates(ctx)
}
func (s *Server) GetStatisticsChannel(ctx context.Context, request *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) {
return s.proxynode.GetStatisticsChannel(ctx)
return s.proxy.GetStatisticsChannel(ctx)
}
func (s *Server) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return s.proxynode.InvalidateCollectionMetaCache(ctx, request)
return s.proxy.InvalidateCollectionMetaCache(ctx, request)
}
func (s *Server) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
return s.proxynode.ReleaseDQLMessageStream(ctx, request)
return s.proxy.ReleaseDQLMessageStream(ctx, request)
}
func (s *Server) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
return s.proxynode.CreateCollection(ctx, request)
return s.proxy.CreateCollection(ctx, request)
}
func (s *Server) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
return s.proxynode.DropCollection(ctx, request)
return s.proxy.DropCollection(ctx, request)
}
func (s *Server) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
return s.proxynode.HasCollection(ctx, request)
return s.proxy.HasCollection(ctx, request)
}
func (s *Server) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
return s.proxynode.LoadCollection(ctx, request)
return s.proxy.LoadCollection(ctx, request)
}
func (s *Server) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
return s.proxynode.ReleaseCollection(ctx, request)
return s.proxy.ReleaseCollection(ctx, request)
}
func (s *Server) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
return s.proxynode.DescribeCollection(ctx, request)
return s.proxy.DescribeCollection(ctx, request)
}
func (s *Server) GetCollectionStatistics(ctx context.Context, request *milvuspb.GetCollectionStatisticsRequest) (*milvuspb.GetCollectionStatisticsResponse, error) {
return s.proxynode.GetCollectionStatistics(ctx, request)
return s.proxy.GetCollectionStatistics(ctx, request)
}
func (s *Server) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
return s.proxynode.ShowCollections(ctx, request)
return s.proxy.ShowCollections(ctx, request)
}
func (s *Server) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
return s.proxynode.CreatePartition(ctx, request)
return s.proxy.CreatePartition(ctx, request)
}
func (s *Server) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
return s.proxynode.DropPartition(ctx, request)
return s.proxy.DropPartition(ctx, request)
}
func (s *Server) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
return s.proxynode.HasPartition(ctx, request)
return s.proxy.HasPartition(ctx, request)
}
func (s *Server) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitionsRequest) (*commonpb.Status, error) {
return s.proxynode.LoadPartitions(ctx, request)
return s.proxy.LoadPartitions(ctx, request)
}
func (s *Server) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionsRequest) (*commonpb.Status, error) {
return s.proxynode.ReleasePartitions(ctx, request)
return s.proxy.ReleasePartitions(ctx, request)
}
func (s *Server) GetPartitionStatistics(ctx context.Context, request *milvuspb.GetPartitionStatisticsRequest) (*milvuspb.GetPartitionStatisticsResponse, error) {
return s.proxynode.GetPartitionStatistics(ctx, request)
return s.proxy.GetPartitionStatistics(ctx, request)
}
func (s *Server) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
return s.proxynode.ShowPartitions(ctx, request)
return s.proxy.ShowPartitions(ctx, request)
}
func (s *Server) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
return s.proxynode.CreateIndex(ctx, request)
return s.proxy.CreateIndex(ctx, request)
}
func (s *Server) DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
return s.proxynode.DropIndex(ctx, request)
return s.proxy.DropIndex(ctx, request)
}
func (s *Server) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
return s.proxynode.DescribeIndex(ctx, request)
return s.proxy.DescribeIndex(ctx, request)
}
// GetIndexBuildProgress gets index build progress with field_name and index_name.
// IndexRows is the num of indexed rows. And TotalRows is the total number of segment rows.
func (s *Server) GetIndexBuildProgress(ctx context.Context, request *milvuspb.GetIndexBuildProgressRequest) (*milvuspb.GetIndexBuildProgressResponse, error) {
return s.proxynode.GetIndexBuildProgress(ctx, request)
return s.proxy.GetIndexBuildProgress(ctx, request)
}
func (s *Server) GetIndexState(ctx context.Context, request *milvuspb.GetIndexStateRequest) (*milvuspb.GetIndexStateResponse, error) {
return s.proxynode.GetIndexState(ctx, request)
return s.proxy.GetIndexState(ctx, request)
}
func (s *Server) Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.MutationResult, error) {
return s.proxynode.Insert(ctx, request)
return s.proxy.Insert(ctx, request)
}
func (s *Server) Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
return s.proxynode.Search(ctx, request)
return s.proxy.Search(ctx, request)
}
func (s *Server) Retrieve(ctx context.Context, request *milvuspb.RetrieveRequest) (*milvuspb.RetrieveResults, error) {
return s.proxynode.Retrieve(ctx, request)
return s.proxy.Retrieve(ctx, request)
}
func (s *Server) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error) {
return s.proxynode.Flush(ctx, request)
return s.proxy.Flush(ctx, request)
}
func (s *Server) Query(ctx context.Context, request *milvuspb.QueryRequest) (*milvuspb.QueryResults, error) {
return s.proxynode.Query(ctx, request)
return s.proxy.Query(ctx, request)
}
func (s *Server) GetDdChannel(ctx context.Context, request *internalpb.GetDdChannelRequest) (*milvuspb.StringResponse, error) {
return s.proxynode.GetDdChannel(ctx, request)
return s.proxy.GetDdChannel(ctx, request)
}
func (s *Server) GetPersistentSegmentInfo(ctx context.Context, request *milvuspb.GetPersistentSegmentInfoRequest) (*milvuspb.GetPersistentSegmentInfoResponse, error) {
return s.proxynode.GetPersistentSegmentInfo(ctx, request)
return s.proxy.GetPersistentSegmentInfo(ctx, request)
}
func (s *Server) GetQuerySegmentInfo(ctx context.Context, request *milvuspb.GetQuerySegmentInfoRequest) (*milvuspb.GetQuerySegmentInfoResponse, error) {
return s.proxynode.GetQuerySegmentInfo(ctx, request)
return s.proxy.GetQuerySegmentInfo(ctx, request)
}
func (s *Server) Dummy(ctx context.Context, request *milvuspb.DummyRequest) (*milvuspb.DummyResponse, error) {
return s.proxynode.Dummy(ctx, request)
return s.proxy.Dummy(ctx, request)
}
func (s *Server) RegisterLink(ctx context.Context, request *milvuspb.RegisterLinkRequest) (*milvuspb.RegisterLinkResponse, error) {
return s.proxynode.RegisterLink(ctx, request)
return s.proxy.RegisterLink(ctx, request)
}

@ -26,7 +26,7 @@ import (
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
dsc "github.com/milvus-io/milvus/internal/distributed/datacoord/client"
isc "github.com/milvus-io/milvus/internal/distributed/indexcoord/client"
pnc "github.com/milvus-io/milvus/internal/distributed/proxynode/client"
pnc "github.com/milvus-io/milvus/internal/distributed/proxy/client"
qsc "github.com/milvus-io/milvus/internal/distributed/queryservice/client"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/msgstream"

@ -160,7 +160,7 @@ func TestGrpcService(t *testing.T) {
},
)
assert.Nil(t, err)
_, err = etcdCli.Put(ctx, path.Join(sessKey, typeutil.ProxyNodeRole+"-100"), string(pnb))
_, err = etcdCli.Put(ctx, path.Join(sessKey, typeutil.ProxyRole+"-100"), string(pnb))
assert.Nil(t, err)
err = core.Init()

@ -13,5 +13,5 @@ approvers:
- scsven
labels:
- component/proxynode
- component/proxy

@ -1,4 +1,15 @@
package proxynode
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxy
import (
"context"

@ -1,4 +1,15 @@
package proxynode
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxy
import (
"testing"

@ -1,4 +1,15 @@
package proxynode
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxy
import (
"context"
@ -82,7 +93,7 @@ func (ticker *channelsTimeTickerImpl) initCurrents(current Timestamp) {
func (ticker *channelsTimeTickerImpl) tick() error {
now, err := ticker.tso.AllocOne()
if err != nil {
log.Warn("ProxyNode channelsTimeTickerImpl failed to get ts from tso", zap.Error(err))
log.Warn("Proxy channelsTimeTickerImpl failed to get ts from tso", zap.Error(err))
return err
}
//nowPTime, _ := tsoutil.ParseTS(now)
@ -95,14 +106,14 @@ func (ticker *channelsTimeTickerImpl) tick() error {
stats, err := ticker.getStatisticsFunc()
if err != nil {
log.Debug("ProxyNode channelsTimeTickerImpl failed to getStatistics", zap.Error(err))
log.Debug("Proxy channelsTimeTickerImpl failed to getStatistics", zap.Error(err))
}
for pchan := range ticker.currents {
current := ticker.currents[pchan]
//currentPTime, _ := tsoutil.ParseTS(current)
stat, ok := stats[pchan]
//log.Debug("ProxyNode channelsTimeTickerImpl", zap.Any("pchan", pchan),
//log.Debug("Proxy channelsTimeTickerImpl", zap.Any("pchan", pchan),
// zap.Any("TaskInQueue", ok),
// zap.Any("current", currentPTime),
// zap.Any("now", nowPTime))
@ -125,7 +136,7 @@ func (ticker *channelsTimeTickerImpl) tick() error {
}
ticker.currents[pchan] = next
//nextPTime, _ := tsoutil.ParseTS(next)
//log.Debug("ProxyNode channelsTimeTickerImpl",
//log.Debug("Proxy channelsTimeTickerImpl",
// zap.Any("pchan", pchan),
// zap.Any("minPTime", minPTime),
// zap.Any("maxPTime", maxPTime),

@ -1,4 +1,15 @@
package proxynode
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxy
import (
"context"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"strconv"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"errors"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"testing"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"strconv"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"encoding/json"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"testing"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"
@ -33,11 +33,11 @@ import (
"github.com/milvus-io/milvus/internal/util/typeutil"
)
func (node *ProxyNode) UpdateStateCode(code internalpb.StateCode) {
func (node *Proxy) UpdateStateCode(code internalpb.StateCode) {
node.stateCode.Store(code)
}
func (node *ProxyNode) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
func (node *Proxy) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
stats := &internalpb.ComponentStates{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
@ -54,14 +54,14 @@ func (node *ProxyNode) GetComponentStates(ctx context.Context) (*internalpb.Comp
}
info := &internalpb.ComponentInfo{
NodeID: Params.ProxyID,
Role: typeutil.ProxyNodeRole,
Role: typeutil.ProxyRole,
StateCode: code,
}
stats.State = info
return stats, nil
}
func (node *ProxyNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
func (node *Proxy) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
@ -71,7 +71,7 @@ func (node *ProxyNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.Stri
}, nil
}
func (node *ProxyNode) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
log.Debug("InvalidateCollectionMetaCache",
zap.String("role", Params.RoleName),
zap.String("db", request.DbName),
@ -92,7 +92,7 @@ func (node *ProxyNode) InvalidateCollectionMetaCache(ctx context.Context, reques
}, nil
}
func (node *ProxyNode) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
func (node *Proxy) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
log.Debug("ReleaseDQLMessageStream",
zap.Any("role", Params.RoleName),
zap.Any("db", request.DbID),
@ -115,7 +115,7 @@ func (node *ProxyNode) ReleaseDQLMessageStream(ctx context.Context, request *pro
}, nil
}
func (node *ProxyNode) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -164,7 +164,7 @@ func (node *ProxyNode) CreateCollection(ctx context.Context, request *milvuspb.C
return cct.result, nil
}
func (node *ProxyNode) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
func (node *Proxy) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -212,7 +212,7 @@ func (node *ProxyNode) DropCollection(ctx context.Context, request *milvuspb.Dro
return dct.result, nil
}
func (node *ProxyNode) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
func (node *Proxy) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
if !node.checkHealthy() {
return &milvuspb.BoolResponse{
Status: unhealthyStatus(),
@ -264,7 +264,7 @@ func (node *ProxyNode) HasCollection(ctx context.Context, request *milvuspb.HasC
return hct.result, nil
}
func (node *ProxyNode) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
func (node *Proxy) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -310,7 +310,7 @@ func (node *ProxyNode) LoadCollection(ctx context.Context, request *milvuspb.Loa
return lct.result, nil
}
func (node *ProxyNode) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
func (node *Proxy) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -357,7 +357,7 @@ func (node *ProxyNode) ReleaseCollection(ctx context.Context, request *milvuspb.
return rct.result, nil
}
func (node *ProxyNode) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
func (node *Proxy) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
if !node.checkHealthy() {
return &milvuspb.DescribeCollectionResponse{
Status: unhealthyStatus(),
@ -409,7 +409,7 @@ func (node *ProxyNode) DescribeCollection(ctx context.Context, request *milvuspb
return dct.result, nil
}
func (node *ProxyNode) GetCollectionStatistics(ctx context.Context, request *milvuspb.GetCollectionStatisticsRequest) (*milvuspb.GetCollectionStatisticsResponse, error) {
func (node *Proxy) GetCollectionStatistics(ctx context.Context, request *milvuspb.GetCollectionStatisticsRequest) (*milvuspb.GetCollectionStatisticsResponse, error) {
if !node.checkHealthy() {
return &milvuspb.GetCollectionStatisticsResponse{
Status: unhealthyStatus(),
@ -461,7 +461,7 @@ func (node *ProxyNode) GetCollectionStatistics(ctx context.Context, request *mil
return g.result, nil
}
func (node *ProxyNode) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
func (node *Proxy) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
if !node.checkHealthy() {
return &milvuspb.ShowCollectionsResponse{
Status: unhealthyStatus(),
@ -512,7 +512,7 @@ func (node *ProxyNode) ShowCollections(ctx context.Context, request *milvuspb.Sh
return sct.result, nil
}
func (node *ProxyNode) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -560,7 +560,7 @@ func (node *ProxyNode) CreatePartition(ctx context.Context, request *milvuspb.Cr
return cpt.result, nil
}
func (node *ProxyNode) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
func (node *Proxy) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -609,7 +609,7 @@ func (node *ProxyNode) DropPartition(ctx context.Context, request *milvuspb.Drop
return dpt.result, nil
}
func (node *ProxyNode) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
func (node *Proxy) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
if !node.checkHealthy() {
return &milvuspb.BoolResponse{
Status: unhealthyStatus(),
@ -666,7 +666,7 @@ func (node *ProxyNode) HasPartition(ctx context.Context, request *milvuspb.HasPa
return hpt.result, nil
}
func (node *ProxyNode) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitionsRequest) (*commonpb.Status, error) {
func (node *Proxy) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitionsRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -714,7 +714,7 @@ func (node *ProxyNode) LoadPartitions(ctx context.Context, request *milvuspb.Loa
return lpt.result, nil
}
func (node *ProxyNode) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionsRequest) (*commonpb.Status, error) {
func (node *Proxy) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionsRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -762,7 +762,7 @@ func (node *ProxyNode) ReleasePartitions(ctx context.Context, request *milvuspb.
return rpt.result, nil
}
func (node *ProxyNode) GetPartitionStatistics(ctx context.Context, request *milvuspb.GetPartitionStatisticsRequest) (*milvuspb.GetPartitionStatisticsResponse, error) {
func (node *Proxy) GetPartitionStatistics(ctx context.Context, request *milvuspb.GetPartitionStatisticsRequest) (*milvuspb.GetPartitionStatisticsResponse, error) {
if !node.checkHealthy() {
return &milvuspb.GetPartitionStatisticsResponse{
Status: unhealthyStatus(),
@ -816,7 +816,7 @@ func (node *ProxyNode) GetPartitionStatistics(ctx context.Context, request *milv
return g.result, nil
}
func (node *ProxyNode) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
func (node *Proxy) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
if !node.checkHealthy() {
return &milvuspb.ShowPartitionsResponse{
Status: unhealthyStatus(),
@ -869,7 +869,7 @@ func (node *ProxyNode) ShowPartitions(ctx context.Context, request *milvuspb.Sho
return spt.result, nil
}
func (node *ProxyNode) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
func (node *Proxy) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -919,7 +919,7 @@ func (node *ProxyNode) CreateIndex(ctx context.Context, request *milvuspb.Create
return cit.result, nil
}
func (node *ProxyNode) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
if !node.checkHealthy() {
return &milvuspb.DescribeIndexResponse{
Status: unhealthyStatus(),
@ -979,7 +979,7 @@ func (node *ProxyNode) DescribeIndex(ctx context.Context, request *milvuspb.Desc
return dit.result, nil
}
func (node *ProxyNode) DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
func (node *Proxy) DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -1029,7 +1029,7 @@ func (node *ProxyNode) DropIndex(ctx context.Context, request *milvuspb.DropInde
// GetIndexBuildProgress gets index build progress with field_name and index_name.
// IndexRows is the num of indexed rows. And TotalRows is the total number of segment rows.
func (node *ProxyNode) GetIndexBuildProgress(ctx context.Context, request *milvuspb.GetIndexBuildProgressRequest) (*milvuspb.GetIndexBuildProgressResponse, error) {
func (node *Proxy) GetIndexBuildProgress(ctx context.Context, request *milvuspb.GetIndexBuildProgressRequest) (*milvuspb.GetIndexBuildProgressResponse, error) {
if !node.checkHealthy() {
return &milvuspb.GetIndexBuildProgressResponse{
Status: unhealthyStatus(),
@ -1089,7 +1089,7 @@ func (node *ProxyNode) GetIndexBuildProgress(ctx context.Context, request *milvu
return gibpt.result, nil
}
func (node *ProxyNode) GetIndexState(ctx context.Context, request *milvuspb.GetIndexStateRequest) (*milvuspb.GetIndexStateResponse, error) {
func (node *Proxy) GetIndexState(ctx context.Context, request *milvuspb.GetIndexStateRequest) (*milvuspb.GetIndexStateResponse, error) {
if !node.checkHealthy() {
return &milvuspb.GetIndexStateResponse{
Status: unhealthyStatus(),
@ -1146,7 +1146,7 @@ func (node *ProxyNode) GetIndexState(ctx context.Context, request *milvuspb.GetI
return dipt.result, nil
}
func (node *ProxyNode) Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.MutationResult, error) {
func (node *Proxy) Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.MutationResult, error) {
if !node.checkHealthy() {
return &milvuspb.MutationResult{
Status: unhealthyStatus(),
@ -1244,7 +1244,7 @@ func (node *ProxyNode) Insert(ctx context.Context, request *milvuspb.InsertReque
return it.result, nil
}
func (node *ProxyNode) Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
if !node.checkHealthy() {
return &milvuspb.SearchResults{
Status: unhealthyStatus(),
@ -1322,7 +1322,7 @@ func (node *ProxyNode) Search(ctx context.Context, request *milvuspb.SearchReque
return qt.result, nil
}
func (node *ProxyNode) Retrieve(ctx context.Context, request *milvuspb.RetrieveRequest) (*milvuspb.RetrieveResults, error) {
func (node *Proxy) Retrieve(ctx context.Context, request *milvuspb.RetrieveRequest) (*milvuspb.RetrieveResults, error) {
if !node.checkHealthy() {
return &milvuspb.RetrieveResults{
Status: unhealthyStatus(),
@ -1386,7 +1386,7 @@ func (node *ProxyNode) Retrieve(ctx context.Context, request *milvuspb.RetrieveR
return rt.result, nil
}
func (node *ProxyNode) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error) {
func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -1432,7 +1432,7 @@ func (node *ProxyNode) Flush(ctx context.Context, request *milvuspb.FlushRequest
return ft.result, nil
}
func (node *ProxyNode) Query(ctx context.Context, request *milvuspb.QueryRequest) (*milvuspb.QueryResults, error) {
func (node *Proxy) Query(ctx context.Context, request *milvuspb.QueryRequest) (*milvuspb.QueryResults, error) {
if !node.checkHealthy() {
return &milvuspb.QueryResults{
Status: unhealthyStatus(),
@ -1559,11 +1559,11 @@ func (node *ProxyNode) Query(ctx context.Context, request *milvuspb.QueryRequest
}
func (node *ProxyNode) GetDdChannel(ctx context.Context, request *internalpb.GetDdChannelRequest) (*milvuspb.StringResponse, error) {
func (node *Proxy) GetDdChannel(ctx context.Context, request *internalpb.GetDdChannelRequest) (*milvuspb.StringResponse, error) {
panic("implement me")
}
func (node *ProxyNode) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.GetPersistentSegmentInfoRequest) (*milvuspb.GetPersistentSegmentInfoResponse, error) {
func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.GetPersistentSegmentInfoRequest) (*milvuspb.GetPersistentSegmentInfoResponse, error) {
log.Debug("GetPersistentSegmentInfo",
zap.String("role", Params.RoleName),
zap.String("db", req.DbName),
@ -1616,7 +1616,7 @@ func (node *ProxyNode) GetPersistentSegmentInfo(ctx context.Context, req *milvus
return resp, nil
}
func (node *ProxyNode) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQuerySegmentInfoRequest) (*milvuspb.GetQuerySegmentInfoResponse, error) {
func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQuerySegmentInfoRequest) (*milvuspb.GetQuerySegmentInfoResponse, error) {
log.Debug("GetQuerySegmentInfo",
zap.String("role", Params.RoleName),
zap.String("db", req.DbName),
@ -1674,7 +1674,7 @@ func (node *ProxyNode) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.Ge
return resp, nil
}
func (node *ProxyNode) getSegmentsOfCollection(ctx context.Context, dbName string, collectionName string) ([]UniqueID, error) {
func (node *Proxy) getSegmentsOfCollection(ctx context.Context, dbName string, collectionName string) ([]UniqueID, error) {
describeCollectionResponse, err := node.rootCoord.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeCollection,
@ -1733,7 +1733,7 @@ func (node *ProxyNode) getSegmentsOfCollection(ctx context.Context, dbName strin
return ret, nil
}
func (node *ProxyNode) Dummy(ctx context.Context, req *milvuspb.DummyRequest) (*milvuspb.DummyResponse, error) {
func (node *Proxy) Dummy(ctx context.Context, req *milvuspb.DummyRequest) (*milvuspb.DummyResponse, error) {
failedResponse := &milvuspb.DummyResponse{
Response: `{"status": "fail"}`,
}
@ -1781,18 +1781,18 @@ func (node *ProxyNode) Dummy(ctx context.Context, req *milvuspb.DummyRequest) (*
return failedResponse, nil
}
func (node *ProxyNode) RegisterLink(ctx context.Context, req *milvuspb.RegisterLinkRequest) (*milvuspb.RegisterLinkResponse, error) {
func (node *Proxy) RegisterLink(ctx context.Context, req *milvuspb.RegisterLinkRequest) (*milvuspb.RegisterLinkResponse, error) {
code := node.stateCode.Load().(internalpb.StateCode)
log.Debug("RegisterLink",
zap.String("role", Params.RoleName),
zap.Any("state code of proxynode", code))
zap.Any("state code of proxy", code))
if code != internalpb.StateCode_Healthy {
return &milvuspb.RegisterLinkResponse{
Address: nil,
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "proxy node not healthy",
Reason: "proxy not healthy",
},
}, nil
}
@ -1805,8 +1805,8 @@ func (node *ProxyNode) RegisterLink(ctx context.Context, req *milvuspb.RegisterL
}, nil
}
// checkHealthy checks proxy node state is Healthy
func (node *ProxyNode) checkHealthy() bool {
// checkHealthy checks proxy state is Healthy
func (node *Proxy) checkHealthy() bool {
code := node.stateCode.Load().(internalpb.StateCode)
return code == internalpb.StateCode_Healthy
}
@ -1814,6 +1814,6 @@ func (node *ProxyNode) checkHealthy() bool {
func unhealthyStatus() *commonpb.Status {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "proxy node not healthy",
Reason: "proxy not healthy",
}
}

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
type IndexType = string

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"
@ -33,7 +33,7 @@ type insertChannelsMap struct {
usageHistogram []int // message stream can be closed only when the use count is zero
// TODO: use fine grained lock
mtx sync.RWMutex
nodeInstance *ProxyNode
nodeInstance *Proxy
msFactory msgstream.Factory
}
@ -60,7 +60,7 @@ func (m *insertChannelsMap) CreateInsertMsgStream(collID UniqueID, channels []st
stream, _ := m.msFactory.NewMsgStream(context.Background())
stream.AsProducer(channels)
log.Debug("proxynode", zap.Strings("proxynode AsProducer: ", channels))
log.Debug("proxy", zap.Strings("proxy AsProducer: ", channels))
stream.SetRepackFunc(insertRepackFunc)
stream.Start()
m.insertMsgStreams = append(m.insertMsgStreams, stream)
@ -146,7 +146,7 @@ func (m *insertChannelsMap) CloseAllMsgStream() {
m.usageHistogram = make([]int, 0)
}
func newInsertChannelsMap(node *ProxyNode) *insertChannelsMap {
func newInsertChannelsMap(node *Proxy) *insertChannelsMap {
return &insertChannelsMap{
collectionID2InsertChannels: make(map[UniqueID]int),
insertChannels: make([][]string, 0),
@ -162,7 +162,7 @@ var globalInsertChannelsMap *insertChannelsMap
var initGlobalInsertChannelsMapOnce sync.Once
// change to singleton mode later? Such as GetInsertChannelsMapInstance like GetConfAdapterMgrInstance.
func initGlobalInsertChannelsMap(node *ProxyNode) {
func initGlobalInsertChannelsMap(node *Proxy) {
initGlobalInsertChannelsMapOnce.Do(func() {
globalInsertChannelsMap = newInsertChannelsMap(node)
})

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"testing"
@ -23,7 +23,7 @@ import (
func TestInsertChannelsMap_CreateInsertMsgStream(t *testing.T) {
msFactory := msgstream.NewSimpleMsgStreamFactory()
node := &ProxyNode{
node := &Proxy{
segAssigner: nil,
msFactory: msFactory,
}
@ -52,7 +52,7 @@ func TestInsertChannelsMap_CreateInsertMsgStream(t *testing.T) {
func TestInsertChannelsMap_CloseInsertMsgStream(t *testing.T) {
msFactory := msgstream.NewSimpleMsgStreamFactory()
node := &ProxyNode{
node := &Proxy{
segAssigner: nil,
msFactory: msFactory,
}
@ -92,7 +92,7 @@ func TestInsertChannelsMap_CloseInsertMsgStream(t *testing.T) {
func TestInsertChannelsMap_GetInsertChannels(t *testing.T) {
msFactory := msgstream.NewSimpleMsgStreamFactory()
node := &ProxyNode{
node := &Proxy{
segAssigner: nil,
msFactory: msFactory,
}
@ -140,7 +140,7 @@ func TestInsertChannelsMap_GetInsertChannels(t *testing.T) {
func TestInsertChannelsMap_GetInsertMsgStream(t *testing.T) {
msFactory := msgstream.NewSimpleMsgStreamFactory()
node := &ProxyNode{
node := &Proxy{
segAssigner: nil,
msFactory: msFactory,
}
@ -188,7 +188,7 @@ func TestInsertChannelsMap_GetInsertMsgStream(t *testing.T) {
func TestInsertChannelsMap_CloseAllMsgStream(t *testing.T) {
msFactory := msgstream.NewSimpleMsgStreamFactory()
node := &ProxyNode{
node := &Proxy{
segAssigner: nil,
msFactory: msFactory,
}

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"
@ -143,9 +143,9 @@ func (m *MetaCache) GetPartitionID(ctx context.Context, collectionName string, p
m.mu.Lock()
defer m.mu.Unlock()
log.Debug("proxynode", zap.Any("GetPartitionID:partitions before update", partitions), zap.Any("collectionName", collectionName))
log.Debug("proxy", zap.Any("GetPartitionID:partitions before update", partitions), zap.Any("collectionName", collectionName))
m.updatePartitions(partitions, collectionName)
log.Debug("proxynode", zap.Any("GetPartitionID:partitions after update", partitions), zap.Any("collectionName", collectionName))
log.Debug("proxy", zap.Any("GetPartitionID:partitions after update", partitions), zap.Any("collectionName", collectionName))
partInfo := m.collInfo[collectionName].partInfo
_, ok := partInfo[partitionName]

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"fmt"
@ -252,7 +252,7 @@ func (pt *ParamTable) initLogCfg() {
}
func (pt *ParamTable) initRoleName() {
pt.RoleName = fmt.Sprintf("%s-%s", "ProxyNode", pt.Alias)
pt.RoleName = fmt.Sprintf("%s-%s", "Proxy", pt.Alias)
}
func (pt *ParamTable) initEtcdEndpoints() {

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"fmt"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"fmt"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"
@ -41,7 +41,7 @@ type Timestamp = typeutil.Timestamp
const sendTimeTickMsgInterval = 200 * time.Millisecond
const channelMgrTickerInterval = 100 * time.Millisecond
type ProxyNode struct {
type Proxy struct {
ctx context.Context
cancel func()
wg sync.WaitGroup
@ -77,79 +77,79 @@ type ProxyNode struct {
closeCallbacks []func()
}
func NewProxyNode(ctx context.Context, factory msgstream.Factory) (*ProxyNode, error) {
func NewProxy(ctx context.Context, factory msgstream.Factory) (*Proxy, error) {
rand.Seed(time.Now().UnixNano())
ctx1, cancel := context.WithCancel(ctx)
node := &ProxyNode{
node := &Proxy{
ctx: ctx1,
cancel: cancel,
msFactory: factory,
}
node.UpdateStateCode(internalpb.StateCode_Abnormal)
log.Debug("ProxyNode", zap.Any("State", node.stateCode.Load()))
log.Debug("Proxy", zap.Any("State", node.stateCode.Load()))
return node, nil
}
// Register register proxy node at etcd
func (node *ProxyNode) Register() error {
// Register register proxy at etcd
func (node *Proxy) Register() error {
node.session = sessionutil.NewSession(node.ctx, Params.MetaRootPath, Params.EtcdEndpoints)
node.session.Init(typeutil.ProxyNodeRole, Params.NetworkAddress, false)
node.session.Init(typeutil.ProxyRole, Params.NetworkAddress, false)
Params.ProxyID = node.session.ServerID
return nil
}
func (node *ProxyNode) Init() error {
func (node *Proxy) Init() error {
// wait for datacoord state changed to Healthy
if node.dataCoord != nil {
log.Debug("ProxyNode wait for dataCoord ready")
log.Debug("Proxy wait for dataCoord ready")
err := funcutil.WaitForComponentHealthy(node.ctx, node.dataCoord, "DataCoord", 1000000, time.Millisecond*200)
if err != nil {
log.Debug("ProxyNode wait for dataCoord ready failed", zap.Error(err))
log.Debug("Proxy wait for dataCoord ready failed", zap.Error(err))
return err
}
log.Debug("ProxyNode dataCoord is ready")
log.Debug("Proxy dataCoord is ready")
}
// wait for queryService state changed to Healthy
if node.queryService != nil {
log.Debug("ProxyNode wait for queryService ready")
log.Debug("Proxy wait for queryService ready")
err := funcutil.WaitForComponentHealthy(node.ctx, node.queryService, "QueryService", 1000000, time.Millisecond*200)
if err != nil {
log.Debug("ProxyNode wait for queryService ready failed", zap.Error(err))
log.Debug("Proxy wait for queryService ready failed", zap.Error(err))
return err
}
log.Debug("ProxyNode queryService is ready")
log.Debug("Proxy queryService is ready")
}
// wait for indexcoord state changed to Healthy
if node.indexCoord != nil {
log.Debug("ProxyNode wait for indexCoord ready")
log.Debug("Proxy wait for indexCoord ready")
err := funcutil.WaitForComponentHealthy(node.ctx, node.indexCoord, "IndexCoord", 1000000, time.Millisecond*200)
if err != nil {
log.Debug("ProxyNode wait for indexCoord ready failed", zap.Error(err))
log.Debug("Proxy wait for indexCoord ready failed", zap.Error(err))
return err
}
log.Debug("ProxyNode indexCoord is ready")
log.Debug("Proxy indexCoord is ready")
}
if node.queryService != nil {
resp, err := node.queryService.CreateQueryChannel(node.ctx, &querypb.CreateQueryChannelRequest{})
if err != nil {
log.Debug("ProxyNode CreateQueryChannel failed", zap.Error(err))
log.Debug("Proxy CreateQueryChannel failed", zap.Error(err))
return err
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Debug("ProxyNode CreateQueryChannel failed", zap.String("reason", resp.Status.Reason))
log.Debug("Proxy CreateQueryChannel failed", zap.String("reason", resp.Status.Reason))
return errors.New(resp.Status.Reason)
}
log.Debug("ProxyNode CreateQueryChannel success")
log.Debug("Proxy CreateQueryChannel success")
Params.SearchResultChannelNames = []string{resp.ResultChannel}
Params.RetrieveResultChannelNames = []string{resp.ResultChannel}
log.Debug("ProxyNode CreateQueryChannel success", zap.Any("SearchResultChannelNames", Params.SearchResultChannelNames))
log.Debug("ProxyNode CreateQueryChannel success", zap.Any("RetrieveResultChannelNames", Params.RetrieveResultChannelNames))
log.Debug("Proxy CreateQueryChannel success", zap.Any("SearchResultChannelNames", Params.SearchResultChannelNames))
log.Debug("Proxy CreateQueryChannel success", zap.Any("RetrieveResultChannelNames", Params.RetrieveResultChannelNames))
}
m := map[string]interface{}{
@ -262,7 +262,7 @@ func (node *ProxyNode) Init() error {
return nil
}
func (node *ProxyNode) sendChannelsTimeTickLoop() {
func (node *Proxy) sendChannelsTimeTickLoop() {
node.wg.Add(1)
go func() {
defer node.wg.Done()
@ -329,7 +329,7 @@ func (node *ProxyNode) sendChannelsTimeTickLoop() {
}()
}
func (node *ProxyNode) Start() error {
func (node *Proxy) Start() error {
err := InitMetaCache(node.rootCoord)
if err != nil {
return err
@ -362,12 +362,12 @@ func (node *ProxyNode) Start() error {
}
node.UpdateStateCode(internalpb.StateCode_Healthy)
log.Debug("ProxyNode", zap.Any("State", node.stateCode.Load()))
log.Debug("Proxy", zap.Any("State", node.stateCode.Load()))
return nil
}
func (node *ProxyNode) Stop() error {
func (node *Proxy) Stop() error {
node.cancel()
node.idAllocator.Close()
@ -389,31 +389,31 @@ func (node *ProxyNode) Stop() error {
}
// AddStartCallback adds a callback in the startServer phase.
func (node *ProxyNode) AddStartCallback(callbacks ...func()) {
func (node *Proxy) AddStartCallback(callbacks ...func()) {
node.startCallbacks = append(node.startCallbacks, callbacks...)
}
func (node *ProxyNode) lastTick() Timestamp {
func (node *Proxy) lastTick() Timestamp {
return node.tick.LastTick()
}
// AddCloseCallback adds a callback in the Close phase.
func (node *ProxyNode) AddCloseCallback(callbacks ...func()) {
func (node *Proxy) AddCloseCallback(callbacks ...func()) {
node.closeCallbacks = append(node.closeCallbacks, callbacks...)
}
func (node *ProxyNode) SetRootCoordClient(cli types.RootCoord) {
func (node *Proxy) SetRootCoordClient(cli types.RootCoord) {
node.rootCoord = cli
}
func (node *ProxyNode) SetIndexCoordClient(cli types.IndexCoord) {
func (node *Proxy) SetIndexCoordClient(cli types.IndexCoord) {
node.indexCoord = cli
}
func (node *ProxyNode) SetDataCoordClient(cli types.DataCoord) {
func (node *Proxy) SetDataCoordClient(cli types.DataCoord) {
node.dataCoord = cli
}
func (node *ProxyNode) SetQueryServiceClient(cli types.QueryService) {
func (node *Proxy) SetQueryServiceClient(cli types.QueryService) {
node.queryService = cli
}

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"github.com/milvus-io/milvus/internal/msgstream"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"container/list"
@ -294,7 +294,7 @@ func (sa *SegIDAssigner) syncSegments() (bool, error) {
sa.reduceSegReqs()
req := &datapb.AssignSegmentIDRequest{
NodeID: sa.PeerID,
PeerRole: typeutil.ProxyNodeRole,
PeerRole: typeutil.ProxyRole,
SegmentIDRequests: sa.segReqs,
}
@ -309,7 +309,7 @@ func (sa *SegIDAssigner) syncSegments() (bool, error) {
success := false
for _, info := range resp.SegIDAssignments {
if info.Status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Debug("proxynode", zap.String("SyncSegment Error", info.Status.Reason))
log.Debug("proxy", zap.String("SyncSegment Error", info.Status.Reason))
continue
}
assign, err := sa.getAssign(info.CollectionID, info.PartitionID, info.ChannelName)

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"bytes"
@ -427,7 +427,7 @@ func (it *InsertTask) transferColumnBasedRequestToRowBasedData() error {
}
}
if !printed {
log.Debug("ProxyNode, transform", zap.Any("ID", it.ID()), zap.Any("BlobLen", len(blob.Value)), zap.Any("dTypes", dTypes))
log.Debug("Proxy, transform", zap.Any("ID", it.ID()), zap.Any("BlobLen", len(blob.Value)), zap.Any("dTypes", dTypes))
printed = true
}
it.RowData = append(it.RowData, blob)
@ -603,7 +603,7 @@ func (it *InsertTask) PreExecute(ctx context.Context) error {
}
collSchema, err := globalMetaCache.GetCollectionSchema(ctx, collectionName)
log.Debug("ProxyNode Insert PreExecute", zap.Any("collSchema", collSchema))
log.Debug("Proxy Insert PreExecute", zap.Any("collSchema", collSchema))
if err != nil {
return err
}
@ -688,7 +688,7 @@ func (it *InsertTask) _assignSegmentID(stream msgstream.MsgStream, pack *msgstre
}
channelName := channelNames[channelID]
if channelName == "" {
return nil, fmt.Errorf("ProxyNode, repack_func, can not found channelName")
return nil, fmt.Errorf("Proxy, repack_func, can not found channelName")
}
mapInfo, err := it.segIDAssigner.GetSegmentID(it.CollectionID, it.PartitionID, channelName, count, ts)
if err != nil {
@ -696,7 +696,7 @@ func (it *InsertTask) _assignSegmentID(stream msgstream.MsgStream, pack *msgstre
}
reqSegCountMap[channelID] = make(map[UniqueID]uint32)
reqSegCountMap[channelID] = mapInfo
log.Debug("ProxyNode", zap.Int64("repackFunc, reqSegCountMap, reqID", reqID), zap.Any("mapinfo", mapInfo))
log.Debug("Proxy", zap.Int64("repackFunc, reqSegCountMap, reqID", reqID), zap.Any("mapinfo", mapInfo))
}
reqSegAccumulateCountMap := make(map[int32][]uint32)
@ -748,7 +748,7 @@ func (it *InsertTask) _assignSegmentID(stream msgstream.MsgStream, pack *msgstre
factor := 10
threshold := Params.PulsarMaxMessageSize / factor
log.Debug("ProxyNode", zap.Int("threshold of message size: ", threshold))
log.Debug("Proxy", zap.Int("threshold of message size: ", threshold))
// not accurate
getFixedSizeOfInsertMsg := func(msg *msgstream.InsertMsg) int {
size := 0
@ -888,7 +888,7 @@ func (it *InsertTask) Execute(ctx context.Context) error {
return err
}
for _, pchan := range pchans {
log.Debug("ProxyNode InsertTask add pchan", zap.Any("pchan", pchan))
log.Debug("Proxy InsertTask add pchan", zap.Any("pchan", pchan))
_ = it.chTicker.addPChan(pchan)
}
@ -1396,13 +1396,13 @@ func (st *SearchTask) Execute(ctx context.Context) error {
}
}
err = stream.Produce(&msgPack)
log.Debug("proxynode", zap.Int("length of searchMsg", len(msgPack.Msgs)))
log.Debug("proxy node sent one searchMsg",
log.Debug("proxy", zap.Int("length of searchMsg", len(msgPack.Msgs)))
log.Debug("proxy sent one searchMsg",
zap.Any("collectionID", st.CollectionID),
zap.Any("msgID", tsMsg.ID()),
)
if err != nil {
log.Debug("proxynode", zap.String("send search request failed", err.Error()))
log.Debug("proxy", zap.String("send search request failed", err.Error()))
}
return err
}
@ -1790,7 +1790,7 @@ func (st *SearchTask) PostExecute(ctx context.Context) error {
for {
select {
case <-st.TraceCtx().Done():
log.Debug("ProxyNode", zap.Int64("SearchTask PostExecute Loop exit caused by ctx.Done", st.ID()))
log.Debug("Proxy", zap.Int64("SearchTask PostExecute Loop exit caused by ctx.Done", st.ID()))
return fmt.Errorf("SearchTask:wait to finish failed, timeout: %d", st.ID())
case searchResults := <-st.resultBuf:
// fmt.Println("searchResults: ", searchResults)
@ -1807,9 +1807,9 @@ func (st *SearchTask) PostExecute(ctx context.Context) error {
}
availableQueryNodeNum := len(filterSearchResult)
log.Debug("ProxyNode Search PostExecute stage1", zap.Any("availableQueryNodeNum", availableQueryNodeNum))
log.Debug("Proxy Search PostExecute stage1", zap.Any("availableQueryNodeNum", availableQueryNodeNum))
if availableQueryNodeNum <= 0 {
log.Debug("ProxyNode Search PostExecute failed", zap.Any("filterReason", filterReason))
log.Debug("Proxy Search PostExecute failed", zap.Any("filterReason", filterReason))
st.result = &milvuspb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
@ -1827,10 +1827,10 @@ func (st *SearchTask) PostExecute(ctx context.Context) error {
}
availableQueryNodeNum++
}
log.Debug("ProxyNode Search PostExecute stage2", zap.Any("availableQueryNodeNum", availableQueryNodeNum))
log.Debug("Proxy Search PostExecute stage2", zap.Any("availableQueryNodeNum", availableQueryNodeNum))
if availableQueryNodeNum <= 0 {
log.Debug("ProxyNode Search PostExecute stage2 failed", zap.Any("filterReason", filterReason))
log.Debug("Proxy Search PostExecute stage2 failed", zap.Any("filterReason", filterReason))
st.result = &milvuspb.SearchResults{
Status: &commonpb.Status{
@ -1842,7 +1842,7 @@ func (st *SearchTask) PostExecute(ctx context.Context) error {
}
results, err := decodeSearchResults(filterSearchResult)
log.Debug("ProxyNode Search PostExecute decodeSearchResults", zap.Error(err))
log.Debug("Proxy Search PostExecute decodeSearchResults", zap.Error(err))
if err != nil {
return err
}
@ -1874,7 +1874,7 @@ func (st *SearchTask) PostExecute(ctx context.Context) error {
}
}
log.Debug("ProxyNode Search PostExecute Done")
log.Debug("Proxy Search PostExecute Done")
return nil
}
}
@ -2129,7 +2129,7 @@ func (rt *RetrieveTask) Execute(ctx context.Context) error {
}
}
err = stream.Produce(&msgPack)
log.Debug("proxynode", zap.Int("length of retrieveMsg", len(msgPack.Msgs)))
log.Debug("proxy", zap.Int("length of retrieveMsg", len(msgPack.Msgs)))
if err != nil {
log.Debug("Failed to send retrieve request.",
zap.Any("requestID", rt.Base.MsgID), zap.Any("requestType", "retrieve"))
@ -2147,7 +2147,7 @@ func (rt *RetrieveTask) PostExecute(ctx context.Context) error {
}()
select {
case <-rt.TraceCtx().Done():
log.Debug("proxynode", zap.Int64("Retrieve: wait to finish failed, timeout!, taskID:", rt.ID()))
log.Debug("proxy", zap.Int64("Retrieve: wait to finish failed, timeout!, taskID:", rt.ID()))
return fmt.Errorf("RetrieveTask:wait to finish failed, timeout : %d", rt.ID())
case retrieveResults := <-rt.resultBuf:
retrieveResult := make([]*internalpb.RetrieveResults, 0)
@ -3687,7 +3687,7 @@ func (gist *GetIndexStateTask) Execute(ctx context.Context) error {
State: commonpb.IndexState_Finished,
}
log.Debug("ProxyNode GetIndexState", zap.Int("IndexBuildIDs", len(getIndexStatesRequest.IndexBuildIDs)), zap.Error(err))
log.Debug("Proxy GetIndexState", zap.Int("IndexBuildIDs", len(getIndexStatesRequest.IndexBuildIDs)), zap.Error(err))
if len(getIndexStatesRequest.IndexBuildIDs) == 0 {
return nil

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"container/list"
@ -118,7 +118,7 @@ func (queue *BaseTaskQueue) AddActiveTask(t task) {
tID := t.ID()
_, ok := queue.activeTasks[tID]
if ok {
log.Debug("ProxyNode task with tID already in active task list!", zap.Any("ID", tID))
log.Debug("Proxy task with tID already in active task list!", zap.Any("ID", tID))
}
queue.activeTasks[tID] = t
@ -133,7 +133,7 @@ func (queue *BaseTaskQueue) PopActiveTask(tID UniqueID) task {
return t
}
log.Debug("ProxyNode task not in active task list! ts", zap.Any("tID", tID))
log.Debug("Proxy task not in active task list! ts", zap.Any("tID", tID))
return t
}
@ -254,10 +254,10 @@ func (queue *DmTaskQueue) PopActiveTask(tID UniqueID) task {
t, ok := queue.activeTasks[tID]
if ok {
delete(queue.activeTasks, tID)
log.Debug("ProxyNode DmTaskQueue popPChanStats", zap.Any("tID", t.ID()))
log.Debug("Proxy DmTaskQueue popPChanStats", zap.Any("tID", t.ID()))
queue.popPChanStats(t)
} else {
log.Debug("ProxyNode task not in active task list!", zap.Any("tID", tID))
log.Debug("Proxy task not in active task list!", zap.Any("tID", tID))
}
return t
}
@ -268,7 +268,7 @@ func (queue *DmTaskQueue) addPChanStats(t task) error {
if err != nil {
return err
}
log.Debug("ProxyNode DmTaskQueue addPChanStats", zap.Any("tID", t.ID()),
log.Debug("Proxy DmTaskQueue addPChanStats", zap.Any("tID", t.ID()),
zap.Any("stats", stats))
queue.statsLock.Lock()
for cName, stat := range stats {
@ -291,7 +291,7 @@ func (queue *DmTaskQueue) addPChanStats(t task) error {
}
queue.statsLock.Unlock()
} else {
return fmt.Errorf("ProxyNode addUnissuedTask reflect to dmlTask failed, tID:%v", t.ID())
return fmt.Errorf("Proxy addUnissuedTask reflect to dmlTask failed, tID:%v", t.ID())
}
return nil
}
@ -314,7 +314,7 @@ func (queue *DmTaskQueue) popPChanStats(t task) error {
}
queue.statsLock.Unlock()
} else {
return fmt.Errorf("ProxyNode DmTaskQueue popPChanStats reflect to dmlTask failed, tID:%v", t.ID())
return fmt.Errorf("Proxy DmTaskQueue popPChanStats reflect to dmlTask failed, tID:%v", t.ID())
}
return nil
}
@ -570,7 +570,7 @@ func newQueryResultBuf() *queryResultBuf {
}
func setContain(m1, m2 map[interface{}]struct{}) bool {
log.Debug("ProxyNode task_scheduler setContain", zap.Any("len(m1)", len(m1)),
log.Debug("Proxy task_scheduler setContain", zap.Any("len(m1)", len(m1)),
zap.Any("len(m2)", len(m2)))
if len(m1) < len(m2) {
return false
@ -578,7 +578,7 @@ func setContain(m1, m2 map[interface{}]struct{}) bool {
for k2 := range m2 {
_, ok := m1[k2]
log.Debug("ProxyNode task_scheduler setContain", zap.Any("k2", fmt.Sprintf("%v", k2)),
log.Debug("Proxy task_scheduler setContain", zap.Any("k2", fmt.Sprintf("%v", k2)),
zap.Any("ok", ok))
if !ok {
return false
@ -590,7 +590,7 @@ func setContain(m1, m2 map[interface{}]struct{}) bool {
func (sr *resultBufHeader) readyToReduce() bool {
if sr.haveError {
log.Debug("ProxyNode searchResultBuf readyToReduce", zap.Any("haveError", true))
log.Debug("Proxy searchResultBuf readyToReduce", zap.Any("haveError", true))
return true
}
@ -623,7 +623,7 @@ func (sr *resultBufHeader) readyToReduce() bool {
ret1 := setContain(sr.receivedVChansSet, sr.usedVChans)
ret2 := setContain(sr.receivedVChansSet, sr.usedChans)
log.Debug("ProxyNode searchResultBuf readyToReduce", zap.Any("receivedVChansSet", receivedVChansSetStrMap),
log.Debug("Proxy searchResultBuf readyToReduce", zap.Any("receivedVChansSet", receivedVChansSetStrMap),
zap.Any("usedVChans", usedVChansSetStrMap),
zap.Any("usedChans", usedChansSetStrMap),
zap.Any("receivedSealedSegmentIDsSet", sealedSegmentIDsStrMap),
@ -634,7 +634,7 @@ func (sr *resultBufHeader) readyToReduce() bool {
return false
}
ret := setContain(sr.receivedSealedSegmentIDsSet, sr.receivedGlobalSegmentIDsSet)
log.Debug("ProxyNode searchResultBuf readyToReduce", zap.Any("ret", ret))
log.Debug("Proxy searchResultBuf readyToReduce", zap.Any("ret", ret))
return ret
}
@ -678,7 +678,7 @@ func (sched *TaskScheduler) collectResultLoop() {
queryResultMsgStream, _ := sched.msFactory.NewQueryMsgStream(sched.ctx)
queryResultMsgStream.AsConsumer(Params.SearchResultChannelNames, Params.ProxySubName)
log.Debug("ProxyNode", zap.Strings("SearchResultChannelNames", Params.SearchResultChannelNames),
log.Debug("Proxy", zap.Strings("SearchResultChannelNames", Params.SearchResultChannelNames),
zap.Any("ProxySubName", Params.ProxySubName))
queryResultMsgStream.Start()
@ -693,7 +693,7 @@ func (sched *TaskScheduler) collectResultLoop() {
select {
case msgPack, ok := <-queryResultMsgStream.Chan():
if !ok {
log.Debug("ProxyNode collectResultLoop exit Chan closed")
log.Debug("Proxy collectResultLoop exit Chan closed")
return
}
if msgPack == nil {
@ -712,13 +712,13 @@ func (sched *TaskScheduler) collectResultLoop() {
ignoreThisResult = false
}
if ignoreThisResult {
log.Debug("ProxyNode collectResultLoop Got a SearchResultMsg, but we should ignore", zap.Any("ReqID", reqID))
log.Debug("Proxy collectResultLoop Got a SearchResultMsg, but we should ignore", zap.Any("ReqID", reqID))
continue
}
t := sched.getTaskByReqID(reqID)
log.Debug("ProxyNode collectResultLoop Got a SearchResultMsg", zap.Any("ReqID", reqID), zap.Any("t", t))
log.Debug("Proxy collectResultLoop Got a SearchResultMsg", zap.Any("ReqID", reqID), zap.Any("t", t))
if t == nil {
log.Debug("ProxyNode collectResultLoop GetTaskByReqID failed", zap.String("reqID", reqIDStr))
log.Debug("Proxy collectResultLoop GetTaskByReqID failed", zap.String("reqID", reqIDStr))
delete(searchResultBufs, reqID)
searchResultBufFlags[reqID] = true
continue
@ -726,7 +726,7 @@ func (sched *TaskScheduler) collectResultLoop() {
st, ok := t.(*SearchTask)
if !ok {
log.Debug("ProxyNode collectResultLoop type assert t as SearchTask failed", zap.Any("t", t))
log.Debug("Proxy collectResultLoop type assert t as SearchTask failed", zap.Any("t", t))
delete(searchResultBufs, reqID)
searchResultBufFlags[reqID] = true
continue
@ -736,7 +736,7 @@ func (sched *TaskScheduler) collectResultLoop() {
if !ok {
resultBuf = newSearchResultBuf()
vchans, err := st.getVChannels()
log.Debug("ProxyNode collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("vchans", vchans),
log.Debug("Proxy collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("vchans", vchans),
zap.Error(err))
if err != nil {
delete(searchResultBufs, reqID)
@ -746,7 +746,7 @@ func (sched *TaskScheduler) collectResultLoop() {
resultBuf.usedVChans[vchan] = struct{}{}
}
pchans, err := st.getChannels()
log.Debug("ProxyNode collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("pchans", pchans),
log.Debug("Proxy collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("pchans", pchans),
zap.Error(err))
if err != nil {
delete(searchResultBufs, reqID)
@ -762,11 +762,11 @@ func (sched *TaskScheduler) collectResultLoop() {
//t := sched.getTaskByReqID(reqID)
{
colName := t.(*SearchTask).query.CollectionName
log.Debug("ProxyNode collectResultLoop", zap.String("collection name", colName), zap.String("reqID", reqIDStr), zap.Int("answer cnt", len(searchResultBufs[reqID].resultBuf)))
log.Debug("Proxy collectResultLoop", zap.String("collection name", colName), zap.String("reqID", reqIDStr), zap.Int("answer cnt", len(searchResultBufs[reqID].resultBuf)))
}
if resultBuf.readyToReduce() {
log.Debug("ProxyNode collectResultLoop readyToReduce and assign to reduce")
log.Debug("Proxy collectResultLoop readyToReduce and assign to reduce")
searchResultBufFlags[reqID] = true
st.resultBuf <- resultBuf.resultBuf
delete(searchResultBufs, reqID)
@ -779,7 +779,7 @@ func (sched *TaskScheduler) collectResultLoop() {
//reqIDStr := strconv.FormatInt(reqID, 10)
//t := sched.getTaskByReqID(reqID)
//if t == nil {
// log.Debug("proxynode", zap.String("RetrieveResult GetTaskByReqID failed, reqID = ", reqIDStr))
// log.Debug("proxy", zap.String("RetrieveResult GetTaskByReqID failed, reqID = ", reqIDStr))
// delete(queryResultBufs, reqID)
// continue
//}
@ -814,13 +814,13 @@ func (sched *TaskScheduler) collectResultLoop() {
ignoreThisResult = false
}
if ignoreThisResult {
log.Debug("ProxyNode collectResultLoop Got a queryResultMsg, but we should ignore", zap.Any("ReqID", reqID))
log.Debug("Proxy collectResultLoop Got a queryResultMsg, but we should ignore", zap.Any("ReqID", reqID))
continue
}
t := sched.getTaskByReqID(reqID)
log.Debug("ProxyNode collectResultLoop Got a queryResultMsg", zap.Any("ReqID", reqID), zap.Any("t", t))
log.Debug("Proxy collectResultLoop Got a queryResultMsg", zap.Any("ReqID", reqID), zap.Any("t", t))
if t == nil {
log.Debug("ProxyNode collectResultLoop GetTaskByReqID failed", zap.String("reqID", reqIDStr))
log.Debug("Proxy collectResultLoop GetTaskByReqID failed", zap.String("reqID", reqIDStr))
delete(queryResultBufs, reqID)
queryResultBufFlags[reqID] = true
continue
@ -828,7 +828,7 @@ func (sched *TaskScheduler) collectResultLoop() {
st, ok := t.(*RetrieveTask)
if !ok {
log.Debug("ProxyNode collectResultLoop type assert t as RetrieveTask failed", zap.Any("t", t))
log.Debug("Proxy collectResultLoop type assert t as RetrieveTask failed", zap.Any("t", t))
delete(queryResultBufs, reqID)
queryResultBufFlags[reqID] = true
continue
@ -838,7 +838,7 @@ func (sched *TaskScheduler) collectResultLoop() {
if !ok {
resultBuf = newQueryResultBuf()
vchans, err := st.getVChannels()
log.Debug("ProxyNode collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("vchans", vchans),
log.Debug("Proxy collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("vchans", vchans),
zap.Error(err))
if err != nil {
delete(queryResultBufs, reqID)
@ -848,7 +848,7 @@ func (sched *TaskScheduler) collectResultLoop() {
resultBuf.usedVChans[vchan] = struct{}{}
}
pchans, err := st.getChannels()
log.Debug("ProxyNode collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("pchans", pchans),
log.Debug("Proxy collectResultLoop, first receive", zap.Any("reqID", reqID), zap.Any("pchans", pchans),
zap.Error(err))
if err != nil {
delete(queryResultBufs, reqID)
@ -864,11 +864,11 @@ func (sched *TaskScheduler) collectResultLoop() {
//t := sched.getTaskByReqID(reqID)
{
colName := t.(*RetrieveTask).retrieve.CollectionName
log.Debug("ProxyNode collectResultLoop", zap.String("collection name", colName), zap.String("reqID", reqIDStr), zap.Int("answer cnt", len(queryResultBufs[reqID].resultBuf)))
log.Debug("Proxy collectResultLoop", zap.String("collection name", colName), zap.String("reqID", reqIDStr), zap.Int("answer cnt", len(queryResultBufs[reqID].resultBuf)))
}
if resultBuf.readyToReduce() {
log.Debug("ProxyNode collectResultLoop readyToReduce and assign to reduce")
log.Debug("Proxy collectResultLoop readyToReduce and assign to reduce")
queryResultBufFlags[reqID] = true
st.resultBuf <- resultBuf.resultBuf
delete(queryResultBufs, reqID)
@ -877,7 +877,7 @@ func (sched *TaskScheduler) collectResultLoop() {
}
}
case <-sched.ctx.Done():
log.Debug("ProxyNode collectResultLoop is closed ...")
log.Debug("Proxy collectResultLoop is closed ...")
return
}
}

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"context"
@ -65,7 +65,7 @@ func newTimeTick(ctx context.Context,
t.tickMsgStream, _ = t.msFactory.NewMsgStream(t.ctx)
t.tickMsgStream.AsProducer(Params.ProxyTimeTickChannelNames)
log.Debug("proxynode", zap.Strings("proxynode AsProducer", Params.ProxyTimeTickChannelNames))
log.Debug("proxy", zap.Strings("proxy AsProducer", Params.ProxyTimeTickChannelNames))
return t
}
@ -98,7 +98,7 @@ func (tt *timeTick) tick() error {
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
err := tt.tickMsgStream.Produce(&msgPack)
if err != nil {
log.Warn("proxynode", zap.String("error", err.Error()))
log.Warn("proxy", zap.String("error", err.Error()))
}
tt.tickLock.Lock()
defer tt.tickLock.Unlock()

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"encoding/json"
@ -32,7 +32,7 @@ func GetPulsarConfig(protocol, ip, port, url string) (map[string]interface{}, er
var err error
getResp := func() error {
log.Debug("proxynode util", zap.String("url", protocol+"://"+ip+":"+port+url))
log.Debug("proxy util", zap.String("url", protocol+"://"+ip+":"+port+url))
resp, err = http.Get(protocol + "://" + ip + ":" + port + url)
return err
}

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"fmt"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"errors"

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package proxynode
package proxy
import (
"testing"

@ -70,7 +70,7 @@ func (p *proxyNodeManager) WatchProxyNode() error {
defer cancel()
resp, err := p.etcdCli.Get(
ctx2,
path.Join(Params.MetaRootPath, sessionutil.DefaultServiceRoot, typeutil.ProxyNodeRole),
path.Join(Params.MetaRootPath, sessionutil.DefaultServiceRoot, typeutil.ProxyRole),
clientv3.WithPrefix(),
clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend),
)
@ -99,7 +99,7 @@ func (p *proxyNodeManager) WatchProxyNode() error {
rch := p.etcdCli.Watch(
p.ctx,
path.Join(Params.MetaRootPath, sessionutil.DefaultServiceRoot, typeutil.ProxyNodeRole),
path.Join(Params.MetaRootPath, sessionutil.DefaultServiceRoot, typeutil.ProxyRole),
clientv3.WithPrefix(),
clientv3.WithCreatedNotify(),
clientv3.WithPrevKV(),

@ -40,7 +40,7 @@ func TestProxyNodeManager(t *testing.T) {
}
b1, err := json.Marshal(&s1)
assert.Nil(t, err)
k1 := path.Join(sessKey, typeutil.ProxyNodeRole+"-100")
k1 := path.Join(sessKey, typeutil.ProxyRole+"-100")
_, err = cli.Put(ctx, k1, string(b1))
assert.Nil(t, err)
@ -49,7 +49,7 @@ func TestProxyNodeManager(t *testing.T) {
}
b0, err := json.Marshal(&s0)
assert.Nil(t, err)
k0 := path.Join(sessKey, typeutil.ProxyNodeRole+"-99")
k0 := path.Join(sessKey, typeutil.ProxyRole+"-99")
_, err = cli.Put(ctx, k0, string(b0))
assert.Nil(t, err)
@ -82,7 +82,7 @@ func TestProxyNodeManager(t *testing.T) {
}
b2, err := json.Marshal(&s2)
assert.Nil(t, err)
k2 := path.Join(sessKey, typeutil.ProxyNodeRole+"-101")
k2 := path.Join(sessKey, typeutil.ProxyRole+"-101")
_, err = cli.Put(ctx, k2, string(b2))
assert.Nil(t, err)

@ -295,7 +295,7 @@ func TestMasterService(t *testing.T) {
},
)
assert.Nil(t, err)
_, err = etcdCli.Put(ctx, path.Join(sessKey, typeutil.ProxyNodeRole+"-100"), string(pnb))
_, err = etcdCli.Put(ctx, path.Join(sessKey, typeutil.ProxyRole+"-100"), string(pnb))
assert.Nil(t, err)
pnm := &proxyNodeMock{
@ -1486,9 +1486,9 @@ func TestMasterService(t *testing.T) {
s2, err := json.Marshal(&p2)
assert.Nil(t, err)
_, err = core.etcdCli.Put(ctx2, path.Join(sessKey, typeutil.ProxyNodeRole)+"-1", string(s1))
_, err = core.etcdCli.Put(ctx2, path.Join(sessKey, typeutil.ProxyRole)+"-1", string(s1))
assert.Nil(t, err)
_, err = core.etcdCli.Put(ctx2, path.Join(sessKey, typeutil.ProxyNodeRole)+"-2", string(s2))
_, err = core.etcdCli.Put(ctx2, path.Join(sessKey, typeutil.ProxyRole)+"-2", string(s2))
assert.Nil(t, err)
time.Sleep(time.Second)

@ -17,7 +17,7 @@ type UniqueID = int64
const (
RootCoordRole = "RootCoord"
ProxyNodeRole = "ProxyNode"
ProxyRole = "Proxy"
QueryServiceRole = "QueryService"
QueryNodeRole = "QueryNode"
IndexCoordRole = "IndexCoord"
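
The role-string change in the hunk above also renames the etcd keys under which proxies register, as the WatchProxyNode and session-key test hunks earlier in this diff show. Below is a minimal, standalone Go sketch of that key layout; it is not part of this commit, and the literal "by-dev/meta" and "session" values are placeholders standing in for Params.MetaRootPath and sessionutil.DefaultServiceRoot, whose real values come from configuration.

package main

import (
	"fmt"
	"path"
)

// ProxyRole mirrors typeutil.ProxyRole after the rename in this commit.
const ProxyRole = "Proxy"

func main() {
	// Key prefix watched by WatchProxyNode:
	//   <MetaRootPath>/<DefaultServiceRoot>/<ProxyRole>
	prefix := path.Join("by-dev/meta", "session", ProxyRole)
	fmt.Println(prefix)          // by-dev/meta/session/Proxy
	fmt.Println(prefix + "-100") // one concrete proxy session key, as in the test hunks above
}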