From 2a850821b5b70f1f9c595bd745675c4120d21a4d Mon Sep 17 00:00:00 2001
From: groot <yihua.mo@zilliz.com>
Date: Tue, 16 Apr 2019 12:03:33 +0800
Subject: [PATCH] add test client

Former-commit-id: af73a98aad00251fa5432d39e092ee950fe2db2b
---
 cpp/CMakeLists.txt                            |    1 +
 cpp/README.md                                 |    8 +
 cpp/src/CMakeLists.txt                        |   11 +
 cpp/src/main.cpp                              |    9 +-
 cpp/src/server/Server.cpp                     |    9 +-
 cpp/src/server/ServerConfig.cpp               |   15 -
 cpp/src/server/ServerConfig.h                 |   11 +-
 cpp/src/server/ServiceWrapper.cpp             |  152 +
 cpp/src/server/ServiceWrapper.h               |   26 +
 cpp/src/thrift/VectorService.thrift           |   90 +
 cpp/src/thrift/gen-cpp/VecService.cpp         | 3009 +++++++++++++++++
 cpp/src/thrift/gen-cpp/VecService.h           | 1170 +++++++
 .../gen-cpp/VecService_server.skeleton.cpp    |   91 +
 .../gen-cpp/VectorService_constants.cpp       |   17 +
 .../thrift/gen-cpp/VectorService_constants.h  |   24 +
 .../thrift/gen-cpp/VectorService_types.cpp    | 1317 ++++++++
 cpp/src/thrift/gen-cpp/VectorService_types.h  |  548 +++
 cpp/src/utils/CommonUtil.cpp                  |    4 +-
 cpp/src/utils/SignalUtil.cpp                  |    1 -
 cpp/test_client/CMakeLists.txt                |   42 +
 cpp/test_client/main.cpp                      |   74 +
 cpp/test_client/src/ClientApp.cpp             |   19 +
 cpp/test_client/src/ClientApp.h               |   21 +
 23 files changed, 6640 insertions(+), 29 deletions(-)
 create mode 100644 cpp/src/server/ServiceWrapper.cpp
 create mode 100644 cpp/src/server/ServiceWrapper.h
 create mode 100644 cpp/src/thrift/VectorService.thrift
 create mode 100644 cpp/src/thrift/gen-cpp/VecService.cpp
 create mode 100644 cpp/src/thrift/gen-cpp/VecService.h
 create mode 100644 cpp/src/thrift/gen-cpp/VecService_server.skeleton.cpp
 create mode 100644 cpp/src/thrift/gen-cpp/VectorService_constants.cpp
 create mode 100644 cpp/src/thrift/gen-cpp/VectorService_constants.h
 create mode 100644 cpp/src/thrift/gen-cpp/VectorService_types.cpp
 create mode 100644 cpp/src/thrift/gen-cpp/VectorService_types.h
 create mode 100644 cpp/test_client/CMakeLists.txt
 create mode 100644 cpp/test_client/main.cpp
 create mode 100644 cpp/test_client/src/ClientApp.cpp
 create mode 100644 cpp/test_client/src/ClientApp.h

diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
index ca26204603..5fc2f28c13 100644
--- a/cpp/CMakeLists.txt
+++ b/cpp/CMakeLists.txt
@@ -59,6 +59,7 @@ link_directories(${VECWISE_THIRD_PARTY_BUILD}/lib)
 #                WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
 
 add_subdirectory(src)
+add_subdirectory(test_client)
 
 if (BUILD_UNIT_TEST)
     add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest)
diff --git a/cpp/README.md b/cpp/README.md
index 66464cc9cb..93e0c8a36e 100644
--- a/cpp/README.md
+++ b/cpp/README.md
@@ -30,5 +30,13 @@ Set config in cpp/conf/server_config.yaml
 
 Then luanch server with config:
     
+    cd [build output path]/src
     vecwise_engine_server -c [sourcecode path]/cpp/conf/server_config.yaml
 
+### Launch test_client (only for debug)
+
+The test_client uses the same config file as the server:
+    
+    cd [build output path]/test_client
+    test_client -c [sourcecode path]/cpp/conf/server_config.yaml
+
diff --git a/cpp/src/CMakeLists.txt b/cpp/src/CMakeLists.txt
index 6698da6387..5d5b8d42a8 100644
--- a/cpp/src/CMakeLists.txt
+++ b/cpp/src/CMakeLists.txt
@@ -10,6 +10,11 @@ aux_source_directory(server server_files)
 aux_source_directory(utils utils_files)
 aux_source_directory(wrapper wrapper_files)
 
+set(service_files
+        thrift/gen-cpp/VecService.cpp
+        thrift/gen-cpp/VectorService_constants.cpp
+        thrift/gen-cpp/VectorService_types.cpp)
+
 set(vecwise_engine_src
         ${CMAKE_CURRENT_SOURCE_DIR}/main.cpp
         ${cache_files}
@@ -24,7 +29,11 @@ add_executable(vecwise_engine_server
         ${config_files}
         ${server_files}
         ${utils_files}
         ${wrapper_files}
+        ${service_files}
         ${VECWISE_THIRD_PARTY_BUILD}/include/easylogging++.cc
         )
 
@@ -33,5 +42,7 @@ set(dependency_libs
         yaml-cpp
         boost_system
         boost_filesystem
+        thrift
+        pthread
         )
 target_link_libraries(vecwise_engine_server ${dependency_libs} ${cuda_library})
\ No newline at end of file
diff --git a/cpp/src/main.cpp b/cpp/src/main.cpp
index 689f7a7af7..43cfc3436c 100644
--- a/cpp/src/main.cpp
+++ b/cpp/src/main.cpp
@@ -47,10 +47,11 @@ main(int argc, char *argv[]) {
 
     app_name = argv[0];
 
-//    if(argc < 5) {
-//        print_help(app_name);
-//        return EXIT_FAILURE;
-//    }
+    if(argc < 2) {
+        print_help(app_name);
+        printf("Vecwise engine server exit...\n");
+        return EXIT_FAILURE;
+    }
 
     int value;
     while ((value = getopt_long(argc, argv, "c:p:dh", long_options, &option_index)) != -1) {
diff --git a/cpp/src/server/Server.cpp b/cpp/src/server/Server.cpp
index 0a0f95fb1a..1536c7bf56 100644
--- a/cpp/src/server/Server.cpp
+++ b/cpp/src/server/Server.cpp
@@ -5,11 +5,13 @@
 ////////////////////////////////////////////////////////////////////////////////
 #include "Server.h"
 #include "ServerConfig.h"
+#include "ServiceWrapper.h"
 #include "utils/CommonUtil.h"
 #include "utils/SignalUtil.h"
 #include "utils/TimeRecorder.h"
 #include "utils/LogUtil.h"
 
+
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
@@ -157,9 +159,8 @@ Server::Start() {
             signal(SIGHUP, SignalUtil::HandleSignal);
             signal(SIGTERM, SignalUtil::HandleSignal);
 
-            StartService();
-
             CommonUtil::PrintInfo("Vecwise server is running...");
+            StartService();
 
         } catch(std::exception& ex){
             std::string info = "Vecwise server encounter exception: " + std::string(ex.what());
@@ -216,12 +217,12 @@ Server::LoadConfig() {
 
 void
 Server::StartService() {
-
+    ServiceWrapper::StartService();
 }
 
 void
 Server::StopService() {
-
+    ServiceWrapper::StopService();
 }
 
 }
diff --git a/cpp/src/server/ServerConfig.cpp b/cpp/src/server/ServerConfig.cpp
index ffcfd47181..d17af0ae25 100644
--- a/cpp/src/server/ServerConfig.cpp
+++ b/cpp/src/server/ServerConfig.cpp
@@ -17,9 +17,6 @@ namespace zilliz {
 namespace vecwise {
 namespace server {
 
-static const std::string CONFIG_ADDRESS = "address";
-static const std::string CONFIG_PORT = "port";
-
 ServerConfig&
 ServerConfig::GetInstance() {
     static ServerConfig config;
@@ -79,18 +76,6 @@ ServerConfig::GetConfig(const std::string& name) {
     return root_node.GetChild(name);
 }
 
-std::string
-ServerConfig::GetServerAddress() const {
-    ConfigNode server_config = GetConfig(CONFIG_SERVER);
-    return server_config.GetValue(CONFIG_ADDRESS);
-}
-
-std::string
-ServerConfig::GetServerPort() const {
-    ConfigNode server_config = GetConfig(CONFIG_SERVER);
-    return server_config.GetValue(CONFIG_PORT);
-}
-
 
 }
 }
diff --git a/cpp/src/server/ServerConfig.h b/cpp/src/server/ServerConfig.h
index d3169686bd..3be7a0914e 100644
--- a/cpp/src/server/ServerConfig.h
+++ b/cpp/src/server/ServerConfig.h
@@ -15,8 +15,16 @@ namespace vecwise {
 namespace server {
 
 static const std::string CONFIG_SERVER = "server_config";
+static const std::string CONFIG_SERVER_ADDRESS = "address";
+static const std::string CONFIG_SERVER_PORT = "port";
+static const std::string CONFIG_SERVER_PROTOCOL = "transfer_protocol";
+static const std::string CONFIG_SERVER_MODE = "server_mode";
+
 static const std::string CONFIG_LOG = "log_config";
 
+static const std::string CONFIG_CACHE = "cache_config";
+static const std::string CONFIG_CACHE_CAPACITY = "cache_capacity";
+
 class ServerConfig {
  public:
     static ServerConfig &GetInstance();
@@ -26,9 +34,6 @@ class ServerConfig {
 
     ConfigNode GetConfig(const std::string& name) const;
     ConfigNode& GetConfig(const std::string& name);
-
-    std::string GetServerAddress() const;
-    std::string GetServerPort() const;
 };
 
 }
diff --git a/cpp/src/server/ServiceWrapper.cpp b/cpp/src/server/ServiceWrapper.cpp
new file mode 100644
index 0000000000..96698881f4
--- /dev/null
+++ b/cpp/src/server/ServiceWrapper.cpp
@@ -0,0 +1,152 @@
+/*******************************************************************************
+ * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
+ * Unauthorized copying of this file, via any medium is strictly prohibited.
+ * Proprietary and confidential.
+ ******************************************************************************/
+#include "ServiceWrapper.h"
+#include "ServerConfig.h"
+
+#include "utils/CommonUtil.h"
+
+#include "thrift/gen-cpp/VecService.h"
+#include "thrift/gen-cpp/VectorService_types.h"
+#include "thrift/gen-cpp/VectorService_constants.h"
+
+#include <thrift/protocol/TBinaryProtocol.h>
+#include <thrift/server/TSimpleServer.h>
+#include <thrift/server/TThreadPoolServer.h>
+#include <thrift/transport/TServerSocket.h>
+#include <thrift/transport/TBufferTransports.h>
+#include <thrift/concurrency/PosixThreadFactory.h>
+
+#include <thread>
+
+namespace zilliz {
+namespace vecwise {
+namespace server {
+
+using namespace ::apache::thrift;
+using namespace ::apache::thrift::protocol;
+using namespace ::apache::thrift::transport;
+using namespace ::apache::thrift::server;
+using namespace ::apache::thrift::concurrency;
+
+class VecServiceHandler : virtual public VecServiceIf {
+public:
+    VecServiceHandler() {
+        // Your initialization goes here
+    }
+
+    /**
+     * group interfaces
+     *
+     * @param group
+     */
+    void add_group(const VecGroup& group) {
+        // Your implementation goes here
+        printf("add_group\n");
+    }
+
+    void get_group(VecGroup& _return, const std::string& group_id) {
+        // Your implementation goes here
+        printf("get_group\n");
+    }
+
+    void del_group(const std::string& group_id) {
+        // Your implementation goes here
+        printf("del_group\n");
+    }
+
+    /**
+     * vector interfaces
+     *
+     *
+     * @param group_id
+     * @param tensor
+     */
+    int64_t add_vector(const std::string& group_id, const VecTensor& tensor) {
+        // Your implementation goes here
+        printf("add_vector\n");
+        return 0;  // placeholder id; a non-void function must return a value
+    }
+
+    void add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list) {
+        // Your implementation goes here
+        printf("add_vector_batch\n");
+    }
+
+    /**
+     * search interfaces
+     * if time_range_list is empty, engine will search without time limit
+     *
+     * @param group_id
+     * @param top_k
+     * @param tensor
+     * @param time_range_list
+     */
+    void search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list) {
+        // Your implementation goes here
+        printf("search_vector\n");
+    }
+
+    void search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list) {
+        // Your implementation goes here
+        printf("search_vector_batch\n");
+    }
+
+};
+
+static ::apache::thrift::stdcxx::shared_ptr<TServer> s_server;
+
+void ServiceWrapper::StartService() {
+    if(s_server != nullptr){
+        StopService();
+    }
+
+    ServerConfig &config = ServerConfig::GetInstance();
+    ConfigNode server_config = config.GetConfig(CONFIG_SERVER);
+
+    std::string address = server_config.GetValue(CONFIG_SERVER_ADDRESS, "127.0.0.1");
+    int32_t port = server_config.GetInt32Value(CONFIG_SERVER_PORT, 33001);
+    std::string protocol = server_config.GetValue(CONFIG_SERVER_PROTOCOL, "binary");
+    std::string mode = server_config.GetValue(CONFIG_SERVER_MODE, "thread_pool");
+
+    ::apache::thrift::stdcxx::shared_ptr<VecServiceHandler> handler(new VecServiceHandler());
+    ::apache::thrift::stdcxx::shared_ptr<TProcessor> processor(new VecServiceProcessor(handler));
+
+    if(mode == "simple") {
+        ::apache::thrift::stdcxx::shared_ptr<TServerTransport> serverTransport(new TServerSocket(address, port));
+        ::apache::thrift::stdcxx::shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
+        ::apache::thrift::stdcxx::shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());
+        s_server.reset(new TSimpleServer(processor, serverTransport, transportFactory, protocolFactory));
+        s_server->serve();
+    } else if(mode == "thread_pool") {
+        ::apache::thrift::stdcxx::shared_ptr<TServerTransport> serverTransport(new TServerSocket(port));
+        ::apache::thrift::stdcxx::shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
+        ::apache::thrift::stdcxx::shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());
+
+        ::apache::thrift::stdcxx::shared_ptr<ThreadManager> threadManager(ThreadManager::newSimpleThreadManager(1));
+        ::apache::thrift::stdcxx::shared_ptr<PosixThreadFactory> threadFactory(new PosixThreadFactory());
+        threadManager->threadFactory(threadFactory);
+        threadManager->start();
+
+        s_server.reset(new TThreadPoolServer(processor, serverTransport, transportFactory, protocolFactory, threadManager));
+        s_server->serve();
+    } else {
+        CommonUtil::PrintError("Server mode: " + mode + " is not supported currently");
+    }
+}
+
+void ServiceWrapper::StopService() {
+    auto stop_server_worker = [&]{
+        if(s_server != nullptr) {
+            s_server->stop();
+        }
+    };
+
+    std::shared_ptr<std::thread> stop_thread = std::make_shared<std::thread>(stop_server_worker);
+    stop_thread->join();
+}
+
+}
+}
+}
\ No newline at end of file
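
Note: for anyone writing a caller against the service above, the sketch below shows the matching Thrift client stack. It is only an illustration mirroring the defaults ServiceWrapper::StartService falls back to (binary protocol over a buffered socket on 127.0.0.1:33001); the include path assumes cpp/src is on the include path, and the group id and dimension are made-up values, not part of this patch.

    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TSocket.h>
    #include <thrift/transport/TBufferTransports.h>

    #include "thrift/gen-cpp/VecService.h"

    using namespace ::apache::thrift;
    using namespace ::apache::thrift::protocol;
    using namespace ::apache::thrift::transport;

    int main() {
        // Same defaults ServiceWrapper::StartService uses: buffered transport, binary protocol.
        stdcxx::shared_ptr<TTransport> socket(new TSocket("127.0.0.1", 33001));
        stdcxx::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
        stdcxx::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
        VecServiceClient client(protocol);

        transport->open();
        VecGroup group;              // illustrative values only
        group.id = "demo_group";
        group.dimension = 256;
        client.add_group(group);     // the current handler stub only prints "add_group"
        transport->close();
        return 0;
    }

Note that both server modes call serve(), which blocks, so the test_client has to run in a separate process.
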
diff --git a/cpp/src/server/ServiceWrapper.h b/cpp/src/server/ServiceWrapper.h
new file mode 100644
index 0000000000..00b01f2c19
--- /dev/null
+++ b/cpp/src/server/ServiceWrapper.h
@@ -0,0 +1,26 @@
+/*******************************************************************************
+ * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
+ * Unauthorized copying of this file, via any medium is strictly prohibited.
+ * Proprietary and confidential.
+ ******************************************************************************/
+#pragma once
+
+#include "utils/Error.h"
+
+#include <cstdint>
+#include <string>
+
+namespace zilliz {
+namespace vecwise {
+namespace server {
+
+class ServiceWrapper {
+public:
+    static void StartService();
+    static void StopService();
+};
+
+
+}
+}
+}
diff --git a/cpp/src/thrift/VectorService.thrift b/cpp/src/thrift/VectorService.thrift
new file mode 100644
index 0000000000..7008ebd54c
--- /dev/null
+++ b/cpp/src/thrift/VectorService.thrift
@@ -0,0 +1,90 @@
+/*******************************************************************************
+ * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
+ * Unauthorized copying of this file, via any medium is strictly prohibited.
+ * Proprietary and confidential.
+ ******************************************************************************/
+
+enum VecErrCode {
+    SUCCESS = 0,
+	ILLEGAL_ARGUMENT,
+	GROUP_NOT_EXISTS,
+	ILLEGAL_TIME_RANGE,
+	ILLEGAL_VECTOR_DIMENSION,
+	OUT_OF_MEMORY,
+}
+
+exception VecException {
+	1: VecErrCode code;
+	2: string reason;
+}
+
+struct VecGroup {
+	1: string id;
+	2: i32 dimension;
+	3: i32 index_type;
+}
+
+struct VecTensor {
+	2: list<double> tensor;
+}
+
+struct VecTensorList {
+	1: list<VecTensor> tensor_list;
+}
+
+struct VecTensorIdList {
+	1: list<i64> id_list;
+}
+
+struct VecSearchResult {
+    1: list<i64> id_list;
+}
+
+struct VecSearchResultList {
+    1: list<VecSearchResult> result_list;
+}
+
+
+struct VecDateTime {
+    1: i32 year;
+    2: i32 month;
+    3: i32 day;
+    4: i32 hour;
+    5: i32 minute;
+    6: i32 second;
+}
+
+struct VecTimeRange {
+    1: VecDateTime time_begin;
+    2: bool begine_closed;
+    3: VecDateTime time_end;
+    4: bool end_closed;
+}
+
+struct VecTimeRangeList {
+    1: list<VecTimeRange> range_list;
+}
+
+service VecService {
+    /**
+     * group interfaces
+     */
+	void add_group(2: VecGroup group) throws(1: VecException e);
+	VecGroup get_group(2: string group_id) throws(1: VecException e);
+	void del_group(2: string group_id) throws(1: VecException e);
+
+
+    /**
+     * vector interfaces
+     *
+     */
+    i64 add_vector(2: string group_id, 3: VecTensor tensor) throws(1: VecException e);
+    VecTensorIdList add_vector_batch(2: string group_id, 3: VecTensorList tensor_list) throws(1: VecException e);
+
+    /**
+     * search interfaces
+     * if time_range_list is empty, engine will search without time limit
+     */
+    VecSearchResult search_vector(2: string group_id, 3: i64 top_k, 4: VecTensor tensor, 5: VecTimeRangeList time_range_list) throws(1: VecException e);
+    VecSearchResultList search_vector_batch(2: string group_id, 3: i64 top_k, 4: VecTensorList tensor_list, 5: VecTimeRangeList time_range_list) throws(1: VecException e);
+}
\ No newline at end of file
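
Note: the cpp/src/thrift/gen-cpp files added below are the Thrift 0.12 compiler's output for this IDL and are not meant to be edited by hand; if VectorService.thrift changes, they can be regenerated from cpp/src/thrift with:

    thrift --gen cpp VectorService.thrift
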
diff --git a/cpp/src/thrift/gen-cpp/VecService.cpp b/cpp/src/thrift/gen-cpp/VecService.cpp
new file mode 100644
index 0000000000..6374f947ee
--- /dev/null
+++ b/cpp/src/thrift/gen-cpp/VecService.cpp
@@ -0,0 +1,3009 @@
+/**
+ * Autogenerated by Thrift Compiler (0.12.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "VecService.h"
+
+
+
+
+VecService_add_group_args::~VecService_add_group_args() throw() {
+}
+
+
+uint32_t VecService_add_group_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->group.read(iprot);
+          this->__isset.group = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_add_group_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_add_group_args");
+
+  xfer += oprot->writeFieldBegin("group", ::apache::thrift::protocol::T_STRUCT, 2);
+  xfer += this->group.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_group_pargs::~VecService_add_group_pargs() throw() {
+}
+
+
+uint32_t VecService_add_group_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_add_group_pargs");
+
+  xfer += oprot->writeFieldBegin("group", ::apache::thrift::protocol::T_STRUCT, 2);
+  xfer += (*(this->group)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_group_result::~VecService_add_group_result() throw() {
+}
+
+
+uint32_t VecService_add_group_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_add_group_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("VecService_add_group_result");
+
+  if (this->__isset.e) {
+    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->e.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_group_presult::~VecService_add_group_presult() throw() {
+}
+
+
+uint32_t VecService_add_group_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+VecService_get_group_args::~VecService_get_group_args() throw() {
+}
+
+
+uint32_t VecService_get_group_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->group_id);
+          this->__isset.group_id = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_get_group_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_get_group_args");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->group_id);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_get_group_pargs::~VecService_get_group_pargs() throw() {
+}
+
+
+uint32_t VecService_get_group_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_get_group_pargs");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->group_id)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_get_group_result::~VecService_get_group_result() throw() {
+}
+
+
+uint32_t VecService_get_group_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_get_group_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("VecService_get_group_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.e) {
+    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->e.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_get_group_presult::~VecService_get_group_presult() throw() {
+}
+
+
+uint32_t VecService_get_group_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+VecService_del_group_args::~VecService_del_group_args() throw() {
+}
+
+
+uint32_t VecService_del_group_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->group_id);
+          this->__isset.group_id = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_del_group_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_del_group_args");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->group_id);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_del_group_pargs::~VecService_del_group_pargs() throw() {
+}
+
+
+uint32_t VecService_del_group_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_del_group_pargs");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->group_id)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_del_group_result::~VecService_del_group_result() throw() {
+}
+
+
+uint32_t VecService_del_group_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_del_group_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("VecService_del_group_result");
+
+  if (this->__isset.e) {
+    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->e.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_del_group_presult::~VecService_del_group_presult() throw() {
+}
+
+
+uint32_t VecService_del_group_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+VecService_add_vector_args::~VecService_add_vector_args() throw() {
+}
+
+
+uint32_t VecService_add_vector_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->group_id);
+          this->__isset.group_id = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->tensor.read(iprot);
+          this->__isset.tensor = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_add_vector_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_add_vector_args");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->group_id);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += this->tensor.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_vector_pargs::~VecService_add_vector_pargs() throw() {
+}
+
+
+uint32_t VecService_add_vector_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_add_vector_pargs");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->group_id)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += (*(this->tensor)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_vector_result::~VecService_add_vector_result() throw() {
+}
+
+
+uint32_t VecService_add_vector_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->success);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_add_vector_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("VecService_add_vector_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I64, 0);
+    xfer += oprot->writeI64(this->success);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.e) {
+    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->e.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_vector_presult::~VecService_add_vector_presult() throw() {
+}
+
+
+uint32_t VecService_add_vector_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64((*(this->success)));
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+VecService_add_vector_batch_args::~VecService_add_vector_batch_args() throw() {
+}
+
+
+uint32_t VecService_add_vector_batch_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->group_id);
+          this->__isset.group_id = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->tensor_list.read(iprot);
+          this->__isset.tensor_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_add_vector_batch_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_add_vector_batch_args");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->group_id);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor_list", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += this->tensor_list.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_vector_batch_pargs::~VecService_add_vector_batch_pargs() throw() {
+}
+
+
+uint32_t VecService_add_vector_batch_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_add_vector_batch_pargs");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->group_id)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor_list", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += (*(this->tensor_list)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_vector_batch_result::~VecService_add_vector_batch_result() throw() {
+}
+
+
+uint32_t VecService_add_vector_batch_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_add_vector_batch_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("VecService_add_vector_batch_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.e) {
+    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->e.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_add_vector_batch_presult::~VecService_add_vector_batch_presult() throw() {
+}
+
+
+uint32_t VecService_add_vector_batch_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+VecService_search_vector_args::~VecService_search_vector_args() throw() {
+}
+
+
+uint32_t VecService_search_vector_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->group_id);
+          this->__isset.group_id = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->top_k);
+          this->__isset.top_k = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->tensor.read(iprot);
+          this->__isset.tensor = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->time_range_list.read(iprot);
+          this->__isset.time_range_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_search_vector_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_search_vector_args");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->group_id);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("top_k", ::apache::thrift::protocol::T_I64, 3);
+  xfer += oprot->writeI64(this->top_k);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor", ::apache::thrift::protocol::T_STRUCT, 4);
+  xfer += this->tensor.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("time_range_list", ::apache::thrift::protocol::T_STRUCT, 5);
+  xfer += this->time_range_list.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_search_vector_pargs::~VecService_search_vector_pargs() throw() {
+}
+
+
+uint32_t VecService_search_vector_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_search_vector_pargs");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->group_id)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("top_k", ::apache::thrift::protocol::T_I64, 3);
+  xfer += oprot->writeI64((*(this->top_k)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor", ::apache::thrift::protocol::T_STRUCT, 4);
+  xfer += (*(this->tensor)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("time_range_list", ::apache::thrift::protocol::T_STRUCT, 5);
+  xfer += (*(this->time_range_list)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_search_vector_result::~VecService_search_vector_result() throw() {
+}
+
+
+uint32_t VecService_search_vector_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_search_vector_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("VecService_search_vector_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.e) {
+    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->e.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_search_vector_presult::~VecService_search_vector_presult() throw() {
+}
+
+
+uint32_t VecService_search_vector_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+VecService_search_vector_batch_args::~VecService_search_vector_batch_args() throw() {
+}
+
+
+uint32_t VecService_search_vector_batch_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->group_id);
+          this->__isset.group_id = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->top_k);
+          this->__isset.top_k = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->tensor_list.read(iprot);
+          this->__isset.tensor_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->time_range_list.read(iprot);
+          this->__isset.time_range_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_search_vector_batch_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_search_vector_batch_args");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->group_id);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("top_k", ::apache::thrift::protocol::T_I64, 3);
+  xfer += oprot->writeI64(this->top_k);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor_list", ::apache::thrift::protocol::T_STRUCT, 4);
+  xfer += this->tensor_list.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("time_range_list", ::apache::thrift::protocol::T_STRUCT, 5);
+  xfer += this->time_range_list.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_search_vector_batch_pargs::~VecService_search_vector_batch_pargs() throw() {
+}
+
+
+uint32_t VecService_search_vector_batch_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecService_search_vector_batch_pargs");
+
+  xfer += oprot->writeFieldBegin("group_id", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->group_id)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("top_k", ::apache::thrift::protocol::T_I64, 3);
+  xfer += oprot->writeI64((*(this->top_k)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tensor_list", ::apache::thrift::protocol::T_STRUCT, 4);
+  xfer += (*(this->tensor_list)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("time_range_list", ::apache::thrift::protocol::T_STRUCT, 5);
+  xfer += (*(this->time_range_list)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_search_vector_batch_result::~VecService_search_vector_batch_result() throw() {
+}
+
+
+uint32_t VecService_search_vector_batch_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecService_search_vector_batch_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("VecService_search_vector_batch_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.e) {
+    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->e.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+VecService_search_vector_batch_presult::~VecService_search_vector_batch_presult() throw() {
+}
+
+
+uint32_t VecService_search_vector_batch_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->e.read(iprot);
+          this->__isset.e = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+void VecServiceClient::add_group(const VecGroup& group)
+{
+  send_add_group(group);
+  recv_add_group();
+}
+
+void VecServiceClient::send_add_group(const VecGroup& group)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("add_group", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_add_group_pargs args;
+  args.group = &group;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void VecServiceClient::recv_add_group()
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("add_group") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  VecService_add_group_presult result;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.e) {
+    throw result.e;
+  }
+  return;
+}
+
+void VecServiceClient::get_group(VecGroup& _return, const std::string& group_id)
+{
+  send_get_group(group_id);
+  recv_get_group(_return);
+}
+
+void VecServiceClient::send_get_group(const std::string& group_id)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("get_group", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_get_group_pargs args;
+  args.group_id = &group_id;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void VecServiceClient::recv_get_group(VecGroup& _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("get_group") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  VecService_get_group_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    // _return pointer has now been filled
+    return;
+  }
+  if (result.__isset.e) {
+    throw result.e;
+  }
+  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_group failed: unknown result");
+}
+
+void VecServiceClient::del_group(const std::string& group_id)
+{
+  send_del_group(group_id);
+  recv_del_group();
+}
+
+void VecServiceClient::send_del_group(const std::string& group_id)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("del_group", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_del_group_pargs args;
+  args.group_id = &group_id;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void VecServiceClient::recv_del_group()
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("del_group") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  VecService_del_group_presult result;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.e) {
+    throw result.e;
+  }
+  return;
+}
+
+int64_t VecServiceClient::add_vector(const std::string& group_id, const VecTensor& tensor)
+{
+  send_add_vector(group_id, tensor);
+  return recv_add_vector();
+}
+
+void VecServiceClient::send_add_vector(const std::string& group_id, const VecTensor& tensor)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("add_vector", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_add_vector_pargs args;
+  args.group_id = &group_id;
+  args.tensor = &tensor;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+int64_t VecServiceClient::recv_add_vector()
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("add_vector") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  int64_t _return;
+  VecService_add_vector_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    return _return;
+  }
+  if (result.__isset.e) {
+    throw result.e;
+  }
+  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_vector failed: unknown result");
+}
+
+void VecServiceClient::add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list)
+{
+  send_add_vector_batch(group_id, tensor_list);
+  recv_add_vector_batch(_return);
+}
+
+void VecServiceClient::send_add_vector_batch(const std::string& group_id, const VecTensorList& tensor_list)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("add_vector_batch", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_add_vector_batch_pargs args;
+  args.group_id = &group_id;
+  args.tensor_list = &tensor_list;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void VecServiceClient::recv_add_vector_batch(VecTensorIdList& _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("add_vector_batch") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  VecService_add_vector_batch_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    // _return pointer has now been filled
+    return;
+  }
+  if (result.__isset.e) {
+    throw result.e;
+  }
+  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_vector_batch failed: unknown result");
+}
+
+void VecServiceClient::search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list)
+{
+  send_search_vector(group_id, top_k, tensor, time_range_list);
+  recv_search_vector(_return);
+}
+
+void VecServiceClient::send_search_vector(const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("search_vector", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_search_vector_pargs args;
+  args.group_id = &group_id;
+  args.top_k = &top_k;
+  args.tensor = &tensor;
+  args.time_range_list = &time_range_list;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void VecServiceClient::recv_search_vector(VecSearchResult& _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("search_vector") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  VecService_search_vector_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    // _return pointer has now been filled
+    return;
+  }
+  if (result.__isset.e) {
+    throw result.e;
+  }
+  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "search_vector failed: unknown result");
+}
+
+void VecServiceClient::search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list)
+{
+  send_search_vector_batch(group_id, top_k, tensor_list, time_range_list);
+  recv_search_vector_batch(_return);
+}
+
+void VecServiceClient::send_search_vector_batch(const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("search_vector_batch", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_search_vector_batch_pargs args;
+  args.group_id = &group_id;
+  args.top_k = &top_k;
+  args.tensor_list = &tensor_list;
+  args.time_range_list = &time_range_list;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void VecServiceClient::recv_search_vector_batch(VecSearchResultList& _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("search_vector_batch") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  VecService_search_vector_batch_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    // _return pointer has now been filled
+    return;
+  }
+  if (result.__isset.e) {
+    throw result.e;
+  }
+  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "search_vector_batch failed: unknown result");
+}
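+
+// ---------------------------------------------------------------------------
+// Illustrative sketch (not generated code): one way a debug client could
+// drive the synchronous VecServiceClient defined above. The host, port and
+// group id are made-up placeholders, and the Vec* structs are left at their
+// defaults because their fields are defined in VectorService_types.h.
+// Assumes C++11 and that apache::thrift::stdcxx maps to std::shared_ptr.
+// ---------------------------------------------------------------------------
+#include <memory>
+#include <thrift/protocol/TBinaryProtocol.h>
+#include <thrift/transport/TBufferTransports.h>
+#include <thrift/transport/TSocket.h>
+
+static void example_sync_client() {
+  using namespace ::apache::thrift;
+
+  // One buffered, binary-protocol connection, as a typical Thrift client uses.
+  auto socket    = std::make_shared<transport::TSocket>("127.0.0.1", 33001);
+  auto transport = std::make_shared<transport::TBufferedTransport>(socket);
+  auto protocol  = std::make_shared<protocol::TBinaryProtocol>(transport);
+
+  VecServiceClient client(protocol);
+  transport->open();
+
+  VecGroup group;                    // fields live in VectorService_types.h
+  client.add_group(group);           // throws VecException on a server error
+
+  VecSearchResult result;
+  VecTensor tensor;
+  VecTimeRangeList time_ranges;      // empty list: search without time limit
+  client.search_vector(result, "example_group", 10 /*top_k*/, tensor, time_ranges);
+
+  transport->close();
+}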
+
+bool VecServiceProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) {
+  ProcessMap::iterator pfn;
+  pfn = processMap_.find(fname);
+  if (pfn == processMap_.end()) {
+    iprot->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot->readMessageEnd();
+    iprot->getTransport()->readEnd();
+    ::apache::thrift::TApplicationException x(::apache::thrift::TApplicationException::UNKNOWN_METHOD, "Invalid method name: '"+fname+"'");
+    oprot->writeMessageBegin(fname, ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return true;
+  }
+  (this->*(pfn->second))(seqid, iprot, oprot, callContext);
+  return true;
+}
+
+void VecServiceProcessor::process_add_group(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("VecService.add_group", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "VecService.add_group");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "VecService.add_group");
+  }
+
+  VecService_add_group_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "VecService.add_group", bytes);
+  }
+
+  VecService_add_group_result result;
+  try {
+    iface_->add_group(args.group);
+  } catch (VecException &e) {
+    result.e = e;
+    result.__isset.e = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "VecService.add_group");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("add_group", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "VecService.add_group");
+  }
+
+  oprot->writeMessageBegin("add_group", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "VecService.add_group", bytes);
+  }
+}
+
+void VecServiceProcessor::process_get_group(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("VecService.get_group", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "VecService.get_group");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "VecService.get_group");
+  }
+
+  VecService_get_group_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "VecService.get_group", bytes);
+  }
+
+  VecService_get_group_result result;
+  try {
+    iface_->get_group(result.success, args.group_id);
+    result.__isset.success = true;
+  } catch (VecException &e) {
+    result.e = e;
+    result.__isset.e = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "VecService.get_group");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("get_group", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "VecService.get_group");
+  }
+
+  oprot->writeMessageBegin("get_group", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "VecService.get_group", bytes);
+  }
+}
+
+void VecServiceProcessor::process_del_group(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("VecService.del_group", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "VecService.del_group");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "VecService.del_group");
+  }
+
+  VecService_del_group_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "VecService.del_group", bytes);
+  }
+
+  VecService_del_group_result result;
+  try {
+    iface_->del_group(args.group_id);
+  } catch (VecException &e) {
+    result.e = e;
+    result.__isset.e = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "VecService.del_group");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("del_group", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "VecService.del_group");
+  }
+
+  oprot->writeMessageBegin("del_group", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "VecService.del_group", bytes);
+  }
+}
+
+void VecServiceProcessor::process_add_vector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("VecService.add_vector", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "VecService.add_vector");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "VecService.add_vector");
+  }
+
+  VecService_add_vector_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "VecService.add_vector", bytes);
+  }
+
+  VecService_add_vector_result result;
+  try {
+    result.success = iface_->add_vector(args.group_id, args.tensor);
+    result.__isset.success = true;
+  } catch (VecException &e) {
+    result.e = e;
+    result.__isset.e = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "VecService.add_vector");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("add_vector", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "VecService.add_vector");
+  }
+
+  oprot->writeMessageBegin("add_vector", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "VecService.add_vector", bytes);
+  }
+}
+
+void VecServiceProcessor::process_add_vector_batch(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("VecService.add_vector_batch", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "VecService.add_vector_batch");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "VecService.add_vector_batch");
+  }
+
+  VecService_add_vector_batch_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "VecService.add_vector_batch", bytes);
+  }
+
+  VecService_add_vector_batch_result result;
+  try {
+    iface_->add_vector_batch(result.success, args.group_id, args.tensor_list);
+    result.__isset.success = true;
+  } catch (VecException &e) {
+    result.e = e;
+    result.__isset.e = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "VecService.add_vector_batch");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("add_vector_batch", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "VecService.add_vector_batch");
+  }
+
+  oprot->writeMessageBegin("add_vector_batch", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "VecService.add_vector_batch", bytes);
+  }
+}
+
+void VecServiceProcessor::process_search_vector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("VecService.search_vector", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "VecService.search_vector");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "VecService.search_vector");
+  }
+
+  VecService_search_vector_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "VecService.search_vector", bytes);
+  }
+
+  VecService_search_vector_result result;
+  try {
+    iface_->search_vector(result.success, args.group_id, args.top_k, args.tensor, args.time_range_list);
+    result.__isset.success = true;
+  } catch (VecException &e) {
+    result.e = e;
+    result.__isset.e = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "VecService.search_vector");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("search_vector", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "VecService.search_vector");
+  }
+
+  oprot->writeMessageBegin("search_vector", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "VecService.search_vector", bytes);
+  }
+}
+
+void VecServiceProcessor::process_search_vector_batch(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("VecService.search_vector_batch", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "VecService.search_vector_batch");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "VecService.search_vector_batch");
+  }
+
+  VecService_search_vector_batch_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "VecService.search_vector_batch", bytes);
+  }
+
+  VecService_search_vector_batch_result result;
+  try {
+    iface_->search_vector_batch(result.success, args.group_id, args.top_k, args.tensor_list, args.time_range_list);
+    result.__isset.success = true;
+  } catch (VecException &e) {
+    result.e = e;
+    result.__isset.e = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "VecService.search_vector_batch");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("search_vector_batch", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "VecService.search_vector_batch");
+  }
+
+  oprot->writeMessageBegin("search_vector_batch", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "VecService.search_vector_batch", bytes);
+  }
+}
+
+::apache::thrift::stdcxx::shared_ptr< ::apache::thrift::TProcessor > VecServiceProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) {
+  ::apache::thrift::ReleaseHandler< VecServiceIfFactory > cleanup(handlerFactory_);
+  ::apache::thrift::stdcxx::shared_ptr< VecServiceIf > handler(handlerFactory_->getHandler(connInfo), cleanup);
+  ::apache::thrift::stdcxx::shared_ptr< ::apache::thrift::TProcessor > processor(new VecServiceProcessor(handler));
+  return processor;
+}
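+
+// ---------------------------------------------------------------------------
+// Illustrative sketch (not generated code): wiring the generated processor
+// into a stock Thrift server. VecServiceNull (declared in VecService.h) is
+// used as a stand-in handler and the port is a made-up placeholder; a real
+// deployment would supply its own VecServiceIf implementation and config.
+// Same stdcxx/std::shared_ptr assumption as the client sketch above.
+// ---------------------------------------------------------------------------
+#include <memory>
+#include <thrift/protocol/TBinaryProtocol.h>
+#include <thrift/server/TThreadedServer.h>
+#include <thrift/transport/TBufferTransports.h>
+#include <thrift/transport/TServerSocket.h>
+
+static void example_serve_forever() {
+  using namespace ::apache::thrift;
+
+  auto handler   = std::make_shared<VecServiceNull>();
+  auto processor = std::make_shared<VecServiceProcessor>(handler);
+
+  server::TThreadedServer server(
+      processor,
+      std::make_shared<transport::TServerSocket>(33001),
+      std::make_shared<transport::TBufferedTransportFactory>(),
+      std::make_shared<protocol::TBinaryProtocolFactory>());
+
+  server.serve();  // blocks, spawning one handler thread per connection
+}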
+
+void VecServiceConcurrentClient::add_group(const VecGroup& group)
+{
+  int32_t seqid = send_add_group(group);
+  recv_add_group(seqid);
+}
+
+int32_t VecServiceConcurrentClient::send_add_group(const VecGroup& group)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("add_group", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_add_group_pargs args;
+  args.group = &group;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void VecServiceConcurrentClient::recv_add_group(const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("add_group") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      VecService_add_group_presult result;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.e) {
+        sentry.commit();
+        throw result.e;
+      }
+      sentry.commit();
+      return;
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
+void VecServiceConcurrentClient::get_group(VecGroup& _return, const std::string& group_id)
+{
+  int32_t seqid = send_get_group(group_id);
+  recv_get_group(_return, seqid);
+}
+
+int32_t VecServiceConcurrentClient::send_get_group(const std::string& group_id)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("get_group", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_get_group_pargs args;
+  args.group_id = &group_id;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void VecServiceConcurrentClient::recv_get_group(VecGroup& _return, const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("get_group") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      VecService_get_group_presult result;
+      result.success = &_return;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.success) {
+        // _return pointer has now been filled
+        sentry.commit();
+        return;
+      }
+      if (result.__isset.e) {
+        sentry.commit();
+        throw result.e;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_group failed: unknown result");
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
+void VecServiceConcurrentClient::del_group(const std::string& group_id)
+{
+  int32_t seqid = send_del_group(group_id);
+  recv_del_group(seqid);
+}
+
+int32_t VecServiceConcurrentClient::send_del_group(const std::string& group_id)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("del_group", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_del_group_pargs args;
+  args.group_id = &group_id;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void VecServiceConcurrentClient::recv_del_group(const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("del_group") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      VecService_del_group_presult result;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.e) {
+        sentry.commit();
+        throw result.e;
+      }
+      sentry.commit();
+      return;
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
+int64_t VecServiceConcurrentClient::add_vector(const std::string& group_id, const VecTensor& tensor)
+{
+  int32_t seqid = send_add_vector(group_id, tensor);
+  return recv_add_vector(seqid);
+}
+
+int32_t VecServiceConcurrentClient::send_add_vector(const std::string& group_id, const VecTensor& tensor)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("add_vector", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_add_vector_pargs args;
+  args.group_id = &group_id;
+  args.tensor = &tensor;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+int64_t VecServiceConcurrentClient::recv_add_vector(const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("add_vector") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      int64_t _return;
+      VecService_add_vector_presult result;
+      result.success = &_return;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.success) {
+        sentry.commit();
+        return _return;
+      }
+      if (result.__isset.e) {
+        sentry.commit();
+        throw result.e;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_vector failed: unknown result");
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
+void VecServiceConcurrentClient::add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list)
+{
+  int32_t seqid = send_add_vector_batch(group_id, tensor_list);
+  recv_add_vector_batch(_return, seqid);
+}
+
+int32_t VecServiceConcurrentClient::send_add_vector_batch(const std::string& group_id, const VecTensorList& tensor_list)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("add_vector_batch", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_add_vector_batch_pargs args;
+  args.group_id = &group_id;
+  args.tensor_list = &tensor_list;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void VecServiceConcurrentClient::recv_add_vector_batch(VecTensorIdList& _return, const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("add_vector_batch") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      VecService_add_vector_batch_presult result;
+      result.success = &_return;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.success) {
+        // _return pointer has now been filled
+        sentry.commit();
+        return;
+      }
+      if (result.__isset.e) {
+        sentry.commit();
+        throw result.e;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_vector_batch failed: unknown result");
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
+void VecServiceConcurrentClient::search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list)
+{
+  int32_t seqid = send_search_vector(group_id, top_k, tensor, time_range_list);
+  recv_search_vector(_return, seqid);
+}
+
+int32_t VecServiceConcurrentClient::send_search_vector(const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("search_vector", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_search_vector_pargs args;
+  args.group_id = &group_id;
+  args.top_k = &top_k;
+  args.tensor = &tensor;
+  args.time_range_list = &time_range_list;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void VecServiceConcurrentClient::recv_search_vector(VecSearchResult& _return, const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("search_vector") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      VecService_search_vector_presult result;
+      result.success = &_return;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.success) {
+        // _return pointer has now been filled
+        sentry.commit();
+        return;
+      }
+      if (result.__isset.e) {
+        sentry.commit();
+        throw result.e;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "search_vector failed: unknown result");
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
+void VecServiceConcurrentClient::search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list)
+{
+  int32_t seqid = send_search_vector_batch(group_id, top_k, tensor_list, time_range_list);
+  recv_search_vector_batch(_return, seqid);
+}
+
+int32_t VecServiceConcurrentClient::send_search_vector_batch(const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("search_vector_batch", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  VecService_search_vector_batch_pargs args;
+  args.group_id = &group_id;
+  args.top_k = &top_k;
+  args.tensor_list = &tensor_list;
+  args.time_range_list = &time_range_list;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void VecServiceConcurrentClient::recv_search_vector_batch(VecSearchResultList& _return, const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("search_vector_batch") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      VecService_search_vector_batch_presult result;
+      result.success = &_return;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.success) {
+        // _return pointer has now been filled
+        sentry.commit();
+        return;
+      }
+      if (result.__isset.e) {
+        sentry.commit();
+        throw result.e;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "search_vector_batch failed: unknown result");
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
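+
+// ---------------------------------------------------------------------------
+// Illustrative sketch (not generated code): the recv_* methods above use
+// TConcurrentClientSyncInfo to match replies to sequence ids, so several
+// threads may share one VecServiceConcurrentClient over a single open
+// connection. The caller is assumed to have constructed the client over an
+// open transport; the group ids are made-up placeholders.
+// ---------------------------------------------------------------------------
+#include <thread>
+
+static void example_concurrent_client(VecServiceConcurrentClient& client) {
+  // Each call serializes its send via TConcurrentSendSentry; a thread whose
+  // reply has not arrived yet parks in waitForWork() until it is woken.
+  std::thread reader([&client] {
+    try {
+      VecGroup group;
+      client.get_group(group, "group_A");
+    } catch (const VecException&) {
+      // server-side error reported for group_A
+    }
+  });
+  std::thread deleter([&client] {
+    try {
+      client.del_group("group_B");
+    } catch (const VecException&) {
+      // server-side error reported for group_B
+    }
+  });
+  reader.join();
+  deleter.join();
+}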
+
+
+
diff --git a/cpp/src/thrift/gen-cpp/VecService.h b/cpp/src/thrift/gen-cpp/VecService.h
new file mode 100644
index 0000000000..5cccdfa03b
--- /dev/null
+++ b/cpp/src/thrift/gen-cpp/VecService.h
@@ -0,0 +1,1170 @@
+/**
+ * Autogenerated by Thrift Compiler (0.12.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef VecService_H
+#define VecService_H
+
+#include <thrift/TDispatchProcessor.h>
+#include <thrift/async/TConcurrentClientSyncInfo.h>
+#include "VectorService_types.h"
+
+
+
+#ifdef _MSC_VER
+  #pragma warning( push )
+  #pragma warning (disable : 4250 ) //inheriting methods via dominance 
+#endif
+
+class VecServiceIf {
+ public:
+  virtual ~VecServiceIf() {}
+
+  /**
+   * group interfaces
+   * 
+   * @param group
+   */
+  virtual void add_group(const VecGroup& group) = 0;
+  virtual void get_group(VecGroup& _return, const std::string& group_id) = 0;
+  virtual void del_group(const std::string& group_id) = 0;
+
+  /**
+   * vector interfaces
+   * 
+   * 
+   * @param group_id
+   * @param tensor
+   */
+  virtual int64_t add_vector(const std::string& group_id, const VecTensor& tensor) = 0;
+  virtual void add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list) = 0;
+
+  /**
+   * search interfaces
+   * if time_range_list is empty, the engine will search without a time limit
+   * 
+   * @param group_id
+   * @param top_k
+   * @param tensor
+   * @param time_range_list
+   */
+  virtual void search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list) = 0;
+  virtual void search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list) = 0;
+};
+
+class VecServiceIfFactory {
+ public:
+  typedef VecServiceIf Handler;
+
+  virtual ~VecServiceIfFactory() {}
+
+  virtual VecServiceIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) = 0;
+  virtual void releaseHandler(VecServiceIf* /* handler */) = 0;
+};
+
+class VecServiceIfSingletonFactory : virtual public VecServiceIfFactory {
+ public:
+  VecServiceIfSingletonFactory(const ::apache::thrift::stdcxx::shared_ptr<VecServiceIf>& iface) : iface_(iface) {}
+  virtual ~VecServiceIfSingletonFactory() {}
+
+  virtual VecServiceIf* getHandler(const ::apache::thrift::TConnectionInfo&) {
+    return iface_.get();
+  }
+  virtual void releaseHandler(VecServiceIf* /* handler */) {}
+
+ protected:
+  ::apache::thrift::stdcxx::shared_ptr<VecServiceIf> iface_;
+};
+
+class VecServiceNull : virtual public VecServiceIf {
+ public:
+  virtual ~VecServiceNull() {}
+  void add_group(const VecGroup& /* group */) {
+    return;
+  }
+  void get_group(VecGroup& /* _return */, const std::string& /* group_id */) {
+    return;
+  }
+  void del_group(const std::string& /* group_id */) {
+    return;
+  }
+  int64_t add_vector(const std::string& /* group_id */, const VecTensor& /* tensor */) {
+    int64_t _return = 0;
+    return _return;
+  }
+  void add_vector_batch(VecTensorIdList& /* _return */, const std::string& /* group_id */, const VecTensorList& /* tensor_list */) {
+    return;
+  }
+  void search_vector(VecSearchResult& /* _return */, const std::string& /* group_id */, const int64_t /* top_k */, const VecTensor& /* tensor */, const VecTimeRangeList& /* time_range_list */) {
+    return;
+  }
+  void search_vector_batch(VecSearchResultList& /* _return */, const std::string& /* group_id */, const int64_t /* top_k */, const VecTensorList& /* tensor_list */, const VecTimeRangeList& /* time_range_list */) {
+    return;
+  }
+};
+
+typedef struct _VecService_add_group_args__isset {
+  _VecService_add_group_args__isset() : group(false) {}
+  bool group :1;
+} _VecService_add_group_args__isset;
+
+class VecService_add_group_args {
+ public:
+
+  VecService_add_group_args(const VecService_add_group_args&);
+  VecService_add_group_args& operator=(const VecService_add_group_args&);
+  VecService_add_group_args() {
+  }
+
+  virtual ~VecService_add_group_args() throw();
+  VecGroup group;
+
+  _VecService_add_group_args__isset __isset;
+
+  void __set_group(const VecGroup& val);
+
+  bool operator == (const VecService_add_group_args & rhs) const
+  {
+    if (!(group == rhs.group))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_add_group_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_add_group_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class VecService_add_group_pargs {
+ public:
+
+
+  virtual ~VecService_add_group_pargs() throw();
+  const VecGroup* group;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_add_group_result__isset {
+  _VecService_add_group_result__isset() : e(false) {}
+  bool e :1;
+} _VecService_add_group_result__isset;
+
+class VecService_add_group_result {
+ public:
+
+  VecService_add_group_result(const VecService_add_group_result&);
+  VecService_add_group_result& operator=(const VecService_add_group_result&);
+  VecService_add_group_result() {
+  }
+
+  virtual ~VecService_add_group_result() throw();
+  VecException e;
+
+  _VecService_add_group_result__isset __isset;
+
+  void __set_e(const VecException& val);
+
+  bool operator == (const VecService_add_group_result & rhs) const
+  {
+    if (!(e == rhs.e))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_add_group_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_add_group_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_add_group_presult__isset {
+  _VecService_add_group_presult__isset() : e(false) {}
+  bool e :1;
+} _VecService_add_group_presult__isset;
+
+class VecService_add_group_presult {
+ public:
+
+
+  virtual ~VecService_add_group_presult() throw();
+  VecException e;
+
+  _VecService_add_group_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _VecService_get_group_args__isset {
+  _VecService_get_group_args__isset() : group_id(false) {}
+  bool group_id :1;
+} _VecService_get_group_args__isset;
+
+class VecService_get_group_args {
+ public:
+
+  VecService_get_group_args(const VecService_get_group_args&);
+  VecService_get_group_args& operator=(const VecService_get_group_args&);
+  VecService_get_group_args() : group_id() {
+  }
+
+  virtual ~VecService_get_group_args() throw();
+  std::string group_id;
+
+  _VecService_get_group_args__isset __isset;
+
+  void __set_group_id(const std::string& val);
+
+  bool operator == (const VecService_get_group_args & rhs) const
+  {
+    if (!(group_id == rhs.group_id))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_get_group_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_get_group_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class VecService_get_group_pargs {
+ public:
+
+
+  virtual ~VecService_get_group_pargs() throw();
+  const std::string* group_id;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_get_group_result__isset {
+  _VecService_get_group_result__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_get_group_result__isset;
+
+class VecService_get_group_result {
+ public:
+
+  VecService_get_group_result(const VecService_get_group_result&);
+  VecService_get_group_result& operator=(const VecService_get_group_result&);
+  VecService_get_group_result() {
+  }
+
+  virtual ~VecService_get_group_result() throw();
+  VecGroup success;
+  VecException e;
+
+  _VecService_get_group_result__isset __isset;
+
+  void __set_success(const VecGroup& val);
+
+  void __set_e(const VecException& val);
+
+  bool operator == (const VecService_get_group_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(e == rhs.e))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_get_group_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_get_group_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_get_group_presult__isset {
+  _VecService_get_group_presult__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_get_group_presult__isset;
+
+class VecService_get_group_presult {
+ public:
+
+
+  virtual ~VecService_get_group_presult() throw();
+  VecGroup* success;
+  VecException e;
+
+  _VecService_get_group_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _VecService_del_group_args__isset {
+  _VecService_del_group_args__isset() : group_id(false) {}
+  bool group_id :1;
+} _VecService_del_group_args__isset;
+
+class VecService_del_group_args {
+ public:
+
+  VecService_del_group_args(const VecService_del_group_args&);
+  VecService_del_group_args& operator=(const VecService_del_group_args&);
+  VecService_del_group_args() : group_id() {
+  }
+
+  virtual ~VecService_del_group_args() throw();
+  std::string group_id;
+
+  _VecService_del_group_args__isset __isset;
+
+  void __set_group_id(const std::string& val);
+
+  bool operator == (const VecService_del_group_args & rhs) const
+  {
+    if (!(group_id == rhs.group_id))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_del_group_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_del_group_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class VecService_del_group_pargs {
+ public:
+
+
+  virtual ~VecService_del_group_pargs() throw();
+  const std::string* group_id;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_del_group_result__isset {
+  _VecService_del_group_result__isset() : e(false) {}
+  bool e :1;
+} _VecService_del_group_result__isset;
+
+class VecService_del_group_result {
+ public:
+
+  VecService_del_group_result(const VecService_del_group_result&);
+  VecService_del_group_result& operator=(const VecService_del_group_result&);
+  VecService_del_group_result() {
+  }
+
+  virtual ~VecService_del_group_result() throw();
+  VecException e;
+
+  _VecService_del_group_result__isset __isset;
+
+  void __set_e(const VecException& val);
+
+  bool operator == (const VecService_del_group_result & rhs) const
+  {
+    if (!(e == rhs.e))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_del_group_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_del_group_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_del_group_presult__isset {
+  _VecService_del_group_presult__isset() : e(false) {}
+  bool e :1;
+} _VecService_del_group_presult__isset;
+
+class VecService_del_group_presult {
+ public:
+
+
+  virtual ~VecService_del_group_presult() throw();
+  VecException e;
+
+  _VecService_del_group_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _VecService_add_vector_args__isset {
+  _VecService_add_vector_args__isset() : group_id(false), tensor(false) {}
+  bool group_id :1;
+  bool tensor :1;
+} _VecService_add_vector_args__isset;
+
+class VecService_add_vector_args {
+ public:
+
+  VecService_add_vector_args(const VecService_add_vector_args&);
+  VecService_add_vector_args& operator=(const VecService_add_vector_args&);
+  VecService_add_vector_args() : group_id() {
+  }
+
+  virtual ~VecService_add_vector_args() throw();
+  std::string group_id;
+  VecTensor tensor;
+
+  _VecService_add_vector_args__isset __isset;
+
+  void __set_group_id(const std::string& val);
+
+  void __set_tensor(const VecTensor& val);
+
+  bool operator == (const VecService_add_vector_args & rhs) const
+  {
+    if (!(group_id == rhs.group_id))
+      return false;
+    if (!(tensor == rhs.tensor))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_add_vector_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_add_vector_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class VecService_add_vector_pargs {
+ public:
+
+
+  virtual ~VecService_add_vector_pargs() throw();
+  const std::string* group_id;
+  const VecTensor* tensor;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_add_vector_result__isset {
+  _VecService_add_vector_result__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_add_vector_result__isset;
+
+class VecService_add_vector_result {
+ public:
+
+  VecService_add_vector_result(const VecService_add_vector_result&);
+  VecService_add_vector_result& operator=(const VecService_add_vector_result&);
+  VecService_add_vector_result() : success(0) {
+  }
+
+  virtual ~VecService_add_vector_result() throw();
+  int64_t success;
+  VecException e;
+
+  _VecService_add_vector_result__isset __isset;
+
+  void __set_success(const int64_t val);
+
+  void __set_e(const VecException& val);
+
+  bool operator == (const VecService_add_vector_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(e == rhs.e))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_add_vector_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_add_vector_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_add_vector_presult__isset {
+  _VecService_add_vector_presult__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_add_vector_presult__isset;
+
+class VecService_add_vector_presult {
+ public:
+
+
+  virtual ~VecService_add_vector_presult() throw();
+  int64_t* success;
+  VecException e;
+
+  _VecService_add_vector_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _VecService_add_vector_batch_args__isset {
+  _VecService_add_vector_batch_args__isset() : group_id(false), tensor_list(false) {}
+  bool group_id :1;
+  bool tensor_list :1;
+} _VecService_add_vector_batch_args__isset;
+
+class VecService_add_vector_batch_args {
+ public:
+
+  VecService_add_vector_batch_args(const VecService_add_vector_batch_args&);
+  VecService_add_vector_batch_args& operator=(const VecService_add_vector_batch_args&);
+  VecService_add_vector_batch_args() : group_id() {
+  }
+
+  virtual ~VecService_add_vector_batch_args() throw();
+  std::string group_id;
+  VecTensorList tensor_list;
+
+  _VecService_add_vector_batch_args__isset __isset;
+
+  void __set_group_id(const std::string& val);
+
+  void __set_tensor_list(const VecTensorList& val);
+
+  bool operator == (const VecService_add_vector_batch_args & rhs) const
+  {
+    if (!(group_id == rhs.group_id))
+      return false;
+    if (!(tensor_list == rhs.tensor_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_add_vector_batch_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_add_vector_batch_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class VecService_add_vector_batch_pargs {
+ public:
+
+
+  virtual ~VecService_add_vector_batch_pargs() throw();
+  const std::string* group_id;
+  const VecTensorList* tensor_list;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_add_vector_batch_result__isset {
+  _VecService_add_vector_batch_result__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_add_vector_batch_result__isset;
+
+class VecService_add_vector_batch_result {
+ public:
+
+  VecService_add_vector_batch_result(const VecService_add_vector_batch_result&);
+  VecService_add_vector_batch_result& operator=(const VecService_add_vector_batch_result&);
+  VecService_add_vector_batch_result() {
+  }
+
+  virtual ~VecService_add_vector_batch_result() throw();
+  VecTensorIdList success;
+  VecException e;
+
+  _VecService_add_vector_batch_result__isset __isset;
+
+  void __set_success(const VecTensorIdList& val);
+
+  void __set_e(const VecException& val);
+
+  bool operator == (const VecService_add_vector_batch_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(e == rhs.e))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_add_vector_batch_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_add_vector_batch_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_add_vector_batch_presult__isset {
+  _VecService_add_vector_batch_presult__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_add_vector_batch_presult__isset;
+
+class VecService_add_vector_batch_presult {
+ public:
+
+
+  virtual ~VecService_add_vector_batch_presult() throw();
+  VecTensorIdList* success;
+  VecException e;
+
+  _VecService_add_vector_batch_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _VecService_search_vector_args__isset {
+  _VecService_search_vector_args__isset() : group_id(false), top_k(false), tensor(false), time_range_list(false) {}
+  bool group_id :1;
+  bool top_k :1;
+  bool tensor :1;
+  bool time_range_list :1;
+} _VecService_search_vector_args__isset;
+
+class VecService_search_vector_args {
+ public:
+
+  VecService_search_vector_args(const VecService_search_vector_args&);
+  VecService_search_vector_args& operator=(const VecService_search_vector_args&);
+  VecService_search_vector_args() : group_id(), top_k(0) {
+  }
+
+  virtual ~VecService_search_vector_args() throw();
+  std::string group_id;
+  int64_t top_k;
+  VecTensor tensor;
+  VecTimeRangeList time_range_list;
+
+  _VecService_search_vector_args__isset __isset;
+
+  void __set_group_id(const std::string& val);
+
+  void __set_top_k(const int64_t val);
+
+  void __set_tensor(const VecTensor& val);
+
+  void __set_time_range_list(const VecTimeRangeList& val);
+
+  bool operator == (const VecService_search_vector_args & rhs) const
+  {
+    if (!(group_id == rhs.group_id))
+      return false;
+    if (!(top_k == rhs.top_k))
+      return false;
+    if (!(tensor == rhs.tensor))
+      return false;
+    if (!(time_range_list == rhs.time_range_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_search_vector_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_search_vector_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class VecService_search_vector_pargs {
+ public:
+
+
+  virtual ~VecService_search_vector_pargs() throw();
+  const std::string* group_id;
+  const int64_t* top_k;
+  const VecTensor* tensor;
+  const VecTimeRangeList* time_range_list;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_search_vector_result__isset {
+  _VecService_search_vector_result__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_search_vector_result__isset;
+
+class VecService_search_vector_result {
+ public:
+
+  VecService_search_vector_result(const VecService_search_vector_result&);
+  VecService_search_vector_result& operator=(const VecService_search_vector_result&);
+  VecService_search_vector_result() {
+  }
+
+  virtual ~VecService_search_vector_result() throw();
+  VecSearchResult success;
+  VecException e;
+
+  _VecService_search_vector_result__isset __isset;
+
+  void __set_success(const VecSearchResult& val);
+
+  void __set_e(const VecException& val);
+
+  bool operator == (const VecService_search_vector_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(e == rhs.e))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_search_vector_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_search_vector_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_search_vector_presult__isset {
+  _VecService_search_vector_presult__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_search_vector_presult__isset;
+
+class VecService_search_vector_presult {
+ public:
+
+
+  virtual ~VecService_search_vector_presult() throw();
+  VecSearchResult* success;
+  VecException e;
+
+  _VecService_search_vector_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _VecService_search_vector_batch_args__isset {
+  _VecService_search_vector_batch_args__isset() : group_id(false), top_k(false), tensor_list(false), time_range_list(false) {}
+  bool group_id :1;
+  bool top_k :1;
+  bool tensor_list :1;
+  bool time_range_list :1;
+} _VecService_search_vector_batch_args__isset;
+
+class VecService_search_vector_batch_args {
+ public:
+
+  VecService_search_vector_batch_args(const VecService_search_vector_batch_args&);
+  VecService_search_vector_batch_args& operator=(const VecService_search_vector_batch_args&);
+  VecService_search_vector_batch_args() : group_id(), top_k(0) {
+  }
+
+  virtual ~VecService_search_vector_batch_args() throw();
+  std::string group_id;
+  int64_t top_k;
+  VecTensorList tensor_list;
+  VecTimeRangeList time_range_list;
+
+  _VecService_search_vector_batch_args__isset __isset;
+
+  void __set_group_id(const std::string& val);
+
+  void __set_top_k(const int64_t val);
+
+  void __set_tensor_list(const VecTensorList& val);
+
+  void __set_time_range_list(const VecTimeRangeList& val);
+
+  bool operator == (const VecService_search_vector_batch_args & rhs) const
+  {
+    if (!(group_id == rhs.group_id))
+      return false;
+    if (!(top_k == rhs.top_k))
+      return false;
+    if (!(tensor_list == rhs.tensor_list))
+      return false;
+    if (!(time_range_list == rhs.time_range_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_search_vector_batch_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_search_vector_batch_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class VecService_search_vector_batch_pargs {
+ public:
+
+
+  virtual ~VecService_search_vector_batch_pargs() throw();
+  const std::string* group_id;
+  const int64_t* top_k;
+  const VecTensorList* tensor_list;
+  const VecTimeRangeList* time_range_list;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_search_vector_batch_result__isset {
+  _VecService_search_vector_batch_result__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_search_vector_batch_result__isset;
+
+class VecService_search_vector_batch_result {
+ public:
+
+  VecService_search_vector_batch_result(const VecService_search_vector_batch_result&);
+  VecService_search_vector_batch_result& operator=(const VecService_search_vector_batch_result&);
+  VecService_search_vector_batch_result() {
+  }
+
+  virtual ~VecService_search_vector_batch_result() throw();
+  VecSearchResultList success;
+  VecException e;
+
+  _VecService_search_vector_batch_result__isset __isset;
+
+  void __set_success(const VecSearchResultList& val);
+
+  void __set_e(const VecException& val);
+
+  bool operator == (const VecService_search_vector_batch_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(e == rhs.e))
+      return false;
+    return true;
+  }
+  bool operator != (const VecService_search_vector_batch_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecService_search_vector_batch_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _VecService_search_vector_batch_presult__isset {
+  _VecService_search_vector_batch_presult__isset() : success(false), e(false) {}
+  bool success :1;
+  bool e :1;
+} _VecService_search_vector_batch_presult__isset;
+
+class VecService_search_vector_batch_presult {
+ public:
+
+
+  virtual ~VecService_search_vector_batch_presult() throw();
+  VecSearchResultList* success;
+  VecException e;
+
+  _VecService_search_vector_batch_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+class VecServiceClient : virtual public VecServiceIf {
+ public:
+  VecServiceClient(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) {
+    setProtocol(prot);
+  }
+  VecServiceClient(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) {
+    setProtocol(iprot,oprot);
+  }
+ private:
+  void setProtocol(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) {
+  setProtocol(prot,prot);
+  }
+  void setProtocol(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) {
+    piprot_=iprot;
+    poprot_=oprot;
+    iprot_ = iprot.get();
+    oprot_ = oprot.get();
+  }
+ public:
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
+    return piprot_;
+  }
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
+    return poprot_;
+  }
+  void add_group(const VecGroup& group);
+  void send_add_group(const VecGroup& group);
+  void recv_add_group();
+  void get_group(VecGroup& _return, const std::string& group_id);
+  void send_get_group(const std::string& group_id);
+  void recv_get_group(VecGroup& _return);
+  void del_group(const std::string& group_id);
+  void send_del_group(const std::string& group_id);
+  void recv_del_group();
+  int64_t add_vector(const std::string& group_id, const VecTensor& tensor);
+  void send_add_vector(const std::string& group_id, const VecTensor& tensor);
+  int64_t recv_add_vector();
+  void add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list);
+  void send_add_vector_batch(const std::string& group_id, const VecTensorList& tensor_list);
+  void recv_add_vector_batch(VecTensorIdList& _return);
+  void search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list);
+  void send_search_vector(const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list);
+  void recv_search_vector(VecSearchResult& _return);
+  void search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list);
+  void send_search_vector_batch(const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list);
+  void recv_search_vector_batch(VecSearchResultList& _return);
+ protected:
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> piprot_;
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> poprot_;
+  ::apache::thrift::protocol::TProtocol* iprot_;
+  ::apache::thrift::protocol::TProtocol* oprot_;
+};
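
The generated VecServiceClient above is the piece the new test_client builds on. The sketch below is not part of the commit; the host, port, group id and dimension are placeholders. It shows the usual Thrift 0.12 blocking client stack around the generated class and passes an empty VecTimeRangeList so the search runs without a time limit, as the interface comment describes:

    #include <vector>
    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TSocket.h>
    #include <thrift/transport/TBufferTransports.h>
    #include "VecService.h"

    using namespace ::apache::thrift;
    using namespace ::apache::thrift::protocol;
    using namespace ::apache::thrift::transport;

    int main() {
      // Blocking client stack: socket -> buffered transport -> binary protocol.
      stdcxx::shared_ptr<TSocket> socket(new TSocket("localhost", 9090));
      stdcxx::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
      stdcxx::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
      VecServiceClient client(protocol);

      transport->open();

      VecTensor tensor;
      tensor.__set_tensor(std::vector<double>(256, 0.5));  // query vector; 256 is a placeholder dimension

      VecSearchResult result;
      VecTimeRangeList no_time_limit;                      // empty list -> search without time limit
      client.search_vector(result, "demo_group", 10, tensor, no_time_limit);

      transport->close();
      return 0;
    }
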
+
+class VecServiceProcessor : public ::apache::thrift::TDispatchProcessor {
+ protected:
+  ::apache::thrift::stdcxx::shared_ptr<VecServiceIf> iface_;
+  virtual bool dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext);
+ private:
+  typedef  void (VecServiceProcessor::*ProcessFunction)(int32_t, ::apache::thrift::protocol::TProtocol*, ::apache::thrift::protocol::TProtocol*, void*);
+  typedef std::map<std::string, ProcessFunction> ProcessMap;
+  ProcessMap processMap_;
+  void process_add_group(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_get_group(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_del_group(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_add_vector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_add_vector_batch(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_search_vector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_search_vector_batch(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ public:
+  VecServiceProcessor(::apache::thrift::stdcxx::shared_ptr<VecServiceIf> iface) :
+    iface_(iface) {
+    processMap_["add_group"] = &VecServiceProcessor::process_add_group;
+    processMap_["get_group"] = &VecServiceProcessor::process_get_group;
+    processMap_["del_group"] = &VecServiceProcessor::process_del_group;
+    processMap_["add_vector"] = &VecServiceProcessor::process_add_vector;
+    processMap_["add_vector_batch"] = &VecServiceProcessor::process_add_vector_batch;
+    processMap_["search_vector"] = &VecServiceProcessor::process_search_vector;
+    processMap_["search_vector_batch"] = &VecServiceProcessor::process_search_vector_batch;
+  }
+
+  virtual ~VecServiceProcessor() {}
+};
+
+class VecServiceProcessorFactory : public ::apache::thrift::TProcessorFactory {
+ public:
+  VecServiceProcessorFactory(const ::apache::thrift::stdcxx::shared_ptr< VecServiceIfFactory >& handlerFactory) :
+      handlerFactory_(handlerFactory) {}
+
+  ::apache::thrift::stdcxx::shared_ptr< ::apache::thrift::TProcessor > getProcessor(const ::apache::thrift::TConnectionInfo& connInfo);
+
+ protected:
+  ::apache::thrift::stdcxx::shared_ptr< VecServiceIfFactory > handlerFactory_;
+};
+
+class VecServiceMultiface : virtual public VecServiceIf {
+ public:
+  VecServiceMultiface(std::vector<apache::thrift::stdcxx::shared_ptr<VecServiceIf> >& ifaces) : ifaces_(ifaces) {
+  }
+  virtual ~VecServiceMultiface() {}
+ protected:
+  std::vector<apache::thrift::stdcxx::shared_ptr<VecServiceIf> > ifaces_;
+  VecServiceMultiface() {}
+  void add(::apache::thrift::stdcxx::shared_ptr<VecServiceIf> iface) {
+    ifaces_.push_back(iface);
+  }
+ public:
+  void add_group(const VecGroup& group) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->add_group(group);
+    }
+    ifaces_[i]->add_group(group);
+  }
+
+  void get_group(VecGroup& _return, const std::string& group_id) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_group(_return, group_id);
+    }
+    ifaces_[i]->get_group(_return, group_id);
+    return;
+  }
+
+  void del_group(const std::string& group_id) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->del_group(group_id);
+    }
+    ifaces_[i]->del_group(group_id);
+  }
+
+  int64_t add_vector(const std::string& group_id, const VecTensor& tensor) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->add_vector(group_id, tensor);
+    }
+    return ifaces_[i]->add_vector(group_id, tensor);
+  }
+
+  void add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->add_vector_batch(_return, group_id, tensor_list);
+    }
+    ifaces_[i]->add_vector_batch(_return, group_id, tensor_list);
+    return;
+  }
+
+  void search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->search_vector(_return, group_id, top_k, tensor, time_range_list);
+    }
+    ifaces_[i]->search_vector(_return, group_id, top_k, tensor, time_range_list);
+    return;
+  }
+
+  void search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->search_vector_batch(_return, group_id, top_k, tensor_list, time_range_list);
+    }
+    ifaces_[i]->search_vector_batch(_return, group_id, top_k, tensor_list, time_range_list);
+    return;
+  }
+
+};
+
+// The 'concurrent' client is a thread-safe client that correctly handles
+// out-of-order responses. It is slower than the regular client, so it should
+// only be used when you need to share a connection among multiple threads.
+class VecServiceConcurrentClient : virtual public VecServiceIf {
+ public:
+  VecServiceConcurrentClient(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) {
+    setProtocol(prot);
+  }
+  VecServiceConcurrentClient(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) {
+    setProtocol(iprot,oprot);
+  }
+ private:
+  void setProtocol(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) {
+  setProtocol(prot,prot);
+  }
+  void setProtocol(apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) {
+    piprot_=iprot;
+    poprot_=oprot;
+    iprot_ = iprot.get();
+    oprot_ = oprot.get();
+  }
+ public:
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
+    return piprot_;
+  }
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
+    return poprot_;
+  }
+  void add_group(const VecGroup& group);
+  int32_t send_add_group(const VecGroup& group);
+  void recv_add_group(const int32_t seqid);
+  void get_group(VecGroup& _return, const std::string& group_id);
+  int32_t send_get_group(const std::string& group_id);
+  void recv_get_group(VecGroup& _return, const int32_t seqid);
+  void del_group(const std::string& group_id);
+  int32_t send_del_group(const std::string& group_id);
+  void recv_del_group(const int32_t seqid);
+  int64_t add_vector(const std::string& group_id, const VecTensor& tensor);
+  int32_t send_add_vector(const std::string& group_id, const VecTensor& tensor);
+  int64_t recv_add_vector(const int32_t seqid);
+  void add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list);
+  int32_t send_add_vector_batch(const std::string& group_id, const VecTensorList& tensor_list);
+  void recv_add_vector_batch(VecTensorIdList& _return, const int32_t seqid);
+  void search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list);
+  int32_t send_search_vector(const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list);
+  void recv_search_vector(VecSearchResult& _return, const int32_t seqid);
+  void search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list);
+  int32_t send_search_vector_batch(const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list);
+  void recv_search_vector_batch(VecSearchResultList& _return, const int32_t seqid);
+ protected:
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> piprot_;
+  apache::thrift::stdcxx::shared_ptr< ::apache::thrift::protocol::TProtocol> poprot_;
+  ::apache::thrift::protocol::TProtocol* iprot_;
+  ::apache::thrift::protocol::TProtocol* oprot_;
+  ::apache::thrift::async::TConcurrentClientSyncInfo sync_;
+};
+
+#ifdef _MSC_VER
+  #pragma warning( pop )
+#endif
+
+
+
+#endif
diff --git a/cpp/src/thrift/gen-cpp/VecService_server.skeleton.cpp b/cpp/src/thrift/gen-cpp/VecService_server.skeleton.cpp
new file mode 100644
index 0000000000..81726ee8fe
--- /dev/null
+++ b/cpp/src/thrift/gen-cpp/VecService_server.skeleton.cpp
@@ -0,0 +1,91 @@
+// This autogenerated skeleton file illustrates how to build a server.
+// You should copy it to another filename to avoid overwriting it.
+
+#include "VecService.h"
+#include <thrift/protocol/TBinaryProtocol.h>
+#include <thrift/server/TSimpleServer.h>
+#include <thrift/transport/TServerSocket.h>
+#include <thrift/transport/TBufferTransports.h>
+
+using namespace ::apache::thrift;
+using namespace ::apache::thrift::protocol;
+using namespace ::apache::thrift::transport;
+using namespace ::apache::thrift::server;
+
+class VecServiceHandler : virtual public VecServiceIf {
+ public:
+  VecServiceHandler() {
+    // Your initialization goes here
+  }
+
+  /**
+   * group interfaces
+   * 
+   * @param group
+   */
+  void add_group(const VecGroup& group) {
+    // Your implementation goes here
+    printf("add_group\n");
+  }
+
+  void get_group(VecGroup& _return, const std::string& group_id) {
+    // Your implementation goes here
+    printf("get_group\n");
+  }
+
+  void del_group(const std::string& group_id) {
+    // Your implementation goes here
+    printf("del_group\n");
+  }
+
+  /**
+   * vector interfaces
+   * 
+   * 
+   * @param group_id
+   * @param tensor
+   */
+  int64_t add_vector(const std::string& group_id, const VecTensor& tensor) {
+    // Your implementation goes here
+    printf("add_vector\n");
+    // The generated skeleton omits a return value here, which is undefined
+    // behavior for a non-void function; return a placeholder id in the stub.
+    return 0;
+  }
+
+  void add_vector_batch(VecTensorIdList& _return, const std::string& group_id, const VecTensorList& tensor_list) {
+    // Your implementation goes here
+    printf("add_vector_batch\n");
+  }
+
+  /**
+   * search interfaces
+   * if time_range_list is empty, the engine will search without a time limit
+   * 
+   * @param group_id
+   * @param top_k
+   * @param tensor
+   * @param time_range_list
+   */
+  void search_vector(VecSearchResult& _return, const std::string& group_id, const int64_t top_k, const VecTensor& tensor, const VecTimeRangeList& time_range_list) {
+    // Your implementation goes here
+    printf("search_vector\n");
+  }
+
+  void search_vector_batch(VecSearchResultList& _return, const std::string& group_id, const int64_t top_k, const VecTensorList& tensor_list, const VecTimeRangeList& time_range_list) {
+    // Your implementation goes here
+    printf("search_vector_batch\n");
+  }
+
+};
+
+int main(int argc, char **argv) {
+  int port = 9090;
+  ::apache::thrift::stdcxx::shared_ptr<VecServiceHandler> handler(new VecServiceHandler());
+  ::apache::thrift::stdcxx::shared_ptr<TProcessor> processor(new VecServiceProcessor(handler));
+  ::apache::thrift::stdcxx::shared_ptr<TServerTransport> serverTransport(new TServerSocket(port));
+  ::apache::thrift::stdcxx::shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
+  ::apache::thrift::stdcxx::shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());
+
+  TSimpleServer server(processor, serverTransport, transportFactory, protocolFactory);
+  server.serve();
+  return 0;
+}
+
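
The skeleton wires the processor into a TSimpleServer, which serves one connection at a time. A minimal variation, assumed here and not part of the commit, that handles several debug clients concurrently keeps the same handler and factories but swaps in TThreadedServer, which dedicates a thread to each accepted connection:

    // Same includes, using directives and VecServiceHandler as the skeleton above, plus:
    #include <thrift/server/TThreadedServer.h>

    int main(int argc, char **argv) {
      int port = 9090;
      ::apache::thrift::stdcxx::shared_ptr<VecServiceHandler> handler(new VecServiceHandler());
      ::apache::thrift::stdcxx::shared_ptr<TProcessor> processor(new VecServiceProcessor(handler));
      ::apache::thrift::stdcxx::shared_ptr<TServerTransport> serverTransport(new TServerSocket(port));
      ::apache::thrift::stdcxx::shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
      ::apache::thrift::stdcxx::shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());

      // One thread per accepted connection instead of a single serving loop.
      TThreadedServer server(processor, serverTransport, transportFactory, protocolFactory);
      server.serve();
      return 0;
    }
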
diff --git a/cpp/src/thrift/gen-cpp/VectorService_constants.cpp b/cpp/src/thrift/gen-cpp/VectorService_constants.cpp
new file mode 100644
index 0000000000..b9275d5961
--- /dev/null
+++ b/cpp/src/thrift/gen-cpp/VectorService_constants.cpp
@@ -0,0 +1,17 @@
+/**
+ * Autogenerated by Thrift Compiler (0.12.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "VectorService_constants.h"
+
+
+
+const VectorServiceConstants g_VectorService_constants;
+
+VectorServiceConstants::VectorServiceConstants() {
+}
+
+
+
diff --git a/cpp/src/thrift/gen-cpp/VectorService_constants.h b/cpp/src/thrift/gen-cpp/VectorService_constants.h
new file mode 100644
index 0000000000..501980cbfe
--- /dev/null
+++ b/cpp/src/thrift/gen-cpp/VectorService_constants.h
@@ -0,0 +1,24 @@
+/**
+ * Autogenerated by Thrift Compiler (0.12.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef VectorService_CONSTANTS_H
+#define VectorService_CONSTANTS_H
+
+#include "VectorService_types.h"
+
+
+
+class VectorServiceConstants {
+ public:
+  VectorServiceConstants();
+
+};
+
+extern const VectorServiceConstants g_VectorService_constants;
+
+
+
+#endif
diff --git a/cpp/src/thrift/gen-cpp/VectorService_types.cpp b/cpp/src/thrift/gen-cpp/VectorService_types.cpp
new file mode 100644
index 0000000000..15f43ec975
--- /dev/null
+++ b/cpp/src/thrift/gen-cpp/VectorService_types.cpp
@@ -0,0 +1,1317 @@
+/**
+ * Autogenerated by Thrift Compiler (0.12.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "VectorService_types.h"
+
+#include <algorithm>
+#include <ostream>
+
+#include <thrift/TToString.h>
+
+
+
+int _kVecErrCodeValues[] = {
+  VecErrCode::SUCCESS,
+  VecErrCode::ILLEGAL_ARGUMENT,
+  VecErrCode::GROUP_NOT_EXISTS,
+  VecErrCode::ILLEGAL_TIME_RANGE,
+  VecErrCode::ILLEGAL_VECTOR_DIMENSION,
+  VecErrCode::OUT_OF_MEMORY
+};
+const char* _kVecErrCodeNames[] = {
+  "SUCCESS",
+  "ILLEGAL_ARGUMENT",
+  "GROUP_NOT_EXISTS",
+  "ILLEGAL_TIME_RANGE",
+  "ILLEGAL_VECTOR_DIMENSION",
+  "OUT_OF_MEMORY"
+};
+const std::map<int, const char*> _VecErrCode_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(6, _kVecErrCodeValues, _kVecErrCodeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+std::ostream& operator<<(std::ostream& out, const VecErrCode::type& val) {
+  std::map<int, const char*>::const_iterator it = _VecErrCode_VALUES_TO_NAMES.find(val);
+  if (it != _VecErrCode_VALUES_TO_NAMES.end()) {
+    out << it->second;
+  } else {
+    out << static_cast<int>(val);
+  }
+  return out;
+}
+
+
+VecException::~VecException() throw() {
+}
+
+
+void VecException::__set_code(const VecErrCode::type val) {
+  this->code = val;
+}
+
+void VecException::__set_reason(const std::string& val) {
+  this->reason = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecException& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          int32_t ecast0;
+          xfer += iprot->readI32(ecast0);
+          this->code = (VecErrCode::type)ecast0;
+          this->__isset.code = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->reason);
+          this->__isset.reason = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecException");
+
+  xfer += oprot->writeFieldBegin("code", ::apache::thrift::protocol::T_I32, 1);
+  xfer += oprot->writeI32((int32_t)this->code);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("reason", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->reason);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecException &a, VecException &b) {
+  using ::std::swap;
+  swap(a.code, b.code);
+  swap(a.reason, b.reason);
+  swap(a.__isset, b.__isset);
+}
+
+VecException::VecException(const VecException& other1) : TException() {
+  code = other1.code;
+  reason = other1.reason;
+  __isset = other1.__isset;
+}
+VecException& VecException::operator=(const VecException& other2) {
+  code = other2.code;
+  reason = other2.reason;
+  __isset = other2.__isset;
+  return *this;
+}
+void VecException::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecException(";
+  out << "code=" << to_string(code);
+  out << ", " << "reason=" << to_string(reason);
+  out << ")";
+}
+
+const char* VecException::what() const throw() {
+  try {
+    std::stringstream ss;
+    ss << "TException - service has thrown: " << *this;
+    this->thriftTExceptionMessageHolder_ = ss.str();
+    return this->thriftTExceptionMessageHolder_.c_str();
+  } catch (const std::exception&) {
+    return "TException - service has thrown: VecException";
+  }
+}
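
VecException carries a code from the VecErrCode enum plus a human-readable reason, and what() folds both into the printTo() output shown above. The sketch below shows how a handler might raise it and how a caller sees it; check_dimension is a hypothetical helper, not part of the generated code or this commit:

    #include <iostream>
    #include <vector>
    #include "VectorService_types.h"

    // Hypothetical validation helper: reject tensors whose size does not match the group dimension.
    void check_dimension(const VecGroup& group, const VecTensor& tensor) {
      if (static_cast<int32_t>(tensor.tensor.size()) != group.dimension) {
        VecException e;
        e.__set_code(VecErrCode::ILLEGAL_VECTOR_DIMENSION);
        e.__set_reason("tensor dimension does not match group dimension");
        throw e;
      }
    }

    int main() {
      VecGroup group;
      group.__set_id("demo_group");
      group.__set_dimension(256);

      VecTensor tensor;
      tensor.__set_tensor(std::vector<double>(100, 0.0));  // deliberately the wrong dimension

      try {
        check_dimension(group, tensor);
      } catch (const VecException& e) {
        // Prints something like: TException - service has thrown: VecException(code=..., reason=...)
        std::cout << e.what() << std::endl;
      }
      return 0;
    }
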
+
+
+VecGroup::~VecGroup() throw() {
+}
+
+
+void VecGroup::__set_id(const std::string& val) {
+  this->id = val;
+}
+
+void VecGroup::__set_dimension(const int32_t val) {
+  this->dimension = val;
+}
+
+void VecGroup::__set_index_type(const int32_t val) {
+  this->index_type = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecGroup& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecGroup::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->id);
+          this->__isset.id = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->dimension);
+          this->__isset.dimension = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->index_type);
+          this->__isset.index_type = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecGroup::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecGroup");
+
+  xfer += oprot->writeFieldBegin("id", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->id);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dimension", ::apache::thrift::protocol::T_I32, 2);
+  xfer += oprot->writeI32(this->dimension);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("index_type", ::apache::thrift::protocol::T_I32, 3);
+  xfer += oprot->writeI32(this->index_type);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecGroup &a, VecGroup &b) {
+  using ::std::swap;
+  swap(a.id, b.id);
+  swap(a.dimension, b.dimension);
+  swap(a.index_type, b.index_type);
+  swap(a.__isset, b.__isset);
+}
+
+VecGroup::VecGroup(const VecGroup& other3) {
+  id = other3.id;
+  dimension = other3.dimension;
+  index_type = other3.index_type;
+  __isset = other3.__isset;
+}
+VecGroup& VecGroup::operator=(const VecGroup& other4) {
+  id = other4.id;
+  dimension = other4.dimension;
+  index_type = other4.index_type;
+  __isset = other4.__isset;
+  return *this;
+}
+void VecGroup::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecGroup(";
+  out << "id=" << to_string(id);
+  out << ", " << "dimension=" << to_string(dimension);
+  out << ", " << "index_type=" << to_string(index_type);
+  out << ")";
+}
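
The generated read/write pairs are what the RPC layer calls under the hood, but they can also be driven directly against an in-memory transport, which is a convenient way to exercise the generated types in isolation. A minimal round-trip sketch, not part of the commit, using TMemoryBuffer and the same binary protocol the skeleton server uses:

    #include <cassert>
    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TBufferTransports.h>
    #include "VectorService_types.h"

    int main() {
      using ::apache::thrift::protocol::TBinaryProtocol;
      using ::apache::thrift::transport::TMemoryBuffer;

      VecGroup original;
      original.__set_id("demo_group");
      original.__set_dimension(256);
      original.__set_index_type(0);  // placeholder; index_type semantics belong to the engine

      // Serialize into an in-memory buffer with the binary protocol.
      ::apache::thrift::stdcxx::shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer());
      ::apache::thrift::stdcxx::shared_ptr<TBinaryProtocol> protocol(new TBinaryProtocol(buffer));
      original.write(protocol.get());

      // Deserialize the same bytes into a fresh struct and compare field by field.
      VecGroup restored;
      restored.read(protocol.get());
      assert(restored.id == original.id);
      assert(restored.dimension == original.dimension);
      assert(restored.index_type == original.index_type);
      return 0;
    }
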
+
+
+VecTensor::~VecTensor() throw() {
+}
+
+
+void VecTensor::__set_tensor(const std::vector<double> & val) {
+  this->tensor = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecTensor& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecTensor::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->tensor.clear();
+            uint32_t _size5;
+            ::apache::thrift::protocol::TType _etype8;
+            xfer += iprot->readListBegin(_etype8, _size5);
+            this->tensor.resize(_size5);
+            uint32_t _i9;
+            for (_i9 = 0; _i9 < _size5; ++_i9)
+            {
+              xfer += iprot->readDouble(this->tensor[_i9]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.tensor = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecTensor::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecTensor");
+
+  xfer += oprot->writeFieldBegin("tensor", ::apache::thrift::protocol::T_LIST, 2);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_DOUBLE, static_cast<uint32_t>(this->tensor.size()));
+    std::vector<double> ::const_iterator _iter10;
+    for (_iter10 = this->tensor.begin(); _iter10 != this->tensor.end(); ++_iter10)
+    {
+      xfer += oprot->writeDouble((*_iter10));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecTensor &a, VecTensor &b) {
+  using ::std::swap;
+  swap(a.tensor, b.tensor);
+  swap(a.__isset, b.__isset);
+}
+
+VecTensor::VecTensor(const VecTensor& other11) {
+  tensor = other11.tensor;
+  __isset = other11.__isset;
+}
+VecTensor& VecTensor::operator=(const VecTensor& other12) {
+  tensor = other12.tensor;
+  __isset = other12.__isset;
+  return *this;
+}
+void VecTensor::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecTensor(";
+  out << "tensor=" << to_string(tensor);
+  out << ")";
+}
+
+
+VecTensorList::~VecTensorList() throw() {
+}
+
+
+void VecTensorList::__set_tensor_list(const std::vector<VecTensor> & val) {
+  this->tensor_list = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecTensorList& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecTensorList::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->tensor_list.clear();
+            uint32_t _size13;
+            ::apache::thrift::protocol::TType _etype16;
+            xfer += iprot->readListBegin(_etype16, _size13);
+            this->tensor_list.resize(_size13);
+            uint32_t _i17;
+            for (_i17 = 0; _i17 < _size13; ++_i17)
+            {
+              xfer += this->tensor_list[_i17].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.tensor_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecTensorList::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecTensorList");
+
+  xfer += oprot->writeFieldBegin("tensor_list", ::apache::thrift::protocol::T_LIST, 1);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tensor_list.size()));
+    std::vector<VecTensor> ::const_iterator _iter18;
+    for (_iter18 = this->tensor_list.begin(); _iter18 != this->tensor_list.end(); ++_iter18)
+    {
+      xfer += (*_iter18).write(oprot);
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecTensorList &a, VecTensorList &b) {
+  using ::std::swap;
+  swap(a.tensor_list, b.tensor_list);
+  swap(a.__isset, b.__isset);
+}
+
+VecTensorList::VecTensorList(const VecTensorList& other19) {
+  tensor_list = other19.tensor_list;
+  __isset = other19.__isset;
+}
+VecTensorList& VecTensorList::operator=(const VecTensorList& other20) {
+  tensor_list = other20.tensor_list;
+  __isset = other20.__isset;
+  return *this;
+}
+void VecTensorList::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecTensorList(";
+  out << "tensor_list=" << to_string(tensor_list);
+  out << ")";
+}
+
+
+VecTensorIdList::~VecTensorIdList() throw() {
+}
+
+
+void VecTensorIdList::__set_id_list(const std::vector<int64_t> & val) {
+  this->id_list = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecTensorIdList& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecTensorIdList::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->id_list.clear();
+            uint32_t _size21;
+            ::apache::thrift::protocol::TType _etype24;
+            xfer += iprot->readListBegin(_etype24, _size21);
+            this->id_list.resize(_size21);
+            uint32_t _i25;
+            for (_i25 = 0; _i25 < _size21; ++_i25)
+            {
+              xfer += iprot->readI64(this->id_list[_i25]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.id_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecTensorIdList::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecTensorIdList");
+
+  xfer += oprot->writeFieldBegin("id_list", ::apache::thrift::protocol::T_LIST, 1);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->id_list.size()));
+    std::vector<int64_t> ::const_iterator _iter26;
+    for (_iter26 = this->id_list.begin(); _iter26 != this->id_list.end(); ++_iter26)
+    {
+      xfer += oprot->writeI64((*_iter26));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecTensorIdList &a, VecTensorIdList &b) {
+  using ::std::swap;
+  swap(a.id_list, b.id_list);
+  swap(a.__isset, b.__isset);
+}
+
+VecTensorIdList::VecTensorIdList(const VecTensorIdList& other27) {
+  id_list = other27.id_list;
+  __isset = other27.__isset;
+}
+VecTensorIdList& VecTensorIdList::operator=(const VecTensorIdList& other28) {
+  id_list = other28.id_list;
+  __isset = other28.__isset;
+  return *this;
+}
+void VecTensorIdList::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecTensorIdList(";
+  out << "id_list=" << to_string(id_list);
+  out << ")";
+}
+
+
+VecSearchResult::~VecSearchResult() throw() {
+}
+
+
+void VecSearchResult::__set_id_list(const std::vector<int64_t> & val) {
+  this->id_list = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecSearchResult& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecSearchResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->id_list.clear();
+            uint32_t _size29;
+            ::apache::thrift::protocol::TType _etype32;
+            xfer += iprot->readListBegin(_etype32, _size29);
+            this->id_list.resize(_size29);
+            uint32_t _i33;
+            for (_i33 = 0; _i33 < _size29; ++_i33)
+            {
+              xfer += iprot->readI64(this->id_list[_i33]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.id_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecSearchResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecSearchResult");
+
+  xfer += oprot->writeFieldBegin("id_list", ::apache::thrift::protocol::T_LIST, 1);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->id_list.size()));
+    std::vector<int64_t> ::const_iterator _iter34;
+    for (_iter34 = this->id_list.begin(); _iter34 != this->id_list.end(); ++_iter34)
+    {
+      xfer += oprot->writeI64((*_iter34));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecSearchResult &a, VecSearchResult &b) {
+  using ::std::swap;
+  swap(a.id_list, b.id_list);
+  swap(a.__isset, b.__isset);
+}
+
+VecSearchResult::VecSearchResult(const VecSearchResult& other35) {
+  id_list = other35.id_list;
+  __isset = other35.__isset;
+}
+VecSearchResult& VecSearchResult::operator=(const VecSearchResult& other36) {
+  id_list = other36.id_list;
+  __isset = other36.__isset;
+  return *this;
+}
+void VecSearchResult::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecSearchResult(";
+  out << "id_list=" << to_string(id_list);
+  out << ")";
+}
+
+
+VecSearchResultList::~VecSearchResultList() throw() {
+}
+
+
+void VecSearchResultList::__set_result_list(const std::vector<VecSearchResult> & val) {
+  this->result_list = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecSearchResultList& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecSearchResultList::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->result_list.clear();
+            uint32_t _size37;
+            ::apache::thrift::protocol::TType _etype40;
+            xfer += iprot->readListBegin(_etype40, _size37);
+            this->result_list.resize(_size37);
+            uint32_t _i41;
+            for (_i41 = 0; _i41 < _size37; ++_i41)
+            {
+              xfer += this->result_list[_i41].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.result_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecSearchResultList::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecSearchResultList");
+
+  xfer += oprot->writeFieldBegin("result_list", ::apache::thrift::protocol::T_LIST, 1);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->result_list.size()));
+    std::vector<VecSearchResult> ::const_iterator _iter42;
+    for (_iter42 = this->result_list.begin(); _iter42 != this->result_list.end(); ++_iter42)
+    {
+      xfer += (*_iter42).write(oprot);
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecSearchResultList &a, VecSearchResultList &b) {
+  using ::std::swap;
+  swap(a.result_list, b.result_list);
+  swap(a.__isset, b.__isset);
+}
+
+VecSearchResultList::VecSearchResultList(const VecSearchResultList& other43) {
+  result_list = other43.result_list;
+  __isset = other43.__isset;
+}
+VecSearchResultList& VecSearchResultList::operator=(const VecSearchResultList& other44) {
+  result_list = other44.result_list;
+  __isset = other44.__isset;
+  return *this;
+}
+void VecSearchResultList::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecSearchResultList(";
+  out << "result_list=" << to_string(result_list);
+  out << ")";
+}
+
+
+VecDateTime::~VecDateTime() throw() {
+}
+
+
+void VecDateTime::__set_year(const int32_t val) {
+  this->year = val;
+}
+
+void VecDateTime::__set_month(const int32_t val) {
+  this->month = val;
+}
+
+void VecDateTime::__set_day(const int32_t val) {
+  this->day = val;
+}
+
+void VecDateTime::__set_hour(const int32_t val) {
+  this->hour = val;
+}
+
+void VecDateTime::__set_minute(const int32_t val) {
+  this->minute = val;
+}
+
+void VecDateTime::__set_second(const int32_t val) {
+  this->second = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecDateTime& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecDateTime::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->year);
+          this->__isset.year = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->month);
+          this->__isset.month = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->day);
+          this->__isset.day = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->hour);
+          this->__isset.hour = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->minute);
+          this->__isset.minute = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->second);
+          this->__isset.second = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecDateTime::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecDateTime");
+
+  xfer += oprot->writeFieldBegin("year", ::apache::thrift::protocol::T_I32, 1);
+  xfer += oprot->writeI32(this->year);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("month", ::apache::thrift::protocol::T_I32, 2);
+  xfer += oprot->writeI32(this->month);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("day", ::apache::thrift::protocol::T_I32, 3);
+  xfer += oprot->writeI32(this->day);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("hour", ::apache::thrift::protocol::T_I32, 4);
+  xfer += oprot->writeI32(this->hour);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("minute", ::apache::thrift::protocol::T_I32, 5);
+  xfer += oprot->writeI32(this->minute);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("second", ::apache::thrift::protocol::T_I32, 6);
+  xfer += oprot->writeI32(this->second);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecDateTime &a, VecDateTime &b) {
+  using ::std::swap;
+  swap(a.year, b.year);
+  swap(a.month, b.month);
+  swap(a.day, b.day);
+  swap(a.hour, b.hour);
+  swap(a.minute, b.minute);
+  swap(a.second, b.second);
+  swap(a.__isset, b.__isset);
+}
+
+VecDateTime::VecDateTime(const VecDateTime& other45) {
+  year = other45.year;
+  month = other45.month;
+  day = other45.day;
+  hour = other45.hour;
+  minute = other45.minute;
+  second = other45.second;
+  __isset = other45.__isset;
+}
+VecDateTime& VecDateTime::operator=(const VecDateTime& other46) {
+  year = other46.year;
+  month = other46.month;
+  day = other46.day;
+  hour = other46.hour;
+  minute = other46.minute;
+  second = other46.second;
+  __isset = other46.__isset;
+  return *this;
+}
+void VecDateTime::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecDateTime(";
+  out << "year=" << to_string(year);
+  out << ", " << "month=" << to_string(month);
+  out << ", " << "day=" << to_string(day);
+  out << ", " << "hour=" << to_string(hour);
+  out << ", " << "minute=" << to_string(minute);
+  out << ", " << "second=" << to_string(second);
+  out << ")";
+}
+
+
+VecTimeRange::~VecTimeRange() throw() {
+}
+
+
+void VecTimeRange::__set_time_begin(const VecDateTime& val) {
+  this->time_begin = val;
+}
+
+void VecTimeRange::__set_begine_closed(const bool val) {
+  this->begine_closed = val;
+}
+
+void VecTimeRange::__set_time_end(const VecDateTime& val) {
+  this->time_end = val;
+}
+
+void VecTimeRange::__set_end_closed(const bool val) {
+  this->end_closed = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecTimeRange& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecTimeRange::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->time_begin.read(iprot);
+          this->__isset.time_begin = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->begine_closed);
+          this->__isset.begine_closed = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->time_end.read(iprot);
+          this->__isset.time_end = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->end_closed);
+          this->__isset.end_closed = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecTimeRange::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecTimeRange");
+
+  xfer += oprot->writeFieldBegin("time_begin", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->time_begin.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("begine_closed", ::apache::thrift::protocol::T_BOOL, 2);
+  xfer += oprot->writeBool(this->begine_closed);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("time_end", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += this->time_end.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("end_closed", ::apache::thrift::protocol::T_BOOL, 4);
+  xfer += oprot->writeBool(this->end_closed);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecTimeRange &a, VecTimeRange &b) {
+  using ::std::swap;
+  swap(a.time_begin, b.time_begin);
+  swap(a.begine_closed, b.begine_closed);
+  swap(a.time_end, b.time_end);
+  swap(a.end_closed, b.end_closed);
+  swap(a.__isset, b.__isset);
+}
+
+VecTimeRange::VecTimeRange(const VecTimeRange& other47) {
+  time_begin = other47.time_begin;
+  begine_closed = other47.begine_closed;
+  time_end = other47.time_end;
+  end_closed = other47.end_closed;
+  __isset = other47.__isset;
+}
+VecTimeRange& VecTimeRange::operator=(const VecTimeRange& other48) {
+  time_begin = other48.time_begin;
+  begine_closed = other48.begine_closed;
+  time_end = other48.time_end;
+  end_closed = other48.end_closed;
+  __isset = other48.__isset;
+  return *this;
+}
+void VecTimeRange::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecTimeRange(";
+  out << "time_begin=" << to_string(time_begin);
+  out << ", " << "begine_closed=" << to_string(begine_closed);
+  out << ", " << "time_end=" << to_string(time_end);
+  out << ", " << "end_closed=" << to_string(end_closed);
+  out << ")";
+}
+
+
+VecTimeRangeList::~VecTimeRangeList() throw() {
+}
+
+
+void VecTimeRangeList::__set_range_list(const std::vector<VecTimeRange> & val) {
+  this->range_list = val;
+}
+std::ostream& operator<<(std::ostream& out, const VecTimeRangeList& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+uint32_t VecTimeRangeList::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->range_list.clear();
+            uint32_t _size49;
+            ::apache::thrift::protocol::TType _etype52;
+            xfer += iprot->readListBegin(_etype52, _size49);
+            this->range_list.resize(_size49);
+            uint32_t _i53;
+            for (_i53 = 0; _i53 < _size49; ++_i53)
+            {
+              xfer += this->range_list[_i53].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.range_list = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t VecTimeRangeList::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("VecTimeRangeList");
+
+  xfer += oprot->writeFieldBegin("range_list", ::apache::thrift::protocol::T_LIST, 1);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->range_list.size()));
+    std::vector<VecTimeRange> ::const_iterator _iter54;
+    for (_iter54 = this->range_list.begin(); _iter54 != this->range_list.end(); ++_iter54)
+    {
+      xfer += (*_iter54).write(oprot);
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(VecTimeRangeList &a, VecTimeRangeList &b) {
+  using ::std::swap;
+  swap(a.range_list, b.range_list);
+  swap(a.__isset, b.__isset);
+}
+
+VecTimeRangeList::VecTimeRangeList(const VecTimeRangeList& other55) {
+  range_list = other55.range_list;
+  __isset = other55.__isset;
+}
+VecTimeRangeList& VecTimeRangeList::operator=(const VecTimeRangeList& other56) {
+  range_list = other56.range_list;
+  __isset = other56.__isset;
+  return *this;
+}
+void VecTimeRangeList::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "VecTimeRangeList(";
+  out << "range_list=" << to_string(range_list);
+  out << ")";
+}
+
+
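+// For reference, the generated read()/write() pairs above can be driven directly
+// against an in-memory transport. Below is a minimal round-trip sketch for
+// VecTensorIdList, assuming the standard Thrift 0.12 TMemoryBuffer and
+// TBinaryProtocol headers; the helper name RoundTripExample is illustrative
+// only and is not part of this change.
+//
+//    #include <thrift/protocol/TBinaryProtocol.h>
+//    #include <thrift/transport/TBufferTransports.h>
+//    #include "VectorService_types.h"
+//
+//    // Round-trip a VecTensorIdList through the generated code using an
+//    // in-memory transport.
+//    void RoundTripExample() {
+//        using apache::thrift::protocol::TBinaryProtocol;
+//        using apache::thrift::transport::TMemoryBuffer;
+//
+//        apache::thrift::stdcxx::shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer());
+//        TBinaryProtocol protocol(buffer);
+//
+//        VecTensorIdList ids;
+//        ids.__set_id_list({1, 2, 3});
+//        ids.write(&protocol);    // serialize into the buffer
+//
+//        VecTensorIdList decoded;
+//        decoded.read(&protocol); // deserialize from the same buffer
+//    }
+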
diff --git a/cpp/src/thrift/gen-cpp/VectorService_types.h b/cpp/src/thrift/gen-cpp/VectorService_types.h
new file mode 100644
index 0000000000..0f8a3a2e4b
--- /dev/null
+++ b/cpp/src/thrift/gen-cpp/VectorService_types.h
@@ -0,0 +1,548 @@
+/**
+ * Autogenerated by Thrift Compiler (0.12.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef VectorService_TYPES_H
+#define VectorService_TYPES_H
+
+#include <iosfwd>
+
+#include <thrift/Thrift.h>
+#include <thrift/TApplicationException.h>
+#include <thrift/TBase.h>
+#include <thrift/protocol/TProtocol.h>
+#include <thrift/transport/TTransport.h>
+
+#include <thrift/stdcxx.h>
+
+
+
+
+struct VecErrCode {
+  enum type {
+    SUCCESS = 0,
+    ILLEGAL_ARGUMENT = 1,
+    GROUP_NOT_EXISTS = 2,
+    ILLEGAL_TIME_RANGE = 3,
+    ILLEGAL_VECTOR_DIMENSION = 4,
+    OUT_OF_MEMORY = 5
+  };
+};
+
+extern const std::map<int, const char*> _VecErrCode_VALUES_TO_NAMES;
+
+std::ostream& operator<<(std::ostream& out, const VecErrCode::type& val);
+
+class VecException;
+
+class VecGroup;
+
+class VecTensor;
+
+class VecTensorList;
+
+class VecTensorIdList;
+
+class VecSearchResult;
+
+class VecSearchResultList;
+
+class VecDateTime;
+
+class VecTimeRange;
+
+class VecTimeRangeList;
+
+typedef struct _VecException__isset {
+  _VecException__isset() : code(false), reason(false) {}
+  bool code :1;
+  bool reason :1;
+} _VecException__isset;
+
+class VecException : public ::apache::thrift::TException {
+ public:
+
+  VecException(const VecException&);
+  VecException& operator=(const VecException&);
+  VecException() : code((VecErrCode::type)0), reason() {
+  }
+
+  virtual ~VecException() throw();
+  VecErrCode::type code;
+  std::string reason;
+
+  _VecException__isset __isset;
+
+  void __set_code(const VecErrCode::type val);
+
+  void __set_reason(const std::string& val);
+
+  bool operator == (const VecException & rhs) const
+  {
+    if (!(code == rhs.code))
+      return false;
+    if (!(reason == rhs.reason))
+      return false;
+    return true;
+  }
+  bool operator != (const VecException &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecException & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+  mutable std::string thriftTExceptionMessageHolder_;
+  const char* what() const throw();
+};
+
+void swap(VecException &a, VecException &b);
+
+std::ostream& operator<<(std::ostream& out, const VecException& obj);
+
+typedef struct _VecGroup__isset {
+  _VecGroup__isset() : id(false), dimension(false), index_type(false) {}
+  bool id :1;
+  bool dimension :1;
+  bool index_type :1;
+} _VecGroup__isset;
+
+class VecGroup : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecGroup(const VecGroup&);
+  VecGroup& operator=(const VecGroup&);
+  VecGroup() : id(), dimension(0), index_type(0) {
+  }
+
+  virtual ~VecGroup() throw();
+  std::string id;
+  int32_t dimension;
+  int32_t index_type;
+
+  _VecGroup__isset __isset;
+
+  void __set_id(const std::string& val);
+
+  void __set_dimension(const int32_t val);
+
+  void __set_index_type(const int32_t val);
+
+  bool operator == (const VecGroup & rhs) const
+  {
+    if (!(id == rhs.id))
+      return false;
+    if (!(dimension == rhs.dimension))
+      return false;
+    if (!(index_type == rhs.index_type))
+      return false;
+    return true;
+  }
+  bool operator != (const VecGroup &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecGroup & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecGroup &a, VecGroup &b);
+
+std::ostream& operator<<(std::ostream& out, const VecGroup& obj);
+
+typedef struct _VecTensor__isset {
+  _VecTensor__isset() : tensor(false) {}
+  bool tensor :1;
+} _VecTensor__isset;
+
+class VecTensor : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecTensor(const VecTensor&);
+  VecTensor& operator=(const VecTensor&);
+  VecTensor() {
+  }
+
+  virtual ~VecTensor() throw();
+  std::vector<double>  tensor;
+
+  _VecTensor__isset __isset;
+
+  void __set_tensor(const std::vector<double> & val);
+
+  bool operator == (const VecTensor & rhs) const
+  {
+    if (!(tensor == rhs.tensor))
+      return false;
+    return true;
+  }
+  bool operator != (const VecTensor &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecTensor & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecTensor &a, VecTensor &b);
+
+std::ostream& operator<<(std::ostream& out, const VecTensor& obj);
+
+typedef struct _VecTensorList__isset {
+  _VecTensorList__isset() : tensor_list(false) {}
+  bool tensor_list :1;
+} _VecTensorList__isset;
+
+class VecTensorList : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecTensorList(const VecTensorList&);
+  VecTensorList& operator=(const VecTensorList&);
+  VecTensorList() {
+  }
+
+  virtual ~VecTensorList() throw();
+  std::vector<VecTensor>  tensor_list;
+
+  _VecTensorList__isset __isset;
+
+  void __set_tensor_list(const std::vector<VecTensor> & val);
+
+  bool operator == (const VecTensorList & rhs) const
+  {
+    if (!(tensor_list == rhs.tensor_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecTensorList &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecTensorList & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecTensorList &a, VecTensorList &b);
+
+std::ostream& operator<<(std::ostream& out, const VecTensorList& obj);
+
+typedef struct _VecTensorIdList__isset {
+  _VecTensorIdList__isset() : id_list(false) {}
+  bool id_list :1;
+} _VecTensorIdList__isset;
+
+class VecTensorIdList : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecTensorIdList(const VecTensorIdList&);
+  VecTensorIdList& operator=(const VecTensorIdList&);
+  VecTensorIdList() {
+  }
+
+  virtual ~VecTensorIdList() throw();
+  std::vector<int64_t>  id_list;
+
+  _VecTensorIdList__isset __isset;
+
+  void __set_id_list(const std::vector<int64_t> & val);
+
+  bool operator == (const VecTensorIdList & rhs) const
+  {
+    if (!(id_list == rhs.id_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecTensorIdList &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecTensorIdList & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecTensorIdList &a, VecTensorIdList &b);
+
+std::ostream& operator<<(std::ostream& out, const VecTensorIdList& obj);
+
+typedef struct _VecSearchResult__isset {
+  _VecSearchResult__isset() : id_list(false) {}
+  bool id_list :1;
+} _VecSearchResult__isset;
+
+class VecSearchResult : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecSearchResult(const VecSearchResult&);
+  VecSearchResult& operator=(const VecSearchResult&);
+  VecSearchResult() {
+  }
+
+  virtual ~VecSearchResult() throw();
+  std::vector<int64_t>  id_list;
+
+  _VecSearchResult__isset __isset;
+
+  void __set_id_list(const std::vector<int64_t> & val);
+
+  bool operator == (const VecSearchResult & rhs) const
+  {
+    if (!(id_list == rhs.id_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecSearchResult &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecSearchResult & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecSearchResult &a, VecSearchResult &b);
+
+std::ostream& operator<<(std::ostream& out, const VecSearchResult& obj);
+
+typedef struct _VecSearchResultList__isset {
+  _VecSearchResultList__isset() : result_list(false) {}
+  bool result_list :1;
+} _VecSearchResultList__isset;
+
+class VecSearchResultList : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecSearchResultList(const VecSearchResultList&);
+  VecSearchResultList& operator=(const VecSearchResultList&);
+  VecSearchResultList() {
+  }
+
+  virtual ~VecSearchResultList() throw();
+  std::vector<VecSearchResult>  result_list;
+
+  _VecSearchResultList__isset __isset;
+
+  void __set_result_list(const std::vector<VecSearchResult> & val);
+
+  bool operator == (const VecSearchResultList & rhs) const
+  {
+    if (!(result_list == rhs.result_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecSearchResultList &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecSearchResultList & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecSearchResultList &a, VecSearchResultList &b);
+
+std::ostream& operator<<(std::ostream& out, const VecSearchResultList& obj);
+
+typedef struct _VecDateTime__isset {
+  _VecDateTime__isset() : year(false), month(false), day(false), hour(false), minute(false), second(false) {}
+  bool year :1;
+  bool month :1;
+  bool day :1;
+  bool hour :1;
+  bool minute :1;
+  bool second :1;
+} _VecDateTime__isset;
+
+class VecDateTime : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecDateTime(const VecDateTime&);
+  VecDateTime& operator=(const VecDateTime&);
+  VecDateTime() : year(0), month(0), day(0), hour(0), minute(0), second(0) {
+  }
+
+  virtual ~VecDateTime() throw();
+  int32_t year;
+  int32_t month;
+  int32_t day;
+  int32_t hour;
+  int32_t minute;
+  int32_t second;
+
+  _VecDateTime__isset __isset;
+
+  void __set_year(const int32_t val);
+
+  void __set_month(const int32_t val);
+
+  void __set_day(const int32_t val);
+
+  void __set_hour(const int32_t val);
+
+  void __set_minute(const int32_t val);
+
+  void __set_second(const int32_t val);
+
+  bool operator == (const VecDateTime & rhs) const
+  {
+    if (!(year == rhs.year))
+      return false;
+    if (!(month == rhs.month))
+      return false;
+    if (!(day == rhs.day))
+      return false;
+    if (!(hour == rhs.hour))
+      return false;
+    if (!(minute == rhs.minute))
+      return false;
+    if (!(second == rhs.second))
+      return false;
+    return true;
+  }
+  bool operator != (const VecDateTime &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecDateTime & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecDateTime &a, VecDateTime &b);
+
+std::ostream& operator<<(std::ostream& out, const VecDateTime& obj);
+
+typedef struct _VecTimeRange__isset {
+  _VecTimeRange__isset() : time_begin(false), begine_closed(false), time_end(false), end_closed(false) {}
+  bool time_begin :1;
+  bool begine_closed :1;
+  bool time_end :1;
+  bool end_closed :1;
+} _VecTimeRange__isset;
+
+class VecTimeRange : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecTimeRange(const VecTimeRange&);
+  VecTimeRange& operator=(const VecTimeRange&);
+  VecTimeRange() : begine_closed(0), end_closed(0) {
+  }
+
+  virtual ~VecTimeRange() throw();
+  VecDateTime time_begin;
+  bool begine_closed;
+  VecDateTime time_end;
+  bool end_closed;
+
+  _VecTimeRange__isset __isset;
+
+  void __set_time_begin(const VecDateTime& val);
+
+  void __set_begine_closed(const bool val);
+
+  void __set_time_end(const VecDateTime& val);
+
+  void __set_end_closed(const bool val);
+
+  bool operator == (const VecTimeRange & rhs) const
+  {
+    if (!(time_begin == rhs.time_begin))
+      return false;
+    if (!(begine_closed == rhs.begine_closed))
+      return false;
+    if (!(time_end == rhs.time_end))
+      return false;
+    if (!(end_closed == rhs.end_closed))
+      return false;
+    return true;
+  }
+  bool operator != (const VecTimeRange &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecTimeRange & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecTimeRange &a, VecTimeRange &b);
+
+std::ostream& operator<<(std::ostream& out, const VecTimeRange& obj);
+
+typedef struct _VecTimeRangeList__isset {
+  _VecTimeRangeList__isset() : range_list(false) {}
+  bool range_list :1;
+} _VecTimeRangeList__isset;
+
+class VecTimeRangeList : public virtual ::apache::thrift::TBase {
+ public:
+
+  VecTimeRangeList(const VecTimeRangeList&);
+  VecTimeRangeList& operator=(const VecTimeRangeList&);
+  VecTimeRangeList() {
+  }
+
+  virtual ~VecTimeRangeList() throw();
+  std::vector<VecTimeRange>  range_list;
+
+  _VecTimeRangeList__isset __isset;
+
+  void __set_range_list(const std::vector<VecTimeRange> & val);
+
+  bool operator == (const VecTimeRangeList & rhs) const
+  {
+    if (!(range_list == rhs.range_list))
+      return false;
+    return true;
+  }
+  bool operator != (const VecTimeRangeList &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const VecTimeRangeList & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(VecTimeRangeList &a, VecTimeRangeList &b);
+
+std::ostream& operator<<(std::ostream& out, const VecTimeRangeList& obj);
+
+
+
+#endif
diff --git a/cpp/src/utils/CommonUtil.cpp b/cpp/src/utils/CommonUtil.cpp
index 8d991756ad..985a7ce761 100644
--- a/cpp/src/utils/CommonUtil.cpp
+++ b/cpp/src/utils/CommonUtil.cpp
@@ -33,12 +33,12 @@ namespace fs = boost::filesystem;
 
 void CommonUtil::PrintInfo(const std::string& info){
     SERVER_LOG_INFO << info;
-//    std::cout << info << std::endl;
+    std::cout << "Info:" << info << std::endl;
 }
 
 void CommonUtil::PrintError(const std::string& info){
     SERVER_LOG_ERROR << info;
-//    std::cout << info << std::endl;
+    std::cout << "Error:" << info << std::endl;
 }
 
 bool CommonUtil::GetSystemMemInfo(unsigned long &totalMem, unsigned long &freeMem) {
diff --git a/cpp/src/utils/SignalUtil.cpp b/cpp/src/utils/SignalUtil.cpp
index 8ffa89d942..bc27691abb 100644
--- a/cpp/src/utils/SignalUtil.cpp
+++ b/cpp/src/utils/SignalUtil.cpp
@@ -24,7 +24,6 @@ void SignalUtil::HandleSignal(int signum){
             server_ptr->Stop();
 
             exit(0);
-
         }
         default:{
             SignalUtil::PrintStacktrace();
diff --git a/cpp/test_client/CMakeLists.txt b/cpp/test_client/CMakeLists.txt
new file mode 100644
index 0000000000..1de68f6e4d
--- /dev/null
+++ b/cpp/test_client/CMakeLists.txt
@@ -0,0 +1,42 @@
+#-------------------------------------------------------------------------------
+# Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
+# Unauthorized copying of this file, via any medium is strictly prohibited.
+# Proprietary and confidential.
+#-------------------------------------------------------------------------------
+
+include_directories(../src)
+
+aux_source_directory(./src client_src)
+aux_source_directory(../src/config config_files)
+
+set(util_files
+        ../src/utils/CommonUtil.cpp
+        ../src/utils/LogUtil.cpp
+        ../src/utils/TimeRecorder.cpp)
+
+set(service_files
+    ../src/thrift/gen-cpp/VecService.cpp
+    ../src/thrift/gen-cpp/VectorService_constants.cpp
+    ../src/thrift/gen-cpp/VectorService_types.cpp)
+
+# link_directories must be called before add_executable for it to apply to this target
+link_directories(
+    "${CMAKE_BINARY_DIR}/lib"
+    "${VECWISE_THIRD_PARTY_BUILD}/lib"
+)
+
+add_executable(test_client
+    ./main.cpp
+    ${client_src}
+    ${service_files}
+    ${config_files}
+    ${util_files}
+    ${VECWISE_THIRD_PARTY_BUILD}/include/easylogging++.cc)
+
+set(client_libs
+    yaml-cpp
+    boost_system
+    boost_filesystem
+    thrift
+    pthread)
+
+target_link_libraries(test_client ${client_libs})
diff --git a/cpp/test_client/main.cpp b/cpp/test_client/main.cpp
new file mode 100644
index 0000000000..8b68d1c1da
--- /dev/null
+++ b/cpp/test_client/main.cpp
@@ -0,0 +1,74 @@
+////////////////////////////////////////////////////////////////////////////////
+// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
+// Unauthorized copying of this file, via any medium is strictly prohibited.
+// Proprietary and confidential.
+////////////////////////////////////////////////////////////////////////////////
+
+#include <getopt.h>
+#include <libgen.h>
+#include <cstring>
+#include <string>
+#include <easylogging++.h>
+
+#include "src/ClientApp.h"
+
+INITIALIZE_EASYLOGGINGPP
+
+void print_help(const std::string &app_name);
+
+int
+main(int argc, char *argv[]) {
+    printf("Client start...\n");
+
+    std::string app_name = basename(argv[0]);
+    static struct option long_options[] = {{"conf_file", required_argument, 0, 'c'},
+                                           {"help", no_argument, 0, 'h'},
+                                           {NULL, 0, 0, 0}};
+
+    int option_index = 0;
+    std::string config_filename;
+
+    if(argc < 2) {
+        print_help(app_name);
+        printf("Client exit...\n");
+        return EXIT_FAILURE;
+    }
+
+    int value;
+    while ((value = getopt_long(argc, argv, "c:p:dh", long_options, &option_index)) != -1) {
+        switch (value) {
+            case 'c': {
+                // optarg points into argv; copying into the std::string is enough,
+                // no strdup/free round-trip is needed
+                config_filename = optarg;
+                printf("Loading configuration from: %s\n", config_filename.c_str());
+                break;
+            }
+            case 'h':
+                print_help(app_name);
+                return EXIT_SUCCESS;
+            case '?':
+                print_help(app_name);
+                return EXIT_FAILURE;
+            default:
+                print_help(app_name);
+                break;
+        }
+    }
+
+    zilliz::vecwise::client::ClientApp app;
+    app.Run(config_filename);
+
+    printf("Client exit...\n");
+    return 0;
+}
+
+void
+print_help(const std::string &app_name) {
+    printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
+    printf("  Options:\n");
+    printf("   -h --help                 Print this help\n");
+    printf("   -c --conf_file filename   Read configuration from the file\n");
+    printf("\n");
+}
diff --git a/cpp/test_client/src/ClientApp.cpp b/cpp/test_client/src/ClientApp.cpp
new file mode 100644
index 0000000000..03403c7b74
--- /dev/null
+++ b/cpp/test_client/src/ClientApp.cpp
@@ -0,0 +1,19 @@
+/*******************************************************************************
+ * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
+ * Unauthorized copying of this file, via any medium is strictly prohibited.
+ * Proprietary and confidential.
+ ******************************************************************************/
+#include "ClientApp.h"
+
+namespace zilliz {
+namespace vecwise {
+namespace client {
+
+void ClientApp::Run(const std::string &config_file) {
+    // Placeholder: the debug-client logic (connecting to the server through
+    // the generated VecService interface) is not implemented yet.
+}
+
+} // namespace client
+} // namespace vecwise
+} // namespace zilliz
+
diff --git a/cpp/test_client/src/ClientApp.h b/cpp/test_client/src/ClientApp.h
new file mode 100644
index 0000000000..1518730d35
--- /dev/null
+++ b/cpp/test_client/src/ClientApp.h
@@ -0,0 +1,21 @@
+/*******************************************************************************
+ * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
+ * Unauthorized copying of this file, via any medium is strictly prohibited.
+ * Proprietary and confidential.
+ ******************************************************************************/
+#pragma once
+
+#include <string>
+
+namespace zilliz {
+namespace vecwise {
+namespace client {
+
+class ClientApp {
+public:
+    void Run(const std::string& config_file);
+};
+
+} // namespace client
+} // namespace vecwise
+} // namespace zilliz
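
ClientApp::Run is still an empty stub. A possible first cut is sketched below; it assumes the Thrift-generated VecServiceClient (the compiler's standard client class for the VecService definition) and uses placeholder host/port values that would really be read from server_config.yaml. It is a sketch, not part of this patch.

    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TSocket.h>
    #include <thrift/transport/TBufferTransports.h>
    #include "thrift/gen-cpp/VecService.h"

    #include "ClientApp.h"

    namespace zilliz {
    namespace vecwise {
    namespace client {

    void ClientApp::Run(const std::string& config_file) {
        using namespace ::apache::thrift;
        using namespace ::apache::thrift::protocol;
        using namespace ::apache::thrift::transport;

        // Placeholder endpoint: host and port should come from config_file,
        // the same server_config.yaml used by vecwise_engine_server.
        stdcxx::shared_ptr<TSocket> socket(new TSocket("127.0.0.1", 33001));
        stdcxx::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
        stdcxx::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));

        VecServiceClient client(protocol);
        transport->open();
        // ... issue VecService RPC calls through `client` here ...
        transport->close();
    }

    } // namespace client
    } // namespace vecwise
    } // namespace zilliz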