Initial commit with some notes and proto

pull/24376/head
Paul Dix 2019-11-22 16:59:04 -05:00
commit b9b5a815b7
6 changed files with 1343 additions and 0 deletions

3
.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
# Cargo build artifacts
/target
# rustfmt backup files
**/*.rs.bk
# JetBrains IDE project metadata
.idea/

1147
Cargo.lock generated Normal file

File diff suppressed because it is too large.

19
Cargo.toml Normal file
View File

@ -0,0 +1,19 @@
[package]
name = "delorean"
version = "0.1.0"
authors = ["Paul Dix <paul@pauldix.net>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# Pre-async/await (0.1-era) futures/tokio stack; tower-grpc + tower-hyper
# provide the gRPC-over-HTTP/2 transport, prost the protobuf runtime.
futures = "0.1"
tokio = "0.1.22"
tower-grpc = "0.1.1"
tower-hyper = "0.1.1"
tower-util = "0.1.0"
prost = "0.5.0"
bytes = "0.4.12"
[build-dependencies]
# Code generator invoked by build.rs to turn delorean.proto into Rust.
# NOTE(review): unpinned git dependency — upstream changes can break the
# build; consider pinning a rev/tag or using a released version.
tower-grpc-build = { git = "https://github.com/tower-rs/tower-grpc" }

11
build.rs Normal file
View File

@ -0,0 +1,11 @@
use tower_grpc_build;
/// Build script: generates Rust gRPC client and server stubs from the
/// delorean protobuf definition before the crate itself is compiled.
fn main() {
    // Generate both client and server code into $OUT_DIR (picked up by
    // the include! in src/main.rs).
    let result = tower_grpc_build::Config::new()
        .enable_server(true)
        .enable_client(true)
        .build(&["proto/delorean/delorean.proto"], &["proto/delorean"]);

    if let Err(e) = result {
        panic!("protobuf compilation failed: {}", e);
    }

    // Only re-run codegen when the proto definition itself changes.
    println!("cargo:rerun-if-changed=proto/delorean/delorean.proto");
}

proto/delorean/delorean.proto Normal file
View File

@ -0,0 +1,82 @@
syntax = "proto3";
package delorean;
// TODO: how should requests handle authentication & authorization?
// Creates `bucket` under the organization identified by `org_id`.
message CreateBucketRequest {
uint32 org_id = 1;
Bucket bucket = 2;
}
// Empty acknowledgement for CreateBucket.
message CreateBucketResponse {
}
// Empty acknowledgement for DeleteBucket.
message DeleteBucketResponse {
}
// Identifies the bucket to delete by its id.
message DeleteBucketRequest {
uint32 id = 1;
}
// All buckets returned for the Organization passed to GetBuckets.
message GetBucketsResponse {
repeated Bucket buckets = 1;
}
// An organization and the buckets it owns.
message Organization {
uint32 id = 1;
string name = 2;
repeated Bucket buckets = 3;
}
// A named container for data with a retention policy.
// NOTE(review): `retention` is a free-form string here — confirm the
// expected format (e.g. a duration like "30d") before callers rely on it.
message Bucket {
uint32 id = 1;
string name = 2;
string retention = 3;
}
// Placeholder for S3 partitioning configuration; fields TBD (see the
// design notes in the comment block at the bottom of this file).
message S3PartitionRule {
}
// Bucket management RPCs.
service Delorean {
rpc CreateBucket(CreateBucketRequest) returns (CreateBucketResponse) {}
rpc DeleteBucket(DeleteBucketRequest) returns (DeleteBucketResponse) {}
rpc GetBuckets(Organization) returns (GetBucketsResponse) {}
}
/*
S3 Organization scheme
Considerations:
* Buckets are tied to a region, so storage servers should use buckets from a specific region
one bucket per region. don't do per org as there is a default 100 bucket limit per account
prefix:
indexes have their own spot. They can be built based on snapshot levels, which are like granularities. So
<server_id>/<org_id>/<bucket_id>/index/<snapshot level>/
<server_id>/<org_id>/<bucket_id>/data/<start time><end time><id start><id end>.tsm
Some things we'd want to query:
measurement = cpu and host = serverA
tag keys where host = server a
hosts where measurement = redis
fields where measurement = redis
hosts
group data by measurement_range, time, size limit. Roll over to next measurement range when we hit either time or size
so we can say we want to snapshot to S3 at least every 1h or 100MB. Should also be able to force S3 snapshot.
We can have overlapping S3 data, which is fine. Need ability to request that the server looks at overlaps and compacts them
given a set of key/value pairs, determine which ranges we need to lookup.
are the set of ranges fixed or dynamic? If dynamic they have to be able to overlap. Which means we'll need to be able to compact them?
be able to tell the server how many requests to send to s3 in parallel to load
configure when to roll to deeper prefix (size/file count based)
on boot it should load up its S3 file index and keep in memory
*/

81
src/main.rs Normal file
View File

@ -0,0 +1,81 @@
use futures::{future, Future, Sink};
use crate::delorean::{
server,
CreateBucketRequest,
CreateBucketResponse,
GetBucketsResponse,
DeleteBucketRequest,
DeleteBucketResponse,
Bucket,
Organization,
};
use std::sync::{Arc, Mutex};
use std::fmt::Error;
use tower_grpc::{Request, Response};
use tower_hyper::server::{Http, Server};
use tokio::net::TcpListener;
use tower_util::Ready;
// Protobuf message and service types generated at build time: build.rs runs
// tower-grpc-build on proto/delorean/delorean.proto, which writes
// `delorean.rs` into $OUT_DIR; this module pulls that file in.
pub mod delorean {
include!(concat!(env!("OUT_DIR"), "/delorean.rs"));
}
//#[derive(Debug, Clone)]
//struct Delorean {
// buckets: Arc<Vec<Bucket>>,
//}
//
//impl delorean::server::Delorean for Delorean {
// type CreateBucketFuture = future::FutureResult<Response<CreateBucketResponse>, tower_grpc::Status>;
// fn create_bucket(&mut self, request: Request<CreateBucketRequest>) -> Self::CreateBucketFuture {
// println!("CreateBucket: {:?}", request);
// let response = Response::new(CreateBucketResponse{});
// future::ok(response)
// }
//
// type DeleteBucketFuture = future::FutureResult<Response<DeleteBucketResponse>, tower_grpc::Status>;
// //futures::future::result_::FutureResult<tower_grpc::response::Response<delorean::DeleteBucketResponse>, tower_grpc::status::Status>
// fn delete_bucket(&mut self, request: Request<DeleteBucketRequest>) -> Self::DeleteBucketFuture {
// println!("DeleteBucket: {:?}", request);
// future::ok(Response::new(DeleteBucketResponse{}))
// }
//
// type GetBucketsFuture = future::FutureResult<Response<GetBucketsResponse>, tower_grpc::Status>;
// fn get_buckets(&mut self, request: Request<Organization>) -> Self::GetBucketsFuture {
// println!("GetBuckets: {:?}", request);
// future::ok(Response::new(GetBucketsResponse{buckets: self.buckets.clone().to_vec()}))
// }
//}
/// Entry point. Currently only prints a greeting; the commented-out code
/// below is the intended gRPC server wiring (a `Delorean` service handler
/// served over HTTP/2 via tower-hyper on 127.0.0.1:10000), kept as
/// scaffolding until the generated `delorean` module and the service impl
/// above compile cleanly.
fn main() {
println!("Hello, world!");
// let handler = Delorean {
// buckets: Arc::new(vec![]),
// };
//
// let new_service = server::DeloreanDbServer::new(handler);
// let mut server = Server::new(new_service);
// let http = Http::new().http2_only(true).clone();
//
// let addr = "127.0.0.1:10000".parse().unwrap();
// let bind = TcpListener::bind(&addr).expect("bind");
//
// println!("listening on {:?}", addr);
//
// let serve = bind
// .incoming()
// .for_each(move |sock| {
// if let Err(e) = sock.set_nodelay(true) {
// return Err(e);
// }
//
// let serve = server.serve_with(sock, http.clone());
// tokio::spawn(serve.map_err(|e| println!("h2 error: {:?}", e)));
//
// Ok(())
// })
// .map_err(|e| eprintln!("accept error: {}", e));
//
// tokio::run(serve);
}