use generated_types::{
    google::protobuf::{Duration, Empty},
    influxdata::iox::management::v1::{database_rules::RoutingRules, *},
};
use influxdb_iox_client::{management::CreateDatabaseError, operations, write::WriteError};
use test_helpers::assert_contains;

use super::scenario::{
    create_readable_database, create_two_partition_database, create_unreadable_database, rand_name,
};
use crate::common::server_fixture::ServerFixture;
use tonic::Code;
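
// End-to-end tests of the IOx management gRPC API: server id and remote configuration,
// database rules, partitions, and chunk management, all run against a `ServerFixture`.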

#[tokio::test]
async fn test_serving_readiness() {
    let server_fixture = ServerFixture::create_single_use().await;
    let mut mgmt_client = server_fixture.management_client();
    let mut write_client = server_fixture.write_client();

    let name = "foo";
    let lp_data = "bar baz=1 10";

    mgmt_client
        .update_server_id(42)
        .await
        .expect("set ID failed");
    mgmt_client
        .create_database(DatabaseRules {
            name: name.to_string(),
            ..Default::default()
        })
        .await
        .expect("create database failed");

    mgmt_client.set_serving_readiness(false).await.unwrap();
    let err = write_client.write(name, lp_data).await.unwrap_err();
    assert!(
        matches!(&err, WriteError::ServerError(status) if status.code() == Code::Unavailable),
        "{}",
        &err
    );

    mgmt_client.set_serving_readiness(true).await.unwrap();
    write_client.write(name, lp_data).await.unwrap();
}
|
2021-03-08 17:35:19 +00:00
|
|
|
|
2021-03-10 14:05:35 +00:00
|
|
|
#[tokio::test]
|
2021-03-12 13:56:14 +00:00
|
|
|
async fn test_list_update_remotes() {
|
2021-03-10 14:05:35 +00:00
|
|
|
let server_fixture = ServerFixture::create_single_use().await;
|
2021-03-15 13:13:55 +00:00
|
|
|
let mut client = server_fixture.management_client();
|
2021-03-10 14:05:35 +00:00
|
|
|
|
2021-03-08 18:32:41 +00:00
|
|
|
const TEST_REMOTE_ID_1: u32 = 42;
|
|
|
|
const TEST_REMOTE_ADDR_1: &str = "1.2.3.4:1234";
|
|
|
|
const TEST_REMOTE_ID_2: u32 = 84;
|
|
|
|
const TEST_REMOTE_ADDR_2: &str = "4.3.2.1:4321";
|
|
|
|
const TEST_REMOTE_ADDR_2_UPDATED: &str = "40.30.20.10:4321";
|
|
|
|
|
|
|
|
let res = client.list_remotes().await.expect("list remotes failed");
|
|
|
|
assert_eq!(res.len(), 0);
|
|
|
|
|
|
|
|
client
|
|
|
|
.update_remote(TEST_REMOTE_ID_1, TEST_REMOTE_ADDR_1)
|
|
|
|
.await
|
|
|
|
.expect("update failed");
|
|
|
|
|
|
|
|
let res = client.list_remotes().await.expect("list remotes failed");
|
|
|
|
assert_eq!(res.len(), 1);
|
|
|
|
|
|
|
|
client
|
|
|
|
.update_remote(TEST_REMOTE_ID_2, TEST_REMOTE_ADDR_2)
|
|
|
|
.await
|
|
|
|
.expect("update failed");
|
|
|
|
|
|
|
|
let res = client.list_remotes().await.expect("list remotes failed");
|
|
|
|
assert_eq!(res.len(), 2);
|
|
|
|
assert_eq!(res[0].id, TEST_REMOTE_ID_1);
|
|
|
|
assert_eq!(res[0].connection_string, TEST_REMOTE_ADDR_1);
|
|
|
|
assert_eq!(res[1].id, TEST_REMOTE_ID_2);
|
|
|
|
assert_eq!(res[1].connection_string, TEST_REMOTE_ADDR_2);
|
|
|
|
|
|
|
|
client
|
|
|
|
.delete_remote(TEST_REMOTE_ID_1)
|
|
|
|
.await
|
|
|
|
.expect("delete failed");
|
|
|
|
|
|
|
|
client
|
|
|
|
.delete_remote(TEST_REMOTE_ID_1)
|
|
|
|
.await
|
|
|
|
.expect_err("expected delete to fail");
|
|
|
|
|
|
|
|
let res = client.list_remotes().await.expect("list remotes failed");
|
|
|
|
assert_eq!(res.len(), 1);
|
|
|
|
assert_eq!(res[0].id, TEST_REMOTE_ID_2);
|
|
|
|
assert_eq!(res[0].connection_string, TEST_REMOTE_ADDR_2);
|
|
|
|
|
|
|
|
client
|
|
|
|
.update_remote(TEST_REMOTE_ID_2, TEST_REMOTE_ADDR_2_UPDATED)
|
|
|
|
.await
|
|
|
|
.expect("update failed");
|
|
|
|
|
|
|
|
let res = client.list_remotes().await.expect("list remotes failed");
|
|
|
|
assert_eq!(res.len(), 1);
|
|
|
|
assert_eq!(res[0].id, TEST_REMOTE_ID_2);
|
|
|
|
assert_eq!(res[0].connection_string, TEST_REMOTE_ADDR_2_UPDATED);
|
|
|
|
}

#[tokio::test]
async fn test_set_get_writer_id() {
    let server_fixture = ServerFixture::create_single_use().await;
    let mut client = server_fixture.management_client();

    const TEST_ID: u32 = 42;

    client
        .update_server_id(TEST_ID)
        .await
        .expect("set ID failed");

    let got = client.get_server_id().await.expect("get ID failed");

    assert_eq!(got.get(), TEST_ID);
}

#[tokio::test]
async fn test_create_database_duplicate_name() {
    let server_fixture = ServerFixture::create_shared().await;
    let mut client = server_fixture.management_client();

    let db_name = rand_name();

    client
        .create_database(DatabaseRules {
            name: db_name.clone(),
            ..Default::default()
        })
        .await
        .expect("create database failed");

    let err = client
        .create_database(DatabaseRules {
            name: db_name,
            ..Default::default()
        })
        .await
        .expect_err("expected duplicate create database to fail");

    assert!(matches!(
        dbg!(err),
        CreateDatabaseError::DatabaseAlreadyExists
    ))
}

#[tokio::test]
async fn test_create_database_invalid_name() {
    let server_fixture = ServerFixture::create_shared().await;
    let mut client = server_fixture.management_client();

    let err = client
        .create_database(DatabaseRules {
            name: "my_example\ndb".to_string(),
            ..Default::default()
        })
        .await
        .expect_err("expected request to fail");

    assert!(matches!(dbg!(err), CreateDatabaseError::InvalidArgument(_)));
}

#[tokio::test]
async fn test_list_databases() {
    let server_fixture = ServerFixture::create_shared().await;
    let mut client = server_fixture.management_client();

    let name = rand_name();
    client
        .create_database(DatabaseRules {
            name: name.clone(),
            ..Default::default()
        })
        .await
        .expect("create database failed");

    let names = client
        .list_databases()
        .await
        .expect("list databases failed");
    assert!(names.contains(&name));
}

#[tokio::test]
async fn test_create_get_update_database() {
    let server_fixture = ServerFixture::create_shared().await;
    let mut client = server_fixture.management_client();

    let db_name = rand_name();

    // Specify everything to allow direct comparison between request and response
    // Otherwise would expect differences due to server-side defaulting
    let mut rules = DatabaseRules {
        name: db_name.clone(),
        partition_template: Some(PartitionTemplate {
            parts: vec![partition_template::Part {
                part: Some(partition_template::part::Part::Table(Empty {})),
            }],
        }),
        write_buffer_config: Some(WriteBufferConfig {
            buffer_size: 24,
            segment_size: 2,
            buffer_rollover: write_buffer_config::Rollover::DropIncoming as _,
            persist_segments: true,
            close_segment_after: Some(Duration {
                seconds: 324,
                nanos: 2,
            }),
        }),
        lifecycle_rules: Some(LifecycleRules {
            buffer_size_hard: 553,
            sort_order: Some(lifecycle_rules::SortOrder {
                order: Order::Asc as _,
                sort: Some(lifecycle_rules::sort_order::Sort::CreatedAtTime(Empty {})),
            }),
            ..Default::default()
        }),
        routing_rules: None,
        worker_cleanup_avg_sleep: Some(Duration {
            seconds: 2,
            nanos: 0,
        }),
    };

    client
        .create_database(rules.clone())
        .await
        .expect("create database failed");

    let response = client
        .get_database(&db_name)
        .await
        .expect("get database failed");

    assert_eq!(response.routing_rules, None);

    rules.routing_rules = Some(RoutingRules::ShardConfig(ShardConfig {
        ignore_errors: true,
        ..Default::default()
    }));

    let updated_rules = client
        .update_database(rules.clone())
        .await
        .expect("update database failed");

    assert_eq!(updated_rules, rules);

    let response = client
        .get_database(&db_name)
        .await
        .expect("get database failed");

    assert!(matches!(
        response.routing_rules,
        Some(RoutingRules::ShardConfig(cfg)) if cfg.ignore_errors,
    ));
}

#[tokio::test]
async fn test_chunk_get() {
    use generated_types::influxdata::iox::management::v1::{Chunk, ChunkStorage};

    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let mut write_client = fixture.write_client();

    let db_name = rand_name();
    create_readable_database(&db_name, fixture.grpc_channel()).await;

    let lp_lines = vec![
        "cpu,region=west user=23.2 100",
        "cpu,region=west user=21.0 150",
        "disk,region=east bytes=99i 200",
    ];

    write_client
        .write(&db_name, lp_lines.join("\n"))
        .await
        .expect("write succeeded");

    let mut chunks = management_client
        .list_chunks(&db_name)
        .await
        .expect("listing chunks");

    // ensure the output order is consistent
    chunks.sort_by(|c1, c2| c1.partition_key.cmp(&c2.partition_key));

    // make sure there were timestamps prior to normalization
    assert!(
        chunks[0].time_of_first_write.is_some()
            && chunks[0].time_of_last_write.is_some()
            && chunks[0].time_closed.is_none(), // chunk is not yet closed
        "actual:{:#?}",
        chunks[0]
    );

    let chunks = normalize_chunks(chunks);

    let expected: Vec<Chunk> = vec![
        Chunk {
            partition_key: "cpu".into(),
            table_name: "cpu".into(),
            id: 0,
            storage: ChunkStorage::OpenMutableBuffer as i32,
            estimated_bytes: 100,
            row_count: 2,
            time_of_first_write: None,
            time_of_last_write: None,
            time_closed: None,
        },
        Chunk {
            partition_key: "disk".into(),
            table_name: "disk".into(),
            id: 0,
            storage: ChunkStorage::OpenMutableBuffer as i32,
            estimated_bytes: 82,
            row_count: 1,
            time_of_first_write: None,
            time_of_last_write: None,
            time_closed: None,
        },
    ];
    assert_eq!(
        expected, chunks,
        "expected:\n\n{:#?}\n\nactual:{:#?}",
        expected, chunks
    );
}

#[tokio::test]
async fn test_chunk_get_errors() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let db_name = rand_name();

    let err = management_client
        .list_chunks(&db_name)
        .await
        .expect_err("no db had been created");

    assert_contains!(
        err.to_string(),
        "Some requested entity was not found: Resource database"
    );

    create_unreadable_database(&db_name, fixture.grpc_channel()).await;
}

#[tokio::test]
async fn test_partition_list() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();

    let db_name = rand_name();
    create_two_partition_database(&db_name, fixture.grpc_channel()).await;

    let mut partitions = management_client
        .list_partitions(&db_name)
        .await
        .expect("listing partitions");

    // ensure the output order is consistent
    partitions.sort_by(|p1, p2| p1.key.cmp(&p2.key));

    let expected = vec![
        Partition {
            key: "cpu".to_string(),
        },
        Partition {
            key: "mem".to_string(),
        },
    ];

    assert_eq!(
        expected, partitions,
        "expected:\n\n{:#?}\n\nactual:{:#?}",
        expected, partitions
    );
}

#[tokio::test]
async fn test_partition_list_error() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();

    let err = management_client
        .list_partitions("this database does not exist")
        .await
        .expect_err("expected error");

    assert_contains!(err.to_string(), "Database not found");
}

#[tokio::test]
async fn test_partition_get() {
    use generated_types::influxdata::iox::management::v1::Partition;

    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();

    let db_name = rand_name();
    create_two_partition_database(&db_name, fixture.grpc_channel()).await;

    let partition_key = "cpu";
    let partition = management_client
        .get_partition(&db_name, partition_key)
        .await
        .expect("getting partition");

    let expected = Partition { key: "cpu".into() };

    assert_eq!(
        expected, partition,
        "expected:\n\n{:#?}\n\nactual:{:#?}",
        expected, partition
    );
}

#[tokio::test]
async fn test_partition_get_error() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let mut write_client = fixture.write_client();

    let err = management_client
        .list_partitions("this database does not exist")
        .await
        .expect_err("expected error");

    assert_contains!(err.to_string(), "Database not found");

    let db_name = rand_name();
    create_readable_database(&db_name, fixture.grpc_channel()).await;

    let lp_lines =
        vec!["processes,host=foo running=4i,sleeping=514i,total=519i 1591894310000000000"];

    write_client
        .write(&db_name, lp_lines.join("\n"))
        .await
        .expect("write succeeded");

    let err = management_client
        .get_partition(&db_name, "non existent partition")
        .await
        .expect_err("expected error getting partition");

    assert_contains!(err.to_string(), "Partition not found");
}

#[tokio::test]
async fn test_list_partition_chunks() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let mut write_client = fixture.write_client();

    let db_name = rand_name();
    create_readable_database(&db_name, fixture.grpc_channel()).await;

    let lp_lines = vec![
        "cpu,region=west user=23.2 100",
        "cpu,region=west user=21.0 150",
        "disk,region=east bytes=99i 200",
    ];

    write_client
        .write(&db_name, lp_lines.join("\n"))
        .await
        .expect("write succeeded");

    let partition_key = "cpu";
    let chunks = management_client
        .list_partition_chunks(&db_name, partition_key)
        .await
        .expect("getting partition chunks");

    let chunks = normalize_chunks(chunks);

    let expected: Vec<Chunk> = vec![Chunk {
        partition_key: "cpu".into(),
        table_name: "cpu".into(),
        id: 0,
        storage: ChunkStorage::OpenMutableBuffer as i32,
        estimated_bytes: 100,
        row_count: 2,
        time_of_first_write: None,
        time_of_last_write: None,
        time_closed: None,
    }];

    assert_eq!(
        expected, chunks,
        "expected:\n\n{:#?}\n\nactual:{:#?}",
        expected, chunks
    );
}

#[tokio::test]
async fn test_list_partition_chunk_errors() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let db_name = rand_name();

    let err = management_client
        .list_partition_chunks(&db_name, "cpu")
        .await
        .expect_err("no db had been created");

    assert_contains!(
        err.to_string(),
        "Some requested entity was not found: Resource database"
    );
}

#[tokio::test]
async fn test_new_partition_chunk() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let mut write_client = fixture.write_client();

    let db_name = rand_name();
    create_readable_database(&db_name, fixture.grpc_channel()).await;

    let lp_lines = vec!["cpu,region=west user=23.2 100"];

    write_client
        .write(&db_name, lp_lines.join("\n"))
        .await
        .expect("write succeeded");

    let chunks = management_client
        .list_chunks(&db_name)
        .await
        .expect("listing chunks");

    assert_eq!(chunks.len(), 1, "Chunks: {:#?}", chunks);
    let partition_key = "cpu";
    let table_name = "cpu";

    // Roll over to a second chunk
    management_client
        .new_partition_chunk(&db_name, partition_key, table_name)
        .await
        .expect("new partition chunk");

    // Load some more data and now expect that we have a second chunk

    let lp_lines = vec!["cpu,region=west user=21.0 150"];

    write_client
        .write(&db_name, lp_lines.join("\n"))
        .await
        .expect("write succeeded");

    let chunks = management_client
        .list_chunks(&db_name)
        .await
        .expect("listing chunks");

    assert_eq!(chunks.len(), 2, "Chunks: {:#?}", chunks);

    // All chunks should be in the same partition
    assert_eq!(
        chunks.iter().filter(|c| c.partition_key == "cpu").count(),
        2,
        "Chunks: {:#?}",
        chunks
    );

    // Rolling over a (currently non-existent) partition is an error
    let err = management_client
        .new_partition_chunk(&db_name, "non_existent_partition", table_name)
        .await
        .expect_err("new partition chunk");

    assert_eq!(
        "Resource partition/non_existent_partition not found",
        err.to_string()
    );

    // Rolling over a (currently non-existent) table in an existing partition is also an error
    let err = management_client
        .new_partition_chunk(&db_name, partition_key, "non_existing_table")
        .await
        .expect_err("new partition chunk");

    assert_eq!(
        "Resource table/cpu:non_existing_table not found",
        err.to_string()
    );
}

#[tokio::test]
async fn test_new_partition_chunk_error() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();

    let err = management_client
        .new_partition_chunk(
            "this database does not exist",
            "nor_does_this_partition",
            "nor_does_this_table",
        )
        .await
        .expect_err("expected error");

    assert_contains!(
        err.to_string(),
        "Resource database/this database does not exist not found"
    );
}

#[tokio::test]
async fn test_close_partition_chunk() {
    use influxdb_iox_client::management::generated_types::operation_metadata::Job;
    use influxdb_iox_client::management::generated_types::ChunkStorage;

    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let mut write_client = fixture.write_client();
    let mut operations_client = fixture.operations_client();

    let db_name = rand_name();
    create_readable_database(&db_name, fixture.grpc_channel()).await;

    let partition_key = "cpu";
    let table_name = "cpu";
    let lp_lines = vec!["cpu,region=west user=23.2 100"];

    write_client
        .write(&db_name, lp_lines.join("\n"))
        .await
        .expect("write succeeded");

    let chunks = management_client
        .list_chunks(&db_name)
        .await
        .expect("listing chunks");

    assert_eq!(chunks.len(), 1, "Chunks: {:#?}", chunks);
    assert_eq!(chunks[0].id, 0);
    assert_eq!(chunks[0].storage, ChunkStorage::OpenMutableBuffer as i32);

    // Move the chunk to read buffer
    let operation = management_client
        .close_partition_chunk(&db_name, partition_key, table_name, 0)
        .await
        .expect("close partition chunk");

    println!("Operation response is {:?}", operation);
    let operation_id = operation.id();

    let meta = operations::ClientOperation::try_new(operation)
        .unwrap()
        .metadata();

    // ensure we got a legit job description back
    if let Some(Job::CloseChunk(close_chunk)) = meta.job {
        assert_eq!(close_chunk.db_name, db_name);
        assert_eq!(close_chunk.partition_key, partition_key);
        assert_eq!(close_chunk.chunk_id, 0);
    } else {
        panic!("unexpected job returned")
    };

    // wait for the job to be done
    operations_client
        .wait_operation(operation_id, Some(std::time::Duration::from_secs(1)))
        .await
        .expect("failed to wait for operation");

    // And now the chunk should have moved to the read buffer
    let mut chunks = management_client
        .list_chunks(&db_name)
        .await
        .expect("listing chunks");
    chunks.sort_by(|c1, c2| c1.id.cmp(&c2.id));

    assert_eq!(chunks.len(), 1, "Chunks: {:#?}", chunks);
    assert_eq!(chunks[0].id, 0);
    assert_eq!(chunks[0].storage, ChunkStorage::ReadBuffer as i32);
}

#[tokio::test]
async fn test_close_partition_chunk_error() {
    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();

    let err = management_client
        .close_partition_chunk(
            "this database does not exist",
            "nor_does_this_partition",
            "nor_does_this_table",
            0,
        )
        .await
        .expect_err("expected error");

    assert_contains!(err.to_string(), "Database not found");
}

#[tokio::test]
async fn test_chunk_lifecycle() {
    use influxdb_iox_client::management::generated_types::ChunkStorage;

    let fixture = ServerFixture::create_shared().await;
    let mut management_client = fixture.management_client();
    let mut write_client = fixture.write_client();

    let db_name = rand_name();
    management_client
        .create_database(DatabaseRules {
            name: db_name.clone(),
            lifecycle_rules: Some(LifecycleRules {
                mutable_linger_seconds: 1,
                ..Default::default()
            }),
            ..Default::default()
        })
        .await
        .unwrap();

    let lp_lines = vec!["cpu,region=west user=23.2 100"];

    write_client
        .write(&db_name, lp_lines.join("\n"))
        .await
        .expect("write succeeded");

    let chunks = management_client
        .list_chunks(&db_name)
        .await
        .expect("listing chunks");

    assert_eq!(chunks.len(), 1);
    assert_eq!(chunks[0].storage, ChunkStorage::OpenMutableBuffer as i32);

    // sleep longer than mutable_linger_seconds so the lifecycle moves the chunk
    tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;

    let chunks = management_client
        .list_chunks(&db_name)
        .await
        .expect("listing chunks");

    assert_eq!(chunks.len(), 1);
    assert_eq!(chunks[0].storage, ChunkStorage::ReadBuffer as i32);
}

/// Normalizes a set of Chunks for comparison by removing timestamps
fn normalize_chunks(chunks: Vec<Chunk>) -> Vec<Chunk> {
    chunks
        .into_iter()
        .map(|summary| {
            let Chunk {
                partition_key,
                table_name,
                id,
                storage,
                estimated_bytes,
                row_count,
                ..
            } = summary;
            Chunk {
                partition_key,
                table_name,
                id,
                storage,
                estimated_bytes,
                row_count,
                time_of_first_write: None,
                time_of_last_write: None,
                time_closed: None,
            }
        })
        .collect::<Vec<_>>()
}