fix: serialize distinct cache in catalog (#25990)
The distinct cache info for tables was not serialized in the catalog. This fixes it, but also updates the catalog serialization to use the snapshot type serialization from the Catalog type all the way down. The Eq and PartialEq impls were removed from Catalog and InnerCatalog, as they were only used in tests, and were replaced by pure insta snapshot tests. A test was added to check that the distinct cache serializes/deserializes.
parent 04f10ad290
commit 9646691d96
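The change replaces direct serde derives on the runtime catalog types with conversions through plain snapshot structs (CatalogSnapshot, DatabaseSnapshot, TableSnapshot, DistinctCacheSnapshot) that own the persisted shape. A minimal sketch of that pattern, using illustrative names (`State`, `StateSnapshot`) that are not from this codebase:

```rust
// Minimal sketch of the snapshot-type pattern described above. The names
// (State, StateSnapshot) are illustrative and not taken from this codebase;
// the real types are Catalog/InnerCatalog and CatalogSnapshot below.
use serde::{Deserialize, Serialize};

/// Runtime type: carries transient state that should not be persisted.
struct State {
    names: Vec<String>,
    dirty: bool, // transient; always reset on load
}

/// Plain data type that owns the persisted shape.
#[derive(Serialize, Deserialize)]
struct StateSnapshot {
    names: Vec<String>,
}

impl Serialize for State {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Convert to the snapshot and let it define the on-disk format.
        StateSnapshot { names: self.names.clone() }.serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for State {
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Rebuild transient/derived fields instead of persisting them.
        let snap = StateSnapshot::deserialize(deserializer)?;
        Ok(State { names: snap.names, dirty: false })
    }
}

fn main() {
    let state = State { names: vec!["test_cache".into()], dirty: true };
    let json = serde_json::to_string(&state).unwrap();
    let restored: State = serde_json::from_str(&json).unwrap();
    assert_eq!(restored.names, ["test_cache"]);
    assert!(!restored.dirty);
}
```

In the diff below, the `updated` flag and the `db_map` lookup are exactly this kind of transient state: they are dropped from CatalogSnapshot and rebuilt in `From<CatalogSnapshot> for InnerCatalog`.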
@@ -3747,6 +3747,7 @@ checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
 dependencies = [
  "autocfg",
  "scopeguard",
+ "serde",
 ]

 [[package]]
@@ -89,7 +89,7 @@ mockall = { version = "0.13.0" }
 non-empty-string = "0.2.5"
 num_cpus = "1.16.0"
 object_store = "0.11.1"
-parking_lot = "0.12.1"
+parking_lot = { version = "0.12.1", features = ["serde"] }
 paste = "1.0.15"
 parquet = { version = "53.0.0", features = ["object_store"] }
 pbjson = "0.6.0"
@@ -19,7 +19,7 @@ use iox_time::Time;
 use observability_deps::tracing::{debug, info, warn};
 use parking_lot::RwLock;
 use schema::{Schema, SchemaBuilder};
-use serde::{Deserialize, Serialize, Serializer};
+use serde::{Deserialize, Serialize};
 use std::borrow::Cow;
 use std::cmp::Ordering;
 use std::collections::BTreeMap;
@@ -160,26 +160,12 @@ impl CatalogSequenceNumber {
     }
 }

-#[derive(Debug)]
+#[derive(Debug, Serialize, Deserialize)]
 pub struct Catalog {
+    #[serde(flatten)]
     inner: RwLock<InnerCatalog>,
 }

-impl PartialEq for Catalog {
-    fn eq(&self, other: &Self) -> bool {
-        self.inner.read().eq(&other.inner.read())
-    }
-}
-
-impl Serialize for Catalog {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        self.inner.read().serialize(serializer)
-    }
-}
-
 impl Catalog {
     /// Limit for the number of Databases that InfluxDB 3 Core OSS can have
     pub(crate) const NUM_DBS_LIMIT: usize = 5;
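The derive above relies on parking_lot's `serde` feature (enabled in the Cargo.toml hunk): with it, an `RwLock` serializes as the value it guards, so `#[serde(flatten)]` lets `Catalog` serialize with exactly the shape of `InnerCatalog`. A standalone sketch of the same idea, with illustrative types that are not from this codebase:

```rust
// Standalone sketch, not the project's code: it assumes serde, serde_json, and
// parking_lot with its "serde" feature as dependencies. With that feature, an
// RwLock serializes as the value it guards, so flattening the locked inner
// struct gives the outer type the same JSON shape as the inner one.
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Inner {
    name: String,
    sequence: u64,
}

#[derive(Serialize, Deserialize)]
struct Outer {
    #[serde(flatten)]
    inner: RwLock<Inner>,
}

fn main() {
    let outer = Outer {
        inner: RwLock::new(Inner { name: "example".into(), sequence: 1 }),
    };
    // No extra wrapper around the lock: {"name":"example","sequence":1}
    let json = serde_json::to_string(&outer).unwrap();
    let roundtrip: Outer = serde_json::from_str(&json).unwrap();
    assert_eq!(roundtrip.inner.read().sequence, 1);
}
```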
@@ -359,79 +345,21 @@ impl Catalog {
     }
 }

-#[serde_with::serde_as]
-#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Default)]
+#[derive(Debug, Clone, Default)]
 pub struct InnerCatalog {
     /// The catalog is a map of databases with their table schemas
-    databases: SerdeVecMap<DbId, Arc<DatabaseSchema>>,
-    sequence: CatalogSequenceNumber,
+    pub(crate) databases: SerdeVecMap<DbId, Arc<DatabaseSchema>>,
+    pub(crate) sequence: CatalogSequenceNumber,
     /// The `node_id` is the prefix that is passed in when starting up
     /// (`node_identifier_prefix`)
-    // TODO: deprecate this alias
-    #[serde(alias = "writer_id")]
-    node_id: Arc<str>,
+    pub(crate) node_id: Arc<str>,
     /// The instance_id uniquely identifies the instance that generated the catalog
-    instance_id: Arc<str>,
+    pub(crate) instance_id: Arc<str>,
     /// If true, the catalog has been updated since the last time it was serialized
-    #[serde(skip)]
-    updated: bool,
-    #[serde_as(as = "DbMapAsArray")]
-    db_map: BiHashMap<DbId, Arc<str>>,
+    pub(crate) updated: bool,
+    pub(crate) db_map: BiHashMap<DbId, Arc<str>>,
 }

-serde_with::serde_conv!(
-    DbMapAsArray,
-    BiHashMap<DbId, Arc<str>>,
-    |map: &BiHashMap<DbId, Arc<str>>| {
-        map.iter().fold(Vec::new(), |mut acc, (id, name)| {
-            acc.push(DbMap {
-                db_id: *id,
-                name: Arc::clone(&name)
-            });
-            acc
-        })
-    },
-    |vec: Vec<DbMap>| -> Result<_, std::convert::Infallible> {
-        Ok(vec.into_iter().fold(BiHashMap::new(), |mut acc, db| {
-            acc.insert(db.db_id, db.name);
-            acc
-        }))
-    }
-);
-
-#[derive(Debug, Serialize, Deserialize)]
-struct DbMap {
-    db_id: DbId,
-    name: Arc<str>,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-struct TableMap {
-    table_id: TableId,
-    name: Arc<str>,
-}
-
-serde_with::serde_conv!(
-    TableMapAsArray,
-    BiHashMap<TableId, Arc<str>>,
-    |map: &BiHashMap<TableId, Arc<str>>| {
-        map.iter().fold(Vec::new(), |mut acc, (table_id, name)| {
-            acc.push(TableMap {
-                table_id: *table_id,
-                name: Arc::clone(&name)
-            });
-            acc
-        })
-    },
-    |vec: Vec<TableMap>| -> Result<_, std::convert::Infallible> {
-        let mut map = BiHashMap::new();
-        for item in vec {
-            map.insert(item.table_id, item.name);
-        }
-        Ok(map)
-    }
-);
-
 impl InnerCatalog {
     pub(crate) fn new(node_id: Arc<str>, instance_id: Arc<str>) -> Self {
         Self {
@@ -1439,19 +1367,20 @@ mod tests {
             .databases
             .insert(database.id, Arc::new(database));

-        insta::with_settings!({
-            sort_maps => true,
-            description => "catalog serialization to help catch breaking changes"
-        }, {
-            insta::assert_json_snapshot!(catalog);
-        });
-
-        // Serialize/deserialize to ensure roundtrip to/from JSON
-        let serialized = serde_json::to_string(&catalog).unwrap();
-        let deserialized_inner: InnerCatalog = serde_json::from_str(&serialized).unwrap();
-        let deserialized = Catalog::from_inner(deserialized_inner);
-        assert_eq!(catalog, deserialized);
-        assert_eq!(instance_id, deserialized.instance_id());
+        insta::allow_duplicates! {
+            insta::with_settings!({
+                sort_maps => true,
+                description => "catalog serialization to help catch breaking changes"
+            }, {
+                insta::assert_json_snapshot!(catalog);
+                // Serialize/deserialize to ensure roundtrip to/from JSON
+                let serialized = serde_json::to_string(&catalog).unwrap();
+                let catalog: Catalog = serde_json::from_str(&serialized).unwrap();
+                insta::assert_json_snapshot!(catalog);
+                assert_eq!(instance_id, catalog.instance_id());
+                assert_eq!(catalog.db_name_to_id("test_db"), Some(DbId::from(0)));
+            });
+        }
     }

     #[test]
@@ -1483,8 +1412,7 @@ mod tests {
             ],
             "sequence": 0,
             "node_id": "test",
-            "instance_id": "test",
-            "db_map": []
+            "instance_id": "test"
         }"#;
         let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err();
         assert_contains!(err.to_string(), "duplicate key found");
@@ -1529,8 +1457,7 @@ mod tests {
             ],
             "sequence": 0,
             "node_id": "test",
-            "instance_id": "test",
-            "db_map": []
+            "instance_id": "test"
         }"#;
         let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err();
         assert_contains!(err.to_string(), "duplicate key found");
@@ -1681,17 +1608,18 @@ mod tests {
             .databases
             .insert(database.id, Arc::new(database));

-        insta::with_settings!({
-            sort_maps => true,
-            description => "catalog serialization to help catch breaking changes"
-        }, {
-            insta::assert_json_snapshot!(catalog);
-        });
-
-        let serialized = serde_json::to_string(&catalog).unwrap();
-        let deserialized_inner: InnerCatalog = serde_json::from_str(&serialized).unwrap();
-        let deserialized = Catalog::from_inner(deserialized_inner);
-        assert_eq!(catalog, deserialized);
+        insta::allow_duplicates! {
+            insta::with_settings!({
+                sort_maps => true,
+                description => "catalog serialization to help catch breaking changes"
+            }, {
+                insta::assert_json_snapshot!(catalog);
+                let serialized = serde_json::to_string(&catalog).unwrap();
+                let catalog: Catalog = serde_json::from_str(&serialized).unwrap();
+                insta::assert_json_snapshot!(catalog);
+                assert_eq!(catalog.db_name_to_id("test_db"), Some(DbId::from(0)));
+            });
+        }
     }

     #[test]
@@ -1747,17 +1675,71 @@ mod tests {
             .databases
             .insert(database.id, Arc::new(database));

-        insta::with_settings!({
-            sort_maps => true,
-            description => "catalog serialization to help catch breaking changes"
-        }, {
-            insta::assert_json_snapshot!(catalog);
-        });
-
-        let serialized = serde_json::to_string(&catalog).unwrap();
-        let deserialized_inner: InnerCatalog = serde_json::from_str(&serialized).unwrap();
-        let deserialized = Catalog::from_inner(deserialized_inner);
-        assert_eq!(catalog, deserialized);
+        insta::allow_duplicates! {
+            insta::with_settings!({
+                sort_maps => true,
+                description => "catalog serialization to help catch breaking changes"
+            }, {
+                insta::assert_json_snapshot!(catalog);
+                let serialized = serde_json::to_string(&catalog).unwrap();
+                let catalog: Catalog = serde_json::from_str(&serialized).unwrap();
+                insta::assert_json_snapshot!(catalog);
+                assert_eq!(catalog.db_name_to_id("test_db"), Some(DbId::from(0)));
+            });
+        }
+    }
+
+    #[test]
+    fn test_serialize_distinct_cache() {
+        let node_id = Arc::from("sample-host-id");
+        let instance_id = Arc::from("instance-id");
+        let catalog = Catalog::new(node_id, instance_id);
+        let mut database = catalog.db_or_create("test_db").unwrap().as_ref().clone();
+        use InfluxColumnType::*;
+        use InfluxFieldType::*;
+        let table_id = TableId::new();
+        let table_name = Arc::<str>::from("test_table");
+        let tag_1_id = ColumnId::new();
+        let tag_2_id = ColumnId::new();
+        let tag_3_id = ColumnId::new();
+        let mut table_def = TableDefinition::new(
+            table_id,
+            Arc::clone(&table_name),
+            vec![
+                (tag_1_id, "tag_1".into(), Tag),
+                (tag_2_id, "tag_2".into(), Tag),
+                (tag_3_id, "tag_3".into(), Tag),
+                (ColumnId::new(), "time".into(), Timestamp),
+                (ColumnId::new(), "field".into(), Field(String)),
+            ],
+            vec![tag_1_id, tag_2_id, tag_3_id],
+        )
+        .unwrap();
+        table_def.add_distinct_cache(DistinctCacheDefinition {
+            table_id,
+            table_name,
+            cache_name: Arc::<str>::from("test_cache"),
+            column_ids: vec![tag_1_id, tag_2_id],
+            max_cardinality: 100,
+            max_age_seconds: 10,
+        });
+        database
+            .insert_table(table_id, Arc::new(table_def))
+            .unwrap();
+        catalog.inner.write().upsert_db(database);
+
+        insta::allow_duplicates! {
+            insta::with_settings!({
+                sort_maps => true,
+                description => "catalog serialization to help catch breaking changes"
+            }, {
+                insta::assert_json_snapshot!(catalog);
+                let serialized = serde_json::to_string(&catalog).unwrap();
+                let catalog: Catalog = serde_json::from_str(&serialized).unwrap();
+                insta::assert_json_snapshot!(catalog);
+                assert_eq!(catalog.db_name_to_id("test_db"), Some(DbId::from(0)));
+            });
+        }
     }

     #[test]
@@ -1,5 +1,7 @@
+use crate::catalog::CatalogSequenceNumber;
 use crate::catalog::ColumnDefinition;
 use crate::catalog::DatabaseSchema;
+use crate::catalog::InnerCatalog;
 use crate::catalog::TableDefinition;
 use arrow::datatypes::DataType as ArrowDataType;
 use bimap::BiHashMap;
@@ -8,6 +10,7 @@ use influxdb3_id::ColumnId;
 use influxdb3_id::DbId;
 use influxdb3_id::SerdeVecMap;
 use influxdb3_id::TableId;
+use influxdb3_wal::DistinctCacheDefinition;
 use influxdb3_wal::{LastCacheDefinition, LastCacheValueColumnsDef, PluginType, TriggerDefinition};
 use schema::InfluxColumnType;
 use schema::InfluxFieldType;
@@ -15,6 +18,71 @@ use schema::TIME_DATA_TIMEZONE;
 use serde::{Deserialize, Serialize};
 use std::sync::Arc;

+impl Serialize for InnerCatalog {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        let snapshot = CatalogSnapshot::from(self);
+        snapshot.serialize(serializer)
+    }
+}
+
+impl<'de> Deserialize<'de> for InnerCatalog {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        CatalogSnapshot::deserialize(deserializer).map(Into::into)
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct CatalogSnapshot {
+    databases: SerdeVecMap<DbId, DatabaseSnapshot>,
+    sequence: CatalogSequenceNumber,
+    #[serde(alias = "writer_id")]
+    node_id: Arc<str>,
+    instance_id: Arc<str>,
+}
+
+impl From<&InnerCatalog> for CatalogSnapshot {
+    fn from(catalog: &InnerCatalog) -> Self {
+        Self {
+            databases: catalog
+                .databases
+                .iter()
+                .map(|(id, db)| (*id, db.as_ref().into()))
+                .collect(),
+            sequence: catalog.sequence,
+            node_id: Arc::clone(&catalog.node_id),
+            instance_id: Arc::clone(&catalog.instance_id),
+        }
+    }
+}
+
+impl From<CatalogSnapshot> for InnerCatalog {
+    fn from(snap: CatalogSnapshot) -> Self {
+        let db_map = snap
+            .databases
+            .iter()
+            .map(|(id, db)| (*id, Arc::clone(&db.name)))
+            .collect();
+        Self {
+            databases: snap
+                .databases
+                .into_iter()
+                .map(|(id, db)| (id, Arc::new(db.into())))
+                .collect(),
+            sequence: snap.sequence,
+            node_id: snap.node_id,
+            instance_id: snap.instance_id,
+            updated: false,
+            db_map,
+        }
+    }
+}
+
 impl Serialize for DatabaseSchema {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
@@ -137,6 +205,8 @@ struct TableSnapshot {
     cols: SerdeVecMap<ColumnId, ColumnDefinitionSnapshot>,
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     last_caches: Vec<LastCacheSnapshot>,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    distinct_caches: Vec<DistinctCacheSnapshot>,
     deleted: bool,
 }

@@ -299,6 +369,7 @@ impl From<&TableDefinition> for TableSnapshot {
                 })
                 .collect(),
             last_caches: def.last_caches.values().map(Into::into).collect(),
+            distinct_caches: def.distinct_caches.values().map(Into::into).collect(),
             deleted: def.deleted,
         }
     }
@@ -384,6 +455,11 @@ impl From<TableSnapshot> for TableDefinition {
                 .into_iter()
                 .map(|lc_snap| (Arc::clone(&lc_snap.name), lc_snap.into()))
                 .collect(),
+            distinct_caches: snap
+                .distinct_caches
+                .into_iter()
+                .map(|dc_snap| (Arc::clone(&dc_snap.name), dc_snap.into()))
+                .collect(),
             ..table_def
         }
     }
@@ -482,3 +558,39 @@ impl From<LastCacheSnapshot> for LastCacheDefinition {
         }
     }
 }
+
+#[derive(Debug, Serialize, Deserialize)]
+struct DistinctCacheSnapshot {
+    table_id: TableId,
+    table: Arc<str>,
+    name: Arc<str>,
+    cols: Vec<ColumnId>,
+    max_cardinality: usize,
+    max_age_seconds: u64,
+}
+
+impl From<&DistinctCacheDefinition> for DistinctCacheSnapshot {
+    fn from(def: &DistinctCacheDefinition) -> Self {
+        Self {
+            table_id: def.table_id,
+            table: Arc::clone(&def.table_name),
+            name: Arc::clone(&def.cache_name),
+            cols: def.column_ids.clone(),
+            max_cardinality: def.max_cardinality,
+            max_age_seconds: def.max_age_seconds,
+        }
+    }
+}
+
+impl From<DistinctCacheSnapshot> for DistinctCacheDefinition {
+    fn from(snap: DistinctCacheSnapshot) -> Self {
+        Self {
+            table_id: snap.table_id,
+            table_name: snap.table,
+            cache_name: snap.name,
+            column_ids: snap.cols,
+            max_cardinality: snap.max_cardinality,
+            max_age_seconds: snap.max_age_seconds,
+        }
+    }
+}
@@ -267,8 +267,7 @@ expression: catalog
       }
     ]
   ],
-  "sequence": 0,
-  "node_id": "sample-host-id",
   "instance_id": "instance-id",
-  "db_map": []
+  "node_id": "sample-host-id",
+  "sequence": 0
 }
@@ -0,0 +1,121 @@
---
source: influxdb3_catalog/src/catalog.rs
description: catalog serialization to help catch breaking changes
expression: catalog
---
{
  "databases": [
    [
      0,
      {
        "id": 0,
        "name": "test_db",
        "tables": [
          [
            0,
            {
              "table_id": 0,
              "table_name": "test_table",
              "key": [
                0,
                1,
                2
              ],
              "cols": [
                [
                  4,
                  {
                    "name": "field",
                    "id": 4,
                    "type": "str",
                    "influx_type": "field",
                    "nullable": true
                  }
                ],
                [
                  0,
                  {
                    "name": "tag_1",
                    "id": 0,
                    "type": {
                      "dict": [
                        "i32",
                        "str"
                      ]
                    },
                    "influx_type": "tag",
                    "nullable": false
                  }
                ],
                [
                  1,
                  {
                    "name": "tag_2",
                    "id": 1,
                    "type": {
                      "dict": [
                        "i32",
                        "str"
                      ]
                    },
                    "influx_type": "tag",
                    "nullable": false
                  }
                ],
                [
                  2,
                  {
                    "name": "tag_3",
                    "id": 2,
                    "type": {
                      "dict": [
                        "i32",
                        "str"
                      ]
                    },
                    "influx_type": "tag",
                    "nullable": false
                  }
                ],
                [
                  3,
                  {
                    "name": "time",
                    "id": 3,
                    "type": {
                      "time": [
                        "ns",
                        null
                      ]
                    },
                    "influx_type": "time",
                    "nullable": false
                  }
                ]
              ],
              "distinct_caches": [
                {
                  "table_id": 0,
                  "table": "test_table",
                  "name": "test_cache",
                  "cols": [
                    0,
                    1
                  ],
                  "max_cardinality": 100,
                  "max_age_seconds": 10
                }
              ],
              "deleted": false
            }
          ]
        ],
        "processing_engine_triggers": [],
        "deleted": false
      }
    ]
  ],
  "instance_id": "instance-id",
  "node_id": "sample-host-id",
  "sequence": 2
}
@@ -118,8 +118,7 @@ expression: catalog
       }
     ]
   ],
-  "sequence": 0,
-  "node_id": "sample-host-id",
   "instance_id": "instance-id",
-  "db_map": []
+  "node_id": "sample-host-id",
+  "sequence": 0
 }
@@ -102,8 +102,7 @@ expression: catalog
       }
     ]
   ],
-  "sequence": 0,
-  "node_id": "sample-host-id",
   "instance_id": "instance-id",
-  "db_map": []
+  "node_id": "sample-host-id",
+  "sequence": 0
 }
@@ -92,12 +92,6 @@ expression: catalog_json
       }
     ]
   ],
-  "db_map": [
-    {
-      "db_id": 0,
-      "name": "db"
-    }
-  ],
   "instance_id": "[uuid]",
   "node_id": "test_host",
   "sequence": 3
@@ -82,12 +82,6 @@ expression: catalog_json
       }
     ]
   ],
-  "db_map": [
-    {
-      "db_id": 0,
-      "name": "db"
-    }
-  ],
   "instance_id": "[uuid]",
   "node_id": "test_host",
   "sequence": 2
@@ -79,12 +79,6 @@ expression: catalog_json
       }
     ]
   ],
-  "db_map": [
-    {
-      "db_id": 0,
-      "name": "db"
-    }
-  ],
   "instance_id": "[uuid]",
   "node_id": "test_host",
   "sequence": 4