commit 1f3bf6b5a6
Merge branch 'main' into sharding_refa
@@ -185,9 +185,9 @@ dependencies = [

[[package]]
name = "async-stream"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c"
+checksum = "0a26cb53174ddd320edfff199a853f93d571f48eeb4dde75e67a9a3dbb7b7e5e"
dependencies = [
 "async-stream-impl",
 "futures-core",

@@ -195,9 +195,9 @@ dependencies = [

[[package]]
name = "async-stream-impl"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70"
+checksum = "db134ba52475c060f3329a8ef0f8786d6b872ed01515d4b79c162e5798da1340"
dependencies = [
 "proc-macro2",
 "quote",

@@ -206,9 +206,9 @@ dependencies = [

[[package]]
name = "async-trait"
-version = "0.1.48"
+version = "0.1.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36ea56748e10732c49404c153638a15ec3d6211ec5ff35d9bb20e13b93576adf"
+checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722"
dependencies = [
 "proc-macro2",
 "quote",

@@ -295,11 +295,12 @@ dependencies = [

[[package]]
name = "backtrace"
-version = "0.3.56"
+version = "0.3.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc"
+checksum = "88fb5a785d6b44fd9d6700935608639af1b8356de1e55d5f7c2740f4faa15d82"
dependencies = [
 "addr2line",
 "cc",
 "cfg-if",
 "libc",
 "miniz_oxide",

@@ -542,9 +543,9 @@ dependencies = [

[[package]]
name = "const_fn"
-version = "0.4.6"
+version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28"
+checksum = "402da840495de3f976eaefc3485b7f5eb5b0bf9761f9a47be27fe975b3b8c2ec"

[[package]]
name = "constant_time_eq"

@@ -833,9 +834,9 @@ dependencies = [

[[package]]
name = "dirs"
-version = "3.0.1"
+version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "142995ed02755914747cc6ca76fc7e4583cd18578746716d0508ea6ed558b9ff"
+checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309"
dependencies = [
 "dirs-sys",
]

@@ -852,12 +853,12 @@ dependencies = [

[[package]]
name = "dirs-sys"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a"
+checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780"
dependencies = [
 "libc",
- "redox_users 0.3.5",
+ "redox_users 0.4.0",
 "winapi",
]

@@ -917,6 +918,12 @@ dependencies = [
 "cfg-if",
]

+[[package]]
+name = "endian-type"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
+
[[package]]
name = "env_logger"
version = "0.8.3"

@@ -972,9 +979,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"

[[package]]
name = "flatbuffers"
-version = "0.8.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63b5b0f949bdb25c077c83184ac689ae38ae392683d4c71e98be9736db66e94c"
+checksum = "c3c502342b7d6d73beb1b8bab39dc01deba0c8ef66f4e6f1eba7c69ee6b38069"
dependencies = [
 "bitflags",
 "smallvec",

@@ -1033,6 +1040,16 @@ dependencies = [
 "percent-encoding",
]

+[[package]]
+name = "fs2"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
[[package]]
name = "fs_extra"
version = "1.2.0"
@@ -1123,7 +1140,7 @@ dependencies = [
 "futures-sink",
 "futures-task",
 "futures-util",
- "pin-project 1.0.6",
+ "pin-project 1.0.7",
 "pin-utils",
]

@@ -1322,15 +1339,15 @@ dependencies = [

[[package]]
name = "httparse"
-version = "1.3.6"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc35c995b9d93ec174cf9a27d425c7892722101e14993cd227fdb51d70cf9589"
+checksum = "4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437"

[[package]]
name = "httpdate"
-version = "0.3.2"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47"
+checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9"

[[package]]
name = "human_format"

@@ -1346,9 +1363,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"

[[package]]
name = "hyper"
-version = "0.14.5"
+version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1"
+checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54"
dependencies = [
 "bytes",
 "futures-channel",

@@ -1360,7 +1377,7 @@ dependencies = [
 "httparse",
 "httpdate",
 "itoa",
- "pin-project 1.0.6",
+ "pin-project 1.0.7",
 "socket2",
 "tokio",
 "tower-service",

@@ -1400,9 +1417,9 @@ dependencies = [

[[package]]
name = "idna"
-version = "0.2.2"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21"
+checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8"
dependencies = [
 "matches",
 "unicode-bidi",

@@ -1450,7 +1467,7 @@ dependencies = [
 "criterion",
 "csv",
 "data_types",
- "dirs 3.0.1",
+ "dirs 3.0.2",
 "dotenv",
 "flate2",
 "futures",

@@ -1483,6 +1500,7 @@ dependencies = [
 "read_buffer",
 "reqwest",
 "routerify",
+ "rustyline",
 "serde",
 "serde_json",
 "serde_urlencoded 0.7.0",

@@ -1626,9 +1644,9 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"

[[package]]
name = "jobserver"
-version = "0.1.21"
+version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
+checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
dependencies = [
 "libc",
]

@@ -1670,9 +1688,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"

[[package]]
name = "lexical-core"
-version = "0.7.5"
+version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21f866863575d0e1d654fbeeabdc927292fdf862873dc3c96c6f753357e13374"
+checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe"
dependencies = [
 "arrayvec",
 "bitflags",

@@ -1683,9 +1701,9 @@ dependencies = [

[[package]]
name = "libc"
-version = "0.2.93"
+version = "0.2.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41"
+checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"

[[package]]
name = "libloading"

@@ -1914,6 +1932,27 @@ dependencies = [
 "tempfile",
]

+[[package]]
+name = "nibble_vec"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43"
+dependencies = [
+ "smallvec",
+]
+
+[[package]]
+name = "nix"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa9b4819da1bc61c0ea48b63b7bc8604064dd43013e7cc325df098d49cd7c18a"
+dependencies = [
+ "bitflags",
+ "cc",
+ "cfg-if",
+ "libc",
+]
+
[[package]]
name = "nom"
version = "5.1.2"

@@ -2170,7 +2209,7 @@ dependencies = [
 "js-sys",
 "lazy_static",
 "percent-encoding",
- "pin-project 1.0.6",
+ "pin-project 1.0.7",
 "rand 0.8.3",
 "thiserror",
 "tokio",

@@ -2300,7 +2339,7 @@ dependencies = [
 "cfg-if",
 "instant",
 "libc",
- "redox_syscall 0.2.5",
+ "redox_syscall 0.2.6",
 "smallvec",
 "winapi",
]

@@ -2407,11 +2446,11 @@ dependencies = [

[[package]]
name = "pin-project"
-version = "1.0.6"
+version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6"
+checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4"
dependencies = [
- "pin-project-internal 1.0.6",
+ "pin-project-internal 1.0.7",
]

[[package]]

@@ -2427,9 +2466,9 @@ dependencies = [

[[package]]
name = "pin-project-internal"
-version = "1.0.6"
+version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5"
+checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f"
dependencies = [
 "proc-macro2",
 "quote",

@@ -2644,9 +2683,9 @@ dependencies = [

[[package]]
name = "protobuf"
-version = "2.22.1"
+version = "2.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b7f4a129bb3754c25a4e04032a90173c68f85168f77118ac4cb4936e7f06f92"
+checksum = "45604fc7a88158e7d514d8e22e14ac746081e7a70d7690074dd0029ee37458d6"

[[package]]
name = "query"

@@ -2685,6 +2724,16 @@ dependencies = [
 "proc-macro2",
]

+[[package]]
+name = "radix_trie"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd"
+dependencies = [
+ "endian-type",
+ "nibble_vec",
+]
+
[[package]]
name = "rand"
version = "0.7.3"
@@ -2832,9 +2881,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"

[[package]]
name = "redox_syscall"
-version = "0.2.5"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9"
+checksum = "8270314b5ccceb518e7e578952f0b72b88222d02e8f77f5ecf7abbb673539041"
dependencies = [
 "bitflags",
]

@@ -2857,14 +2906,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64"
dependencies = [
 "getrandom 0.2.2",
- "redox_syscall 0.2.5",
+ "redox_syscall 0.2.6",
]

[[package]]
name = "regex"
-version = "1.4.5"
+version = "1.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19"
+checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759"
dependencies = [
 "aho-corasick",
 "memchr",

@@ -3079,9 +3128,9 @@ dependencies = [

[[package]]
name = "rustls"
-version = "0.19.0"
+version = "0.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b"
+checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7"
dependencies = [
 "base64 0.13.0",
 "log",

@@ -3102,6 +3151,29 @@ dependencies = [
 "security-framework",
]

+[[package]]
+name = "rustyline"
+version = "8.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e1b597fcd1eeb1d6b25b493538e5aa19629eb08932184b85fef931ba87e893"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "dirs-next",
+ "fs2",
+ "libc",
+ "log",
+ "memchr",
+ "nix",
+ "radix_trie",
+ "scopeguard",
+ "smallvec",
+ "unicode-segmentation",
+ "unicode-width",
+ "utf8parse",
+ "winapi",
+]
+
[[package]]
name = "ryu"
version = "1.0.5"

@@ -3367,9 +3439,9 @@ dependencies = [

[[package]]
name = "slab"
-version = "0.4.2"
+version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
+checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527"

[[package]]
name = "smallvec"

@@ -3581,7 +3653,7 @@ dependencies = [
 "cfg-if",
 "libc",
 "rand 0.8.3",
- "redox_syscall 0.2.5",
+ "redox_syscall 0.2.6",
 "remove_dir_all",
 "winapi",
]

@@ -3864,7 +3936,7 @@ dependencies = [
 "http-body",
 "hyper",
 "percent-encoding",
- "pin-project 1.0.6",
+ "pin-project 1.0.7",
 "prost",
 "prost-derive",
 "tokio",

@@ -3890,9 +3962,9 @@ dependencies = [

[[package]]
name = "tonic-health"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a93d6649c8f5436d65337af08887a516183a096d785ef1fc3acf69ed60dbec6b"
+checksum = "ee731ed39b584aa7fadc723eca93608c3de6ad316531d594632a2c85d28c4e59"
dependencies = [
 "async-stream",
 "bytes",

@@ -3912,7 +3984,7 @@ dependencies = [
 "futures-core",
 "futures-util",
 "indexmap",
- "pin-project 1.0.6",
+ "pin-project 1.0.7",
 "rand 0.8.3",
 "slab",
 "tokio",

@@ -3974,7 +4046,7 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2"
dependencies = [
- "pin-project 1.0.6",
+ "pin-project 1.0.7",
 "tracing",
]

@@ -4029,7 +4101,7 @@ dependencies = [
 "futures",
 "hashbrown 0.9.1",
 "observability_deps",
- "pin-project 1.0.6",
+ "pin-project 1.0.7",
 "tokio",
 "tokio-util",
]

@@ -4107,6 +4179,12 @@ dependencies = [
 "serde",
]

+[[package]]
+name = "utf8parse"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372"
+
[[package]]
name = "uuid"
version = "0.8.2"

@@ -4119,9 +4197,9 @@ dependencies = [

[[package]]
name = "vcpkg"
-version = "0.2.11"
+version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
+checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"

[[package]]
name = "vec_map"

@@ -4360,9 +4438,9 @@ checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a"

[[package]]
name = "zeroize"
-version = "1.2.0"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36"
+checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"

[[package]]
name = "zstd"
@@ -83,6 +83,7 @@ prettytable-rs = "0.8"
prost = "0.7"
# Forked to upgrade hyper and tokio
routerify = { git = "https://github.com/influxdata/routerify", rev = "274e250" }
+rustyline = "8.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.44"
serde_urlencoded = "0.7.0"
@@ -87,6 +87,12 @@ impl<'a> std::convert::From<DatabaseName<'a>> for String {
    }
}

+impl<'a> std::convert::From<&DatabaseName<'a>> for String {
+    fn from(name: &DatabaseName<'a>) -> Self {
+        name.to_string()
+    }
+}
+
impl<'a> std::convert::TryFrom<&'a str> for DatabaseName<'a> {
    type Error = DatabaseNameError;
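Editor's note: the impl added above lets callers turn a borrowed `DatabaseName` into a `String` without giving up ownership of the name. A minimal, hedged sketch of that effect follows; `DatabaseName` here is an illustrative stand-in (a thin wrapper over `Cow<str>`), not the crate's actual definition.

```rust
use std::borrow::Cow;

// Illustrative stand-in for the crate's DatabaseName wrapper.
struct DatabaseName<'a>(Cow<'a, str>);

impl<'a> std::fmt::Display for DatabaseName<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

// Mirrors the impl added in the hunk above: convert from a *borrowed* name.
impl<'a> From<&DatabaseName<'a>> for String {
    fn from(name: &DatabaseName<'a>) -> Self {
        name.to_string()
    }
}

fn main() {
    let db = DatabaseName(Cow::Borrowed("mydb"));
    let s: String = (&db).into(); // `db` is still usable afterwards
    assert_eq!(s, "mydb");
    assert_eq!(db.to_string(), "mydb");
}
```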
@@ -12,6 +12,7 @@ use generated_types::{
use influxdb_line_protocol::ParsedLine;
use regex::Regex;
use snafu::{OptionExt, Snafu};
+use std::num::NonZeroU64;
use std::{
    collections::HashMap,
    convert::{TryFrom, TryInto},

@@ -214,6 +215,10 @@ pub struct LifecycleRules {

    /// Do not allow writing new data to this database
    pub immutable: bool,
+
+    /// The background worker will evaluate whether there is work to do
+    /// at every `period` milliseconds.
+    pub background_worker_period_millis: Option<NonZeroU64>,
}

impl From<LifecycleRules> for management::LifecycleRules {

@@ -243,6 +248,9 @@ impl From<LifecycleRules> for management::LifecycleRules {
            drop_non_persisted: config.drop_non_persisted,
            persist: config.persist,
            immutable: config.immutable,
+            background_worker_period_millis: config
+                .background_worker_period_millis
+                .map_or(0, NonZeroU64::get),
        }
    }
}

@@ -261,6 +269,7 @@ impl TryFrom<management::LifecycleRules> for LifecycleRules {
            drop_non_persisted: proto.drop_non_persisted,
            persist: proto.persist,
            immutable: proto.immutable,
+            background_worker_period_millis: NonZeroU64::new(proto.background_worker_period_millis),
        })
    }
}

@@ -1378,6 +1387,7 @@ mod tests {
            drop_non_persisted: true,
            persist: true,
            immutable: true,
+            background_worker_period_millis: 1000,
        };

        let config: LifecycleRules = protobuf.clone().try_into().unwrap();

@@ -1417,6 +1427,25 @@ mod tests {
        assert_eq!(back.buffer_size_hard, protobuf.buffer_size_hard);
        assert_eq!(back.drop_non_persisted, protobuf.drop_non_persisted);
        assert_eq!(back.immutable, protobuf.immutable);
+        assert_eq!(
+            back.background_worker_period_millis,
+            protobuf.background_worker_period_millis
+        );
    }

+    #[test]
+    fn default_background_worker_period_millis() {
+        let protobuf = management::LifecycleRules {
+            background_worker_period_millis: 0,
+            ..Default::default()
+        };
+
+        let config: LifecycleRules = protobuf.clone().try_into().unwrap();
+        let back: management::LifecycleRules = config.into();
+        assert_eq!(
+            back.background_worker_period_millis,
+            protobuf.background_worker_period_millis
+        );
+    }
+
    #[test]
@@ -155,6 +155,13 @@ message LifecycleRules {

  // Do not allow writing new data to this database
  bool immutable = 8;
+
+  /// The background worker will evaluate whether there is work to do
+  /// at every `period` milliseconds.
+  ///
+  /// If 0, the default period is used
+  // (See data_types::database_rules::LifecycleRules::DEFAULT_BACKGROUND_WORKER_PERIOD_MILLIS)
+  uint64 background_worker_period_millis = 10;
}

message DatabaseRules {
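Editor's note: the Rust and protobuf hunks above encode the same convention — `0` on the wire means "unset, use the default period", and any other value must be non-zero. A small sketch of that round trip; only the field semantics are taken from the diff, the helper names are illustrative.

```rust
use std::num::NonZeroU64;

// Encoding: an unset period serialises as 0 (mirrors `map_or(0, NonZeroU64::get)`).
fn to_proto(period: Option<NonZeroU64>) -> u64 {
    period.map_or(0, NonZeroU64::get)
}

// Decoding: 0 comes back as None (mirrors `NonZeroU64::new(proto.background_worker_period_millis)`).
fn from_proto(millis: u64) -> Option<NonZeroU64> {
    NonZeroU64::new(millis)
}

fn main() {
    assert_eq!(to_proto(None), 0);
    assert_eq!(from_proto(0), None);
    assert_eq!(from_proto(1000).map(NonZeroU64::get), Some(1000));
    // Round-tripping preserves "unset".
    assert_eq!(to_proto(from_proto(0)), 0);
}
```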
@@ -59,7 +59,7 @@ pub fn encode(src: &[u64], dst: &mut Vec<u8>) -> Result<(), Box<dyn Error>> {
        }

        let max_val = 1u64 << (bit_n & 0x3f) as u64;
-        let mut val = ((idx + 2) << S8B_BIT_SIZE) as u64;
+        let mut val = ((idx as u64) + 2) << S8B_BIT_SIZE;
        for (k, in_v) in src[i..].iter().enumerate() {
            if k < int_n {
                if *in_v >= max_val {
```
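Editor's note: the one-line change above moves the `u64` cast before the shift, so the selector word is computed in 64-bit arithmetic rather than `usize`. Assuming `S8B_BIT_SIZE` is 60 (the usual simple8b layout, with a 4-bit selector in the top bits — an assumption, not verified against the crate), a sketch of the difference: the two forms agree on 64-bit targets, but only the new one is safe where `usize` is 32 bits wide.

```rust
// Assumed value; taken from the common simple8b layout, not from the crate.
const S8B_BIT_SIZE: usize = 60;

// Old form: the shift happens in `usize`, then the result is cast to u64.
fn selector_word_old(idx: usize) -> u64 {
    ((idx + 2) << S8B_BIT_SIZE) as u64
}

// New form: widen to u64 first, then shift.
fn selector_word_new(idx: usize) -> u64 {
    ((idx as u64) + 2) << S8B_BIT_SIZE
}

fn main() {
    // Equal on a 64-bit target; the old form would overflow the shift
    // on a 32-bit target, where usize cannot hold a 60-bit shift.
    assert_eq!(selector_word_old(3), selector_word_new(3));
}
```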
@@ -29,7 +29,7 @@ fn sequenced_entry(c: &mut Criterion) {
        554
    );

-    let clock_value = ClockValue::new(23);
+    let clock_value = ClockValue::try_from(23).unwrap();
    let server_id = ServerId::try_from(2).unwrap();

    group.bench_function("new_from_entry_bytes", |b| {
@@ -6,12 +6,17 @@ use data_types::{database_rules::Partitioner, server_id::ServerId};
use generated_types::wb;
use influxdb_line_protocol::{FieldValue, ParsedLine};

-use std::{collections::BTreeMap, convert::TryFrom, fmt};
+use std::{
+    collections::BTreeMap,
+    convert::{TryFrom, TryInto},
+    fmt,
+};

use chrono::Utc;
use crc32fast::Hasher;
use flatbuffers::FlatBufferBuilder;
use ouroboros::self_referencing;
use snafu::{ResultExt, Snafu};

pub fn type_description(value: wb::ColumnValue) -> &'static str {
    match value {

@@ -37,7 +42,6 @@ pub struct ReplicatedWrite {
    #[borrows(data)]
    #[covariant]
    write_buffer_batch: Option<wb::WriteBufferBatch<'this>>,
-    server_id: ServerId,
}

impl ReplicatedWrite {

@@ -63,7 +67,10 @@ impl ReplicatedWrite {
    }

    pub fn server_id(&self) -> ServerId {
-        *self.borrow_server_id()
+        self.fb()
+            .server_id()
+            .try_into()
+            .expect("ServerId should have been validated when this was built from flatbuffers")
    }

    /// Returns the serialized bytes for the write

@@ -78,24 +85,46 @@ impl ReplicatedWrite {
    }
}

-impl TryFrom<(Vec<u8>, ServerId)> for ReplicatedWrite {
-    type Error = flatbuffers::InvalidFlatbuffer;
+#[derive(Debug, Snafu)]
+pub enum ReplicatedWriteError {
+    #[snafu(display("{}", source))]
+    InvalidFlatbuffer {
+        source: flatbuffers::InvalidFlatbuffer,
+    },
+    #[snafu(display("{}", source))]
+    InvalidServerId {
+        source: data_types::server_id::Error,
+    },
+}

-    fn try_from(data: (Vec<u8>, ServerId)) -> Result<Self, Self::Error> {
+impl TryFrom<Vec<u8>> for ReplicatedWrite {
+    type Error = ReplicatedWriteError;
+
+    fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
        ReplicatedWriteTryBuilder {
-            data: data.0,
-            fb_builder: |data| flatbuffers::root::<wb::ReplicatedWrite<'_>>(data),
+            data,
+            fb_builder: |data| {
+                let fb = flatbuffers::root::<wb::ReplicatedWrite<'_>>(data)
+                    .context(InvalidFlatbuffer)?;
+
+                // Raise an error now if the server ID is invalid so that `SequencedEntry`'s
+                // `server_id` method can assume it has a valid `ServerId`
+                TryInto::<ServerId>::try_into(fb.server_id()).context(InvalidServerId)?;
+
+                Ok(fb)
+            },
            write_buffer_batch_builder: |data| match flatbuffers::root::<wb::ReplicatedWrite<'_>>(
                data,
-            )?
+            )
+            .context(InvalidFlatbuffer)?
            .payload()
            {
-                Some(payload) => Ok(Some(flatbuffers::root::<wb::WriteBufferBatch<'_>>(
-                    &payload,
-                )?)),
+                Some(payload) => Ok(Some(
+                    flatbuffers::root::<wb::WriteBufferBatch<'_>>(&payload)
+                        .context(InvalidFlatbuffer)?,
+                )),
                None => Ok(None),
            },
-            server_id: data.1,
        }
        .try_build()
    }

@@ -210,7 +239,7 @@ pub fn lines_to_replicated_write(
    fbb.finish(write, None);

    let (mut data, idx) = fbb.collapse();
-    ReplicatedWrite::try_from((data.split_off(idx), server_id))
+    ReplicatedWrite::try_from(data.split_off(idx))
        .expect("Flatbuffer data just constructed should be valid")
}
@@ -9,13 +9,17 @@ use data_types::{
use generated_types::entry as entry_fb;
use influxdb_line_protocol::{FieldValue, ParsedLine};

-use std::{collections::BTreeMap, convert::TryFrom};
+use std::{
+    collections::BTreeMap,
+    convert::{TryFrom, TryInto},
+    fmt::Formatter,
+    num::NonZeroU64,
+};

use chrono::{DateTime, Utc};
use flatbuffers::{FlatBufferBuilder, Follow, ForwardsUOffset, Vector, VectorIter, WIPOffset};
use ouroboros::self_referencing;
-use snafu::{ResultExt, Snafu};
-use std::fmt::Formatter;
+use snafu::{OptionExt, ResultExt, Snafu};

#[derive(Debug, Snafu)]
pub enum Error {

@@ -1161,18 +1165,42 @@ enum ColumnRaw<'a> {
}

#[derive(Debug, PartialOrd, PartialEq, Copy, Clone)]
-pub struct ClockValue(u64);
+pub struct ClockValue(NonZeroU64);

impl ClockValue {
-    pub fn get(&self) -> u64 {
+    pub fn new(v: NonZeroU64) -> Self {
+        Self(v)
+    }
+
+    pub fn get(&self) -> NonZeroU64 {
        self.0
    }

-    pub fn new(v: u64) -> Self {
-        Self { 0: v }
+    pub fn get_u64(&self) -> u64 {
+        self.0.get()
    }
}

+impl TryFrom<u64> for ClockValue {
+    type Error = ClockValueError;
+
+    fn try_from(value: u64) -> Result<Self, Self::Error> {
+        NonZeroU64::new(value)
+            .map(Self)
+            .context(ValueMayNotBeZero)
+            .map_err(Into::into)
+    }
+}
+
+#[derive(Debug, Snafu)]
+pub struct ClockValueError(InnerClockValueError);
+
+#[derive(Debug, Snafu)]
+enum InnerClockValueError {
+    #[snafu(display("Clock values must not be zero"))]
+    ValueMayNotBeZero,
+}
+
#[self_referencing]
#[derive(Debug)]
pub struct SequencedEntry {

@@ -1183,7 +1211,6 @@ pub struct SequencedEntry {
    #[borrows(data)]
    #[covariant]
    entry: Option<entry_fb::Entry<'this>>,
-    server_id: ServerId,
}

impl SequencedEntry {

@@ -1205,7 +1232,7 @@ impl SequencedEntry {
        let sequenced_entry = entry_fb::SequencedEntry::create(
            &mut fbb,
            &entry_fb::SequencedEntryArgs {
-                clock_value: clock_value.get(),
+                clock_value: clock_value.get_u64(),
                server_id: server_id.get_u32(),
                entry_bytes: Some(entry_bytes),
            },

@@ -1214,7 +1241,7 @@ impl SequencedEntry {
        fbb.finish(sequenced_entry, None);

        let (mut data, idx) = fbb.collapse();
-        let sequenced_entry = Self::try_from((data.split_off(idx), server_id))
+        let sequenced_entry = Self::try_from(data.split_off(idx))
            .expect("Flatbuffer data just constructed should be valid");

        Ok(sequenced_entry)

@@ -1239,28 +1266,62 @@ impl SequencedEntry {
    }

    pub fn clock_value(&self) -> ClockValue {
-        ClockValue::new(self.fb().clock_value())
+        self.fb()
+            .clock_value()
+            .try_into()
+            .expect("ClockValue should have been validated when this was built from flatbuffers")
    }

    pub fn server_id(&self) -> ServerId {
-        *self.borrow_server_id()
+        self.fb()
+            .server_id()
+            .try_into()
+            .expect("ServerId should have been validated when this was built from flatbuffers")
    }
}

-impl TryFrom<(Vec<u8>, ServerId)> for SequencedEntry {
-    type Error = flatbuffers::InvalidFlatbuffer;
+#[derive(Debug, Snafu)]
+pub enum SequencedEntryError {
+    #[snafu(display("{}", source))]
+    InvalidFlatbuffer {
+        source: flatbuffers::InvalidFlatbuffer,
+    },
+    #[snafu(display("{}", source))]
+    InvalidServerId {
+        source: data_types::server_id::Error,
+    },
+    #[snafu(display("{}", source))]
+    InvalidClockValue { source: ClockValueError },
+}

-    fn try_from(data: (Vec<u8>, ServerId)) -> Result<Self, Self::Error> {
+impl TryFrom<Vec<u8>> for SequencedEntry {
+    type Error = SequencedEntryError;
+
+    fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
        SequencedEntryTryBuilder {
-            server_id: data.1,
-            data: data.0,
-            fb_builder: |data| flatbuffers::root::<entry_fb::SequencedEntry<'_>>(data),
-            entry_builder: |data| match flatbuffers::root::<entry_fb::SequencedEntry<'_>>(data)?
+            data,
+            fb_builder: |data| {
+                let fb = flatbuffers::root::<entry_fb::SequencedEntry<'_>>(data)
+                    .context(InvalidFlatbuffer)?;
+
+                // Raise an error now if the server ID is invalid so that `SequencedEntry`'s
+                // `server_id` method can assume it has a valid `ServerId`
+                TryInto::<ServerId>::try_into(fb.server_id()).context(InvalidServerId)?;
+
+                // Raise an error now if the clock value is invalid so that `SequencedEntry`'s
+                // `clock_value` method can assume it has a valid `ClockValue`
+                TryInto::<ClockValue>::try_into(fb.clock_value()).context(InvalidClockValue)?;
+
+                Ok(fb)
+            },
+            entry_builder: |data| match flatbuffers::root::<entry_fb::SequencedEntry<'_>>(data)
+                .context(InvalidFlatbuffer)?
                .entry_bytes()
            {
-                Some(entry_bytes) => Ok(Some(flatbuffers::root::<entry_fb::Entry<'_>>(
-                    &entry_bytes,
-                )?)),
+                Some(entry_bytes) => Ok(Some(
+                    flatbuffers::root::<entry_fb::Entry<'_>>(&entry_bytes)
+                        .context(InvalidFlatbuffer)?,
+                )),
                None => Ok(None),
            },
        }

@@ -1888,7 +1949,7 @@ mod tests {
        lines_to_sharded_entries(&lines, sharder(1).as_ref(), &partitioner(1)).unwrap();

        let entry_bytes = sharded_entries.first().unwrap().entry.data();
-        let clock_value = ClockValue::new(23);
+        let clock_value = ClockValue::try_from(23).unwrap();
        let server_id = ServerId::try_from(2).unwrap();
        let sequenced_entry =
            SequencedEntry::new_from_entry_bytes(clock_value, server_id, entry_bytes).unwrap();

@@ -1958,4 +2019,85 @@ mod tests {
        let values = col.values().f64_values().unwrap();
        assert_eq!(&values, &[None, Some(23.2), None]);
    }
+
+    #[test]
+    fn validate_sequenced_entry_server_id() {
+        let lp = vec![
+            "a,host=a val=23i 983",
+            "a,host=a,region=west val2=23.2 2343",
+            "a val=21i,bool=true,string=\"hello\" 222",
+        ]
+        .join("\n");
+        let lines: Vec<_> = parse_lines(&lp).map(|l| l.unwrap()).collect();
+
+        let sharded_entries =
+            lines_to_sharded_entries(&lines, sharder(1).as_ref(), &partitioner(1)).unwrap();
+
+        let entry_bytes = sharded_entries.first().unwrap().entry.data();
+
+        const OVERHEAD: usize = 4 * std::mem::size_of::<u64>();
+        let mut fbb = FlatBufferBuilder::new_with_capacity(entry_bytes.len() + OVERHEAD);
+
+        let entry_bytes = fbb.create_vector_direct(entry_bytes);
+        let sequenced_entry = entry_fb::SequencedEntry::create(
+            &mut fbb,
+            &entry_fb::SequencedEntryArgs {
+                clock_value: 3,
+                server_id: 0, // <----- IMPORTANT PART this is invalid and should error
+                entry_bytes: Some(entry_bytes),
+            },
+        );
+
+        fbb.finish(sequenced_entry, None);
+
+        let (mut data, idx) = fbb.collapse();
+        let result = SequencedEntry::try_from(data.split_off(idx));
+
+        assert!(
+            matches!(result, Err(SequencedEntryError::InvalidServerId { .. })),
+            "result was {:?}",
+            result
+        );
+    }
+
+    #[test]
+    fn validate_sequenced_entry_clock_value() {
+        let lp = vec![
+            "a,host=a val=23i 983",
+            "a,host=a,region=west val2=23.2 2343",
+            "a val=21i,bool=true,string=\"hello\" 222",
+        ]
+        .join("\n");
+        let lines: Vec<_> = parse_lines(&lp).map(|l| l.unwrap()).collect();
+
+        let sharded_entries =
+            lines_to_sharded_entries(&lines, sharder(1).as_ref(), &partitioner(1)).unwrap();
+
+        let entry_bytes = sharded_entries.first().unwrap().entry.data();
+
+        const OVERHEAD: usize = 4 * std::mem::size_of::<u64>();
+        let mut fbb = FlatBufferBuilder::new_with_capacity(entry_bytes.len() + OVERHEAD);
+
+        let entry_bytes = fbb.create_vector_direct(entry_bytes);
+
+        let sequenced_entry = entry_fb::SequencedEntry::create(
+            &mut fbb,
+            &entry_fb::SequencedEntryArgs {
+                clock_value: 0, // <----- IMPORTANT PART this is invalid and should error
+                server_id: 5,
+                entry_bytes: Some(entry_bytes),
+            },
+        );
+
+        fbb.finish(sequenced_entry, None);
+
+        let (mut data, idx) = fbb.collapse();
+        let result = SequencedEntry::try_from(data.split_off(idx));
+
+        assert!(
+            matches!(result, Err(SequencedEntryError::InvalidClockValue { .. })),
+            "result was {:?}",
+            result
+        );
+    }
}
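Editor's note: the entry changes above apply one pattern in two places — reject invalid values (a zero server id, a zero clock value) when the bytes are first parsed, so the accessors can `expect` validity later. A hedged, self-contained re-derivation of the `ClockValue` half using a plain `TryFrom`; the real type additionally wraps its error in the crate's snafu types.

```rust
use std::convert::TryFrom;
use std::num::NonZeroU64;

// Illustrative equivalent of the new ClockValue(NonZeroU64).
#[derive(Debug, PartialEq, Copy, Clone)]
struct ClockValue(NonZeroU64);

impl TryFrom<u64> for ClockValue {
    type Error = &'static str;

    // Zero is rejected once, at the boundary.
    fn try_from(v: u64) -> Result<Self, Self::Error> {
        NonZeroU64::new(v)
            .map(ClockValue)
            .ok_or("clock values must not be zero")
    }
}

impl ClockValue {
    // Accessors can now assume a valid value, just as the flatbuffer-backed
    // `clock_value()` / `server_id()` methods in the diff do with `expect`.
    fn get_u64(&self) -> u64 {
        self.0.get()
    }
}

fn main() {
    assert!(ClockValue::try_from(0).is_err());
    assert_eq!(ClockValue::try_from(23).unwrap().get_u64(), 23);
}
```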
@@ -11,7 +11,7 @@ mod tests;
use observability_deps::{
    opentelemetry::metrics::Meter as OTMeter,
    opentelemetry::{
-        metrics::{registry::RegistryMeterProvider, MeterProvider},
+        metrics::{registry::RegistryMeterProvider, MeterProvider, ValueRecorderBuilder},
        sdk::{
            export::metrics::ExportKindSelector,
            metrics::{controllers, selectors::simple::Selector},

@@ -236,6 +236,85 @@ impl Domain {
        metrics::Counter::new(counter, default_labels)
    }
+
+    /// Registers a new histogram metric.
+    ///
+    /// `name` in the common case should be a noun describing the thing being
+    /// observed. For example: "request", "migration", "conversion".
+    ///
+    /// `attribute` should be the attribute that is being observed. Good examples
+    /// of attributes are: "duration" or "usage".
+    ///
+    /// `unit` is required and will appear at the end of the metric name.
+    /// Consider reviewing
+    /// https://prometheus.io/docs/practices/naming/#base-units for appropriate
+    /// units. Examples include "bytes", "seconds", "celsius"
+    ///
+    pub fn register_histogram_metric(
+        &self,
+        name: &str,
+        attribute: &str,
+        unit: &str,
+        description: impl Into<String>,
+    ) -> HistogramBuilder<'_> {
+        let histogram = self
+            .meter
+            .f64_value_recorder(format!(
+                "{}.{}",
+                self.build_metric_prefix(attribute, Some(name)),
+                unit
+            ))
+            .with_description(description);
+
+        HistogramBuilder::new(histogram)
+    }
}

+#[derive(Debug)]
+pub struct HistogramBuilder<'a> {
+    histogram: ValueRecorderBuilder<'a, f64>,
+    labels: Option<Vec<KeyValue>>,
+    boundaries: Option<Vec<f64>>,
+}
+
+impl<'a> HistogramBuilder<'a> {
+    fn new(histogram: ValueRecorderBuilder<'a, f64>) -> Self {
+        Self {
+            histogram,
+            labels: None,
+            boundaries: None,
+        }
+    }
+
+    /// Set some default labels on the Histogram metric.
+    pub fn with_labels(self, labels: Vec<KeyValue>) -> Self {
+        Self {
+            histogram: self.histogram,
+            labels: Some(labels),
+            boundaries: None,
+        }
+    }
+
+    /// Set some bucket boundaries on the Histogram metric.
+    pub fn with_bucket_boundaries(self, boundaries: Vec<f64>) -> Self {
+        Self {
+            histogram: self.histogram,
+            labels: self.labels,
+            boundaries: Some(boundaries),
+        }
+    }
+
+    /// Initialise the Histogram metric ready for observations.
+    pub fn init(self) -> metrics::Histogram {
+        let labels = self.labels.unwrap_or_default();
+        let boundaries = self.boundaries.unwrap_or_default();
+
+        if !boundaries.is_empty() {
+            todo!("histogram boundaries not yet configurable.");
+        }
+
+        metrics::Histogram::new(self.histogram.init(), labels)
+    }
+}
+
#[cfg(test)]

@@ -339,8 +418,8 @@ mod test {
    #[test]
    fn red_metric_labels() {
-        let reg = MetricRegistry::new();
-        let domain = reg.register_domain("ftp");
+        let reg = TestMetricRegistry::default();
+        let domain = reg.registry().register_domain("ftp");

        // create a RED metrics
        let metric = domain.register_red_metric(None);

@@ -396,7 +475,7 @@ mod test {
            r#"ftp_requests_total{account="abc123",status="ok"} 1"#,
            r#"ftp_requests_total{account="other",status="client_error"} 1"#,
        ];
-        let metrics_response = String::from_utf8(reg.metrics_as_text()).unwrap();
+        let metrics_response = String::from_utf8(reg.registry().metrics_as_text()).unwrap();
        for line in should_contain_lines {
            assert!(
                metrics_response.contains(line),

@@ -436,4 +515,70 @@ mod test {
            .eq(1.0)
            .unwrap();
    }
+
+    #[test]
+    fn histogram_metric() {
+        let reg = TestMetricRegistry::default();
+        let domain = reg.registry().register_domain("chunker");
+
+        // create a histogram metric using the builder
+        let metric = domain
+            .register_histogram_metric(
+                "conversion",
+                "duration",
+                "seconds",
+                "The distribution of chunk conversion latencies",
+            )
+            .with_labels(vec![KeyValue::new("db", "mydb")]) // Optional
+            // .with_bucket_boundaries(vec![1,2,3]) <- this is a future TODO
+            .init();
+
+        // Get an observation to start measuring something.
+        metric.observe(22.32); // manual observation
+
+        // There is also a timer that will handle durations for you.
+        let ob = metric.timer();
+        std::thread::sleep(Duration::from_millis(20));
+
+        // will record a duration >= 20ms
+        ob.record_with_labels(&[KeyValue::new("stage", "beta")]);
+
+        reg.has_metric_family("chunker_conversion_duration_seconds")
+            .with_labels(&[("db", "mydb")])
+            .histogram()
+            .sample_count_eq(1)
+            .unwrap();
+
+        reg.has_metric_family("chunker_conversion_duration_seconds")
+            .with_labels(&[("db", "mydb")])
+            .histogram()
+            .sample_sum_eq(22.32)
+            .unwrap();
+
+        reg.has_metric_family("chunker_conversion_duration_seconds")
+            .with_labels(&[("db", "mydb"), ("stage", "beta")])
+            .histogram()
+            .sample_count_eq(1)
+            .unwrap();
+
+        reg.has_metric_family("chunker_conversion_duration_seconds")
+            .with_labels(&[("db", "mydb"), ("stage", "beta")])
+            .histogram()
+            .sample_sum_gte(0.02) // 20ms
+            .unwrap();
+
+        // TODO(edd): need to figure out how to set custom buckets then
+        // these assertions can be tested.
+        // reg.has_metric_family("chunker_conversion_duration_seconds")
+        //     .with_labels(&[("db", "mydb"), ("stage", "beta")])
+        //     .histogram()
+        //     .bucket_cumulative_count_eq(11.2, 1)
+        //     .unwrap();
+
+        // reg.has_metric_family("chunker_conversion_duration_seconds")
+        //     .with_labels(&[("db", "mydb"), ("stage", "beta")])
+        //     .histogram()
+        //     .bucket_cumulative_count_eq(0.12, 0)
+        //     .unwrap();
+    }
}
@@ -5,7 +5,7 @@ use std::{
};

use observability_deps::opentelemetry::metrics::{
-    Counter as OTCounter, ValueRecorder as OTHistorgram,
+    Counter as OTCounter, ValueRecorder as OTHistogram,
};

pub use observability_deps::opentelemetry::KeyValue;

@@ -70,13 +70,13 @@ impl Display for RedRequestStatus {
pub struct RedMetric {
    default_labels: Vec<KeyValue>,
    requests: OTCounter<u64>,
-    duration: OTHistorgram<f64>,
+    duration: OTHistogram<f64>,
}

impl RedMetric {
    pub(crate) fn new(
        requests: OTCounter<u64>,
-        duration: OTHistorgram<f64>,
+        duration: OTHistogram<f64>,
        mut default_labels: Vec<KeyValue>,
    ) -> Self {
        // TODO(edd): decide what to do if `labels` contains

@@ -273,3 +273,73 @@ impl Counter {
        self.add_with_labels(1, labels)
    }
}
+
+#[derive(Debug)]
+/// A Histogram is a metric exposing a distribution of observations.
+pub struct Histogram {
+    default_labels: Vec<KeyValue>,
+    histogram: OTHistogram<f64>,
+}
+
+impl Histogram {
+    pub(crate) fn new(histogram: OTHistogram<f64>, default_labels: Vec<KeyValue>) -> Self {
+        Self {
+            histogram,
+            default_labels,
+        }
+    }
+
+    /// Add a new observation to the histogram including the provided labels.
+    pub fn observe_with_labels(&self, observation: f64, labels: &[KeyValue]) {
+        // merge labels
+        let labels = if labels.is_empty() {
+            // If there are no labels specified just borrow defaults
+            Cow::Borrowed(&self.default_labels)
+        } else {
+            // Otherwise merge the provided labels and the defaults.
+            // Note: provided labels need to go last so that they overwrite
+            // any default labels.
+            //
+            // PERF(edd): this seems expensive to me.
+            let mut new_labels: Vec<KeyValue> = self.default_labels.clone();
+            new_labels.extend_from_slice(labels);
+            Cow::Owned(new_labels)
+        };
+
+        self.histogram.record(observation, &labels);
+    }
+
+    /// Add a new observation to the histogram
+    pub fn observe(&self, observation: f64) {
+        self.observe_with_labels(observation, &[]);
+    }
+
+    /// A helper method for observing latencies. Returns a new timing instrument
+    /// which will handle submitting an observation containing a duration.
+    pub fn timer(&self) -> HistogramTimer<'_> {
+        HistogramTimer::new(&self)
+    }
+}
+
+#[derive(Debug)]
+pub struct HistogramTimer<'a> {
+    start: Instant,
+    histogram: &'a Histogram,
+}
+
+impl<'a> HistogramTimer<'a> {
+    pub fn new(histogram: &'a Histogram) -> Self {
+        Self {
+            start: Instant::now(),
+            histogram,
+        }
+    }
+
+    pub fn record(self) {
+        self.record_with_labels(&[]);
+    }
+
+    pub fn record_with_labels(self, labels: &[KeyValue]) {
+        self.histogram
+            .observe_with_labels(self.start.elapsed().as_secs_f64(), labels);
+    }
+}
@@ -20,6 +20,13 @@ pub enum Error {
        metrics: String,
    },

+    #[snafu(display("bucket {:?} is not in metric family: {}\n{}", bound, name, metrics))]
+    HistogramBucketNotFoundError {
+        bound: f64,
+        name: String,
+        metrics: String,
+    },
+
    #[snafu(display("metric '{}' failed assertion: '{}'\n{}", name, msg, metrics))]
    FailedMetricAssertionError {
        name: String,

@@ -253,56 +260,56 @@ impl<'a> Counter<'a> {
    pub fn gte(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v < c.get_value() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_value() >= v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} >= {:?} failed", c.get_value(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn gt(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v <= c.get_value() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_value() > v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} > {:?} failed", c.get_value(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn lte(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v > c.get_value() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_value() <= v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} <= {:?} failed", c.get_value(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn lt(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v >= c.get_value() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_value() < v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} < {:?} failed", c.get_value(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }
}

@@ -318,143 +325,168 @@ pub struct Histogram<'a> {
}

impl<'a> Histogram<'a> {
+    pub fn bucket_cumulative_count_eq(self, bound: f64, count: u64) -> Result<(), Error> {
+        let c = self.c?; // return previous errors
+
+        let bucket = c
+            .get_bucket()
+            .iter()
+            .find(|bucket| bucket.get_upper_bound() == bound)
+            .context(HistogramBucketNotFoundError {
+                bound,
+                name: &self.family_name,
+                metrics: &self.metric_dump,
+            })?;
+
+        ensure!(
+            count == bucket.get_cumulative_count(),
+            FailedMetricAssertionError {
+                name: &self.family_name,
+                msg: format!("{:?} == {:?} failed", bucket.get_cumulative_count(), count),
+                metrics: self.metric_dump,
+            }
+        );
+
+        Ok(())
+    }
+
    pub fn sample_sum_eq(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v != c.get_sample_sum() {
-            return FailedMetricAssertionError {
+        ensure!(
+            v == c.get_sample_sum(),
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} == {:?} failed", c.get_sample_sum(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_sum_gte(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v < c.get_sample_sum() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_sum() >= v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} >= {:?} failed", c.get_sample_sum(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_sum_gt(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v <= c.get_sample_sum() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_sum() > v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} > {:?} failed", c.get_sample_sum(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_sum_lte(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v > c.get_sample_sum() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_sum() <= v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} <= {:?} failed", c.get_sample_sum(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_sum_lt(self, v: f64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v >= c.get_sample_sum() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_sum() < v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} < {:?} failed", c.get_sample_sum(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_count_eq(self, v: u64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v != c.get_sample_count() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_count() == v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} == {:?} failed", c.get_sample_count(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_count_gte(self, v: u64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v < c.get_sample_count() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_count() >= v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} >= {:?} failed", c.get_sample_count(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_count_gt(self, v: u64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v <= c.get_sample_count() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_count() > v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} > {:?} failed", c.get_sample_count(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_count_lte(self, v: u64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v > c.get_sample_count() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_count() <= v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} <= {:?} failed", c.get_sample_count(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }

    pub fn sample_count_lt(self, v: u64) -> Result<(), Error> {
        let c = self.c?; // return previous errors

-        if v >= c.get_sample_count() {
-            return FailedMetricAssertionError {
+        ensure!(
+            c.get_sample_count() < v,
+            FailedMetricAssertionError {
                name: self.family_name,
                msg: format!("{:?} < {:?} failed", c.get_sample_count(), v),
                metrics: self.metric_dump,
            }
-            .fail();
-        }
+        );
        Ok(())
    }
}
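Editor's note: every assertion helper above is rewritten from a hand-rolled `if … { return …fail(); }` block into snafu's `ensure!` macro, which bails out with the given context selector when the condition is false. A compact, self-contained sketch of the pattern with an illustrative error enum; the selector style follows the snafu 0.6-era usage seen in the diff.

```rust
use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("metric '{}' failed assertion: '{}'", name, msg))]
    FailedMetricAssertionError { name: String, msg: String },
}

// Equivalent to one converted helper: return Ok(()) when the observed value
// satisfies the bound, otherwise construct the error through ensure!.
fn assert_gte(name: &str, value: f64, bound: f64) -> Result<(), Error> {
    ensure!(
        value >= bound,
        FailedMetricAssertionError {
            name,
            msg: format!("{:?} >= {:?} failed", value, bound),
        }
    );
    Ok(())
}

fn main() {
    assert!(assert_gte("ftp_requests_total", 2.0, 1.0).is_ok());
    assert!(assert_gte("ftp_requests_total", 0.5, 1.0).is_err());
}
```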
@@ -25,7 +25,7 @@ fn chunk(count: usize) -> Chunk {
    for write in entry.partition_writes().iter().flatten() {
        chunk
            .write_table_batches(
-                ClockValue::new(0),
+                ClockValue::try_from(5).unwrap(),
                ServerId::try_from(1).unwrap(),
                write.table_batches().as_slice(),
            )

@@ -16,7 +16,7 @@ fn write_chunk(count: usize, entries: &[Entry]) {
    for write in entry.partition_writes().iter().flatten() {
        chunk
            .write_table_batches(
-                ClockValue::new(0),
+                ClockValue::try_from(5).unwrap(),
                ServerId::try_from(1).unwrap(),
                write.table_batches().as_slice(),
            )

@@ -247,7 +247,7 @@ pub mod test_helpers {
        for w in entry.partition_writes().unwrap() {
            chunk.write_table_batches(
-                ClockValue::new(0),
+                ClockValue::try_from(5).unwrap(),
                ServerId::try_from(1).unwrap(),
                &w.table_batches(),
            )?;

@@ -424,13 +424,14 @@ mod tests {
        let mut dictionary = Dictionary::new();
        let mut table = Table::new(dictionary.lookup_value_or_insert("foo"));
        let server_id = ServerId::try_from(1).unwrap();
+        let clock_value = ClockValue::try_from(5).unwrap();

        let lp = "foo,t1=asdf iv=1i,uv=1u,fv=1.0,bv=true,sv=\"hi\" 1";
        let entry = lp_to_entry(&lp);
        table
            .write_columns(
                &mut dictionary,
-                ClockValue::new(0),
+                clock_value,
                server_id,
                entry
                    .partition_writes()

@@ -449,7 +450,7 @@ mod tests {
        let response = table
            .write_columns(
                &mut dictionary,
-                ClockValue::new(0),
+                clock_value,
                server_id,
                entry
                    .partition_writes()

@@ -483,7 +484,7 @@ mod tests {
        let response = table
            .write_columns(
                &mut dictionary,
-                ClockValue::new(0),
+                clock_value,
                server_id,
                entry
                    .partition_writes()

@@ -517,7 +518,7 @@ mod tests {
        let response = table
            .write_columns(
                &mut dictionary,
-                ClockValue::new(0),
+                clock_value,
                server_id,
                entry
                    .partition_writes()

@@ -551,7 +552,7 @@ mod tests {
        let response = table
            .write_columns(
                &mut dictionary,
-                ClockValue::new(0),
+                clock_value,
                server_id,
                entry
                    .partition_writes()

@@ -585,7 +586,7 @@ mod tests {
        let response = table
            .write_columns(
                &mut dictionary,
-                ClockValue::new(0),
+                clock_value,
                server_id,
                entry
                    .partition_writes()

@@ -619,7 +620,7 @@ mod tests {
        let response = table
            .write_columns(
                &mut dictionary,
-                ClockValue::new(0),
+                clock_value,
                server_id,
                entry
                    .partition_writes()

@@ -664,7 +665,7 @@ mod tests {
        table
            .write_columns(
                dictionary,
-                ClockValue::new(0),
+                ClockValue::try_from(5).unwrap(),
                ServerId::try_from(1).unwrap(),
                batch.columns(),
            )
@ -10,7 +10,7 @@ pub mod seriesset;
pub mod stringset;
mod task;
pub use context::{DEFAULT_CATALOG, DEFAULT_SCHEMA};
use futures::Future;
use futures::{future, Future};

use std::sync::Arc;

@ -26,7 +26,7 @@ use schema_pivot::SchemaPivotNode;
use fieldlist::{FieldList, IntoFieldList};
use seriesset::{Error as SeriesSetError, SeriesSetConverter, SeriesSetItem};
use stringset::{IntoStringSet, StringSetRef};
use tokio::sync::mpsc::{self, error::SendError};
use tokio::sync::mpsc::error::SendError;

use snafu::{ResultExt, Snafu};

@ -125,31 +125,22 @@ impl Executor {
}
}

/// Executes the embedded plans, each as separate tasks, sending
/// the resulting `SeriesSet`s one by one to the `tx` channel.
/// Executes the embedded plans, each as separate tasks combining the results
/// into the returned collection of items.
///
/// The SeriesSets are guaranteed to come back ordered by table_name
///
/// Note that the returned future resolves (e.g. "returns") once
/// all plans have been sent to `tx`. This means that the future
/// will not resolve if there is nothing hooked up receiving
/// results from the other end of the channel and the channel
/// can't hold all the resulting series.
/// The SeriesSets are guaranteed to come back ordered by table_name.
pub async fn to_series_set(
&self,
series_set_plans: SeriesSetPlans,
tx: mpsc::Sender<Result<SeriesSetItem, SeriesSetError>>,
) -> Result<()> {
) -> Result<Vec<SeriesSetItem>, Error> {
let SeriesSetPlans { mut plans } = series_set_plans;

if plans.is_empty() {
return Ok(());
return Ok(vec![]);
}

// sort by table name and send the results to separate
// channels
// sort plans by table name
plans.sort_by(|a, b| a.table_name.cmp(&b.table_name));
let mut rx_channels = Vec::new(); // sorted by table names

// Run the plans in parallel
let handles = plans

@ -157,8 +148,6 @@ impl Executor {
.map(|plan| {
// TODO run these on some executor other than the main tokio pool (maybe?)
let ctx = self.new_context();
let (plan_tx, plan_rx) = mpsc::channel(1);
rx_channels.push(plan_rx);

self.exec.spawn(async move {
let SeriesSetPlan {

@ -180,7 +169,7 @@ impl Executor {
.await
.context(SeriesSetExecution)?;

SeriesSetConverter::new(plan_tx)
SeriesSetConverter::default()
.convert(
table_name,
tag_columns,

@ -189,30 +178,21 @@ impl Executor {
it,
)
.await
.context(SeriesSetConversion)?;

Ok(())
.context(SeriesSetConversion)
})
})
.collect::<Vec<_>>();

// transfer data from the rx streams in order
for mut rx in rx_channels {
while let Some(r) = rx.recv().await {
tx.send(r)
.await
.map_err(|e| Error::SendingDuringConversion {
source: Box::new(e),
})?
}
// join_all ensures that the results are consumed in the same order they
// were spawned maintaining the guarantee to return results ordered
// by the plan sort order.
let handles = future::try_join_all(handles).await.context(TaskJoinError)?;
let mut results = vec![];
for handle in handles {
results.extend(handle?.into_iter());
}

// now, wait for all the values to resolve so we can report
// any errors
for join_handle in handles {
join_handle.await.context(TaskJoinError)??;
}
Ok(())
Ok(results)
}

/// Executes `plan` and return the resulting FieldList

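With this change `to_series_set` no longer takes an `mpsc::Sender`; it collects the items itself and returns them in plan order. A hedged sketch of how a caller might consume the new signature (variable names are illustrative and the plan construction is elided):

// Assumes `Executor`, `SeriesSetPlans`, `SeriesSetItem`, and the module's
// `Error` type are in scope, as in the diff above.
async fn collect_series(executor: &Executor, plans: SeriesSetPlans) -> Result<(), Error> {
    // The returned Vec is ordered by table name, matching the plan sort order.
    let items: Vec<SeriesSetItem> = executor.to_series_set(plans).await?;
    for item in items {
        match item {
            SeriesSetItem::GroupStart(group) => println!("group: {:?}", group),
            SeriesSetItem::Data(series_set) => println!("series for table: {:?}", series_set.table_name),
        }
    }
    Ok(())
}
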
@ -31,7 +31,7 @@ use arrow_deps::{
datafusion::physical_plan::SendableRecordBatchStream,
};
use snafu::{ResultExt, Snafu};
use tokio::sync::mpsc::{self, error::SendError};
use tokio::sync::mpsc::error::SendError;
use tokio_stream::StreamExt;

use croaring::bitmap::Bitmap;

@ -117,20 +117,13 @@ pub enum SeriesSetItem {
Data(SeriesSet),
}

// Handles converting record batches into SeriesSets, and sending them
// to tx
#[derive(Debug)]
pub struct SeriesSetConverter {
tx: mpsc::Sender<Result<SeriesSetItem>>,
}
// Handles converting record batches into SeriesSets
#[derive(Debug, Default)]
pub struct SeriesSetConverter {}

impl SeriesSetConverter {
pub fn new(tx: mpsc::Sender<Result<SeriesSetItem>>) -> Self {
Self { tx }
}

/// Convert the results from running a DataFusion plan into the
/// appropriate SeriesSetItems, sending them self.tx
/// appropriate SeriesSetItems.
///
/// The results must be in the logical format described in this
/// module's documentation (i.e. ordered by tag keys)

@ -146,44 +139,15 @@ impl SeriesSetConverter {
///
/// it: record batch iterator that produces data in the desired order
pub async fn convert(
&mut self,
table_name: Arc<String>,
tag_columns: Arc<Vec<Arc<String>>>,
field_columns: FieldColumns,
num_prefix_tag_group_columns: Option<usize>,
it: SendableRecordBatchStream,
) -> Result<()> {
// Make sure that any error that results from processing is sent along
if let Err(e) = self
.convert_impl(
table_name,
tag_columns,
field_columns,
num_prefix_tag_group_columns,
it,
)
.await
{
self.tx
.send(Err(e))
.await
.map_err(|e| Error::SendingDuringConversion {
source: Box::new(e),
})?
}
Ok(())
}

/// Does the actual conversion, returning any error in processing
pub async fn convert_impl(
&mut self,
table_name: Arc<String>,
tag_columns: Arc<Vec<Arc<String>>>,
field_columns: FieldColumns,
num_prefix_tag_group_columns: Option<usize>,
mut it: SendableRecordBatchStream,
) -> Result<()> {
) -> Result<Vec<SeriesSetItem>, Error> {
let mut group_generator = GroupGenerator::new(num_prefix_tag_group_columns);
let mut results = vec![];

// for now, only handle a single record batch
if let Some(batch) = it.next().await {

@ -255,25 +219,15 @@ impl SeriesSetConverter {
})
.collect::<Vec<_>>();

results.reserve(series_sets.len());
for series_set in series_sets {
if let Some(group_desc) = group_generator.next_series(&series_set) {
self.tx
.send(Ok(SeriesSetItem::GroupStart(group_desc)))
.await
.map_err(|e| Error::SendingDuringGroupedConversion {
source: Box::new(e),
})?;
results.push(SeriesSetItem::GroupStart(group_desc));
}

self.tx
.send(Ok(SeriesSetItem::Data(series_set)))
.await
.map_err(|e| Error::SendingDuringConversion {
source: Box::new(e),
})?;
results.push(SeriesSetItem::Data(series_set));
}
}
Ok(())
Ok(results)
}

/// returns a bitset with all row indexes where the value of the

@ -494,7 +448,7 @@ mod tests {
let results = convert(table_name, &tag_columns, &field_columns, input).await;

assert_eq!(results.len(), 1);
let series_set = results[0].as_ref().expect("Correctly converted");
let series_set = &results[0];

assert_eq!(*series_set.table_name, "foo");
assert!(series_set.tags.is_empty());

@ -548,7 +502,7 @@ mod tests {
let results = convert(table_name, &tag_columns, &field_columns, input).await;

assert_eq!(results.len(), 1);
let series_set = results[0].as_ref().expect("Correctly converted");
let series_set = &results[0];

assert_eq!(*series_set.table_name, "foo");
assert!(series_set.tags.is_empty());

@ -602,7 +556,7 @@ mod tests {
let results = convert(table_name, &tag_columns, &field_columns, input).await;

assert_eq!(results.len(), 1);
let series_set = results[0].as_ref().expect("Correctly converted");
let series_set = &results[0];

assert_eq!(*series_set.table_name, "bar");
assert_eq!(series_set.tags, str_pair_vec_to_vec(&[("tag_a", "one")]));

@ -639,7 +593,7 @@ mod tests {
let results = convert(table_name, &tag_columns, &field_columns, input).await;

assert_eq!(results.len(), 2);
let series_set1 = results[0].as_ref().expect("Correctly converted");
let series_set1 = &results[0];

assert_eq!(*series_set1.table_name, "foo");
assert_eq!(series_set1.tags, str_pair_vec_to_vec(&[("tag_a", "one")]));

@ -650,7 +604,7 @@ mod tests {
assert_eq!(series_set1.start_row, 0);
assert_eq!(series_set1.num_rows, 3);

let series_set2 = results[1].as_ref().expect("Correctly converted");
let series_set2 = &results[1];

assert_eq!(*series_set2.table_name, "foo");
assert_eq!(series_set2.tags, str_pair_vec_to_vec(&[("tag_a", "two")]));

@ -688,7 +642,7 @@ mod tests {
let results = convert(table_name, &tag_columns, &field_columns, input).await;

assert_eq!(results.len(), 3);
let series_set1 = results[0].as_ref().expect("Correctly converted");
let series_set1 = &results[0];

assert_eq!(*series_set1.table_name, "foo");
assert_eq!(

@ -698,7 +652,7 @@ mod tests {
assert_eq!(series_set1.start_row, 0);
assert_eq!(series_set1.num_rows, 2);

let series_set2 = results[1].as_ref().expect("Correctly converted");
let series_set2 = &results[1];

assert_eq!(*series_set2.table_name, "foo");
assert_eq!(

@ -708,7 +662,7 @@ mod tests {
assert_eq!(series_set2.start_row, 2);
assert_eq!(series_set2.num_rows, 1);

let series_set3 = results[2].as_ref().expect("Correctly converted");
let series_set3 = &results[2];

assert_eq!(*series_set3.table_name, "foo");
assert_eq!(

@ -757,11 +711,11 @@ mod tests {

assert_eq!(results.len(), 5, "results were\n{:#?}", results); // 3 series, two groups (one and two)

let group_1 = extract_group(results[0].as_ref().expect("correctly made group"));
let series_set1 = extract_series_set(results[1].as_ref().expect("Correctly converted"));
let series_set2 = extract_series_set(results[2].as_ref().expect("Correctly converted"));
let group_2 = extract_group(results[3].as_ref().expect("correctly made group"));
let series_set3 = extract_series_set(results[4].as_ref().expect("Correctly converted"));
let group_1 = extract_group(&results[0]);
let series_set1 = extract_series_set(&results[1]);
let series_set2 = extract_series_set(&results[2]);
let group_2 = extract_group(&results[3]);
let series_set3 = extract_series_set(&results[4]);

assert_eq!(group_1.tags, str_pair_vec_to_vec(&[("tag_a", "one")]));

@ -820,9 +774,9 @@ mod tests {

assert_eq!(results.len(), 3, "results were\n{:#?}", results); // 3 series, two groups (one and two)

let group_1 = extract_group(results[0].as_ref().expect("correctly made group"));
let series_set1 = extract_series_set(results[1].as_ref().expect("Correctly converted"));
let series_set2 = extract_series_set(results[2].as_ref().expect("Correctly converted"));
let group_1 = extract_group(&results[0]);
let series_set1 = extract_series_set(&results[1]);
let series_set2 = extract_series_set(&results[2]);

assert_eq!(group_1.tags, &[]);

@ -863,34 +817,24 @@ mod tests {
tag_columns: &'a [&'a str],
field_columns: &'a [&'a str],
it: SendableRecordBatchStream,
) -> Vec<Result<SeriesSet>> {
let (tx, mut rx) = mpsc::channel(1);
let mut converter = SeriesSetConverter::new(tx);
) -> Vec<SeriesSet> {
let mut converter = SeriesSetConverter::default();

let table_name = Arc::new(table_name.into());
let tag_columns = str_vec_to_arc_vec(tag_columns);
let field_columns = FieldColumns::from(field_columns);

tokio::task::spawn(async move {
converter
.convert(table_name, tag_columns, field_columns, None, it)
.await
.expect("Conversion happened without error")
});

let mut results = Vec::new();
while let Some(r) = rx.recv().await {
results.push(r.map(|item| {
converter
.convert(table_name, tag_columns, field_columns, None, it)
.await
.expect("Conversion happened without error").into_iter().map(|item|{
if let SeriesSetItem::Data(series_set) = item {
series_set
}
else {
else {
panic!("Unexpected result from converting. Expected SeriesSetItem::Data, got: {:?}", item)
}
})
);
}
results
}).collect::<Vec<_>>()
}

/// Test helper: run conversion to groups and return a Vec

@ -900,32 +844,23 @@ mod tests {
num_prefix_tag_group_columns: usize,
field_columns: &'a [&'a str],
it: SendableRecordBatchStream,
) -> Vec<Result<SeriesSetItem>> {
let (tx, mut rx) = mpsc::channel(1);
let mut converter = SeriesSetConverter::new(tx);
) -> Vec<SeriesSetItem> {
let mut converter = SeriesSetConverter::default();

let table_name = Arc::new(table_name.into());
let tag_columns = str_vec_to_arc_vec(tag_columns);
let field_columns = FieldColumns::from(field_columns);

tokio::task::spawn(async move {
converter
.convert(
table_name,
tag_columns,
field_columns,
Some(num_prefix_tag_group_columns),
it,
)
.await
.expect("Conversion happened without error")
});

let mut results = Vec::new();
while let Some(r) = rx.recv().await {
results.push(r)
}
results
converter
.convert(
table_name,
tag_columns,
field_columns,
Some(num_prefix_tag_group_columns),
it,
)
.await
.expect("Conversion happened without error")
}

/// Test helper: parses the csv content into a single record batch arrow

@ -245,14 +245,16 @@ impl Chunk {
.sum::<usize>();

// This call is expensive. Complete it before locking.
let now = std::time::Instant::now();
let row_group = RowGroup::from(table_data);
let compressing_took = now.elapsed();
let table_name = table_name.into();

let rows = self.rows();
let rows = row_group.rows();
let rg_size = row_group.size();
let compression = format!("{:.2}%", (rg_size as f64 / rb_size as f64) * 100.0);
let compression = format!("{:.2}%", (1.0 - (rg_size as f64 / rb_size as f64)) * 100.0);
let chunk_id = self.id();
info!(%rows, rb_size, rg_size, %compression, ?table_name, %chunk_id, "row group added");
info!(%rows, rb_size, rg_size, %compression, ?table_name, %chunk_id, ?compressing_took, "row group added");

let mut chunk_data = self.chunk_data.write();

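The compression line above changes the reported figure from a remaining-size ratio to the space actually saved. A small worked example of the difference, using illustrative sizes:

fn compression_example() {
    let rb_size: usize = 1_000; // bytes in the incoming record batch (illustrative)
    let rg_size: usize = 250; // bytes in the compressed row group (illustrative)

    // old formula: compressed size as a percentage of the original -> "25.00%"
    let old = format!("{:.2}%", (rg_size as f64 / rb_size as f64) * 100.0);

    // new formula: percentage of space saved by compression -> "75.00%"
    let new = format!("{:.2}%", (1.0 - (rg_size as f64 / rb_size as f64)) * 100.0);

    assert_eq!(old, "25.00%");
    assert_eq!(new, "75.00%");
}
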
@ -0,0 +1,76 @@
#!/bin/bash

# This script will filter the git history for a given revision range
# and report which PRs got landed in that range.
#
# It knows about our good and bad habits and deals with both merge
# commits and squash/rebase merges.
#
# Example usage:
#
# ./scripts/git-log-prs HEAD~10..main
# ./scripts/git-log-prs 8376983..main --titles

set -eu -o pipefail

usage() {
echo "$0 [ --titles ] <rev_range>"
exit 1
}

ensure_command_gh() {
type -P "gh" &> /dev/null || {
echo "Command 'gh' not found"
echo
echo "Macos:"
echo " brew install gh"
echo
echo "Other OS:"
echo " see https://github.com/cli/cli"
exit 1
}
}

main() {
local positionals=()
local titles=""

while [[ $# -gt 0 ]]; do
local key="$1"

case "${key}" in
--titles)
titles=true
shift
;;
*)
positionals+=("$1")
shift
;;
esac
done

if [ ${#positionals[@]} -lt 1 ]; then
usage
fi

local rev_range="${positionals[0]}"

git log \
--committer='GitHub <noreply@github.com>' \
--pretty=format:'%s' \
"${rev_range}" \
| sed 's/Merge pull request #\([0-9]*\).*/\1/' \
| sed 's/.*(#\([0-9]*\))$/\1/' \
| grep -E '^[0-9]*$' \
| while read -r pr; do
local title=""
if [ -n "${titles}" ]; then
ensure_command_gh
title=$(gh pr view "${pr}" --json title --jq '.title')
fi
echo "${pr} ${title}"
done
}

main "$@"

@ -1,8 +1,18 @@
mod read_filter;
mod read_group;
mod tag_values;

use criterion::{criterion_group, criterion_main};

use read_filter::benchmark_read_filter;
use read_group::benchmark_read_group;
use tag_values::benchmark_tag_values;

criterion_group!(benches, benchmark_tag_values);
// criterion_group!(benches, benchmark_tag_values, benchmark_read_filter);
criterion_group!(
benches,
benchmark_tag_values,
benchmark_read_filter,
benchmark_read_group,
);
criterion_main!(benches);

@ -0,0 +1,124 @@
use std::io::Read;

use arrow_deps::datafusion::{logical_plan::Expr, scalar::ScalarValue};
use criterion::{BenchmarkId, Criterion};
// This is a struct that tells Criterion.rs to use the "futures" crate's
// current-thread executor
use flate2::read::GzDecoder;
use tokio::runtime::Runtime;

use query::frontend::influxrpc::InfluxRpcPlanner;
use query::predicate::PredicateBuilder;
use query::{exec::Executor, predicate::Predicate};
use server::{benchmarks::scenarios::DbScenario, db::Db};

// Uses the `query_tests` module to generate some chunk scenarios, specifically
// the scenarios where there are:
//
// - a single open mutable buffer chunk;
// - a closed mutable buffer chunk and another open one;
// - an open mutable buffer chunk and a closed read buffer chunk;
// - two closed read buffer chunks.
//
// The chunks are all fed the *same* line protocol, so these benchmarks are
// useful for assessing the differences in performance between querying the
// chunks held in different execution engines.
//
// These benchmarks use a synthetically generated set of line protocol using
// `inch`. Each point is a new series containing 10 tag keys, which results in
// ten columns in IOx. There is a single field column and a timestamp column.
//
// - tag0, cardinality 2.
// - tag1, cardinality 10.
// - tag2, cardinality 10.
// - tag3, cardinality 50.
// - tag4, cardinality 100.
//
// In total there are 10K rows. The timespan of the points in the line
// protocol is around 1m of wall-clock time.
async fn setup_scenarios() -> Vec<DbScenario> {
let raw = include_bytes!("../../tests/fixtures/lineproto/read_filter.lp.gz");
let mut gz = GzDecoder::new(&raw[..]);
let mut lp = String::new();
gz.read_to_string(&mut lp).unwrap();

let db =
server::benchmarks::scenarios::make_two_chunk_scenarios("2021-04-26T13", &lp, &lp).await;
db
}

// Run all benchmarks for `read_filter`.
pub fn benchmark_read_filter(c: &mut Criterion) {
let scenarios = Runtime::new().unwrap().block_on(setup_scenarios());
execute_benchmark_group(c, scenarios.as_slice());
}

// Runs an async criterion benchmark against the provided scenarios and
// predicate.
fn execute_benchmark_group(c: &mut Criterion, scenarios: &[DbScenario]) {
let planner = InfluxRpcPlanner::new();

let predicates = vec![
(PredicateBuilder::default().build(), "no_pred"),
(
PredicateBuilder::default()
.add_expr(
Expr::Column("tag3".to_owned())
.eq(Expr::Literal(ScalarValue::Utf8(Some("value49".to_owned())))),
)
.build(),
"with_pred_tag_3=value49",
),
];

for scenario in scenarios {
let DbScenario { scenario_name, db } = scenario;
let mut group = c.benchmark_group(format!("read_filter/{}", scenario_name));

for (predicate, pred_name) in &predicates {
let chunks = db.partition_chunk_summaries("2021-04-26T13").len();
// The number of expected frames, based on the expected number of
// individual series keys.
let exp_data_frames = if predicate.is_empty() { 10000 } else { 200 } * chunks;

group.bench_with_input(
BenchmarkId::from_parameter(pred_name),
predicate,
|b, predicate| {
let executor = db.executor();
b.to_async(Runtime::new().unwrap()).iter(|| {
build_and_execute_plan(
&planner,
executor.as_ref(),
db,
predicate.clone(),
exp_data_frames,
)
});
},
);
}

group.finish();
}
}

// Plans and runs a read_filter query.
async fn build_and_execute_plan(
planner: &InfluxRpcPlanner,
executor: &Executor,
db: &Db,
predicate: Predicate,
exp_data_frames: usize,
) {
let plan = planner
.read_filter(db, predicate)
.expect("built plan successfully");

let results = executor
.to_series_set(plan)
.await
.expect("Running series set plan");

assert_eq!(results.len(), exp_data_frames);
}

@ -0,0 +1,132 @@
use std::io::Read;

use arrow_deps::datafusion::{logical_plan::Expr, scalar::ScalarValue};
use criterion::{BenchmarkId, Criterion};
// This is a struct that tells Criterion.rs to use the "futures" crate's
// current-thread executor
use flate2::read::GzDecoder;
use tokio::runtime::Runtime;

use query::predicate::PredicateBuilder;
use query::{exec::Executor, predicate::Predicate};
use query::{frontend::influxrpc::InfluxRpcPlanner, group_by::Aggregate};
use server::{benchmarks::scenarios::DbScenario, db::Db};

// Uses the `query_tests` module to generate some chunk scenarios, specifically
// the scenarios where there are:
//
// - a single open mutable buffer chunk;
// - a closed mutable buffer chunk and another open one;
// - an open mutable buffer chunk and a closed read buffer chunk;
// - two closed read buffer chunks.
//
// The chunks are all fed the *same* line protocol, so these benchmarks are
// useful for assessing the differences in performance between querying the
// chunks held in different execution engines.
//
// These benchmarks use a synthetically generated set of line protocol using
// `inch`. Each point is a new series containing 5 tag keys, which results in
// five tag columns in IOx. There is a single field column and a timestamp column.
//
// - tag0, cardinality 2.
// - tag1, cardinality 10.
// - tag2, cardinality 10.
// - tag3, cardinality 50.
// - tag4, cardinality 1.
//
// In total there are 10K rows. The timespan of the points in the line
// protocol is around 1m of wall-clock time.
async fn setup_scenarios() -> Vec<DbScenario> {
let raw = include_bytes!("../../tests/fixtures/lineproto/read_filter.lp.gz");
let mut gz = GzDecoder::new(&raw[..]);
let mut lp = String::new();
gz.read_to_string(&mut lp).unwrap();

let db =
server::benchmarks::scenarios::make_two_chunk_scenarios("2021-04-26T13", &lp, &lp).await;
db
}

// Run all benchmarks for `read_group`.
pub fn benchmark_read_group(c: &mut Criterion) {
let scenarios = Runtime::new().unwrap().block_on(setup_scenarios());
execute_benchmark_group(c, scenarios.as_slice());
}

// Runs an async criterion benchmark against the provided scenarios and
// predicate.
fn execute_benchmark_group(c: &mut Criterion, scenarios: &[DbScenario]) {
let planner = InfluxRpcPlanner::new();

let predicates = vec![
(PredicateBuilder::default().build(), "no_pred"),
(
PredicateBuilder::default()
.add_expr(
Expr::Column("tag3".to_owned())
.eq(Expr::Literal(ScalarValue::Utf8(Some("value49".to_owned())))),
)
.build(),
"with_pred_tag_3=value49",
),
];

for scenario in scenarios {
let DbScenario { scenario_name, db } = scenario;
let mut group = c.benchmark_group(format!("read_group/{}", scenario_name));

for (predicate, pred_name) in &predicates {
// The number of expected frames, based on the expected number of
// individual series keys, which for grouping is the same no matter
// how many chunks we query over.
let exp_data_frames = if predicate.is_empty() {
10000 + 10 // 10 groups when grouping on `tag2`
} else {
200 + 10 // 10 groups when grouping on `tag2`
};

group.bench_with_input(
BenchmarkId::from_parameter(pred_name),
predicate,
|b, predicate| {
let executor = db.executor();
b.to_async(Runtime::new().unwrap()).iter(|| {
build_and_execute_plan(
&planner,
executor.as_ref(),
db,
predicate.clone(),
Aggregate::Sum,
&["tag2"],
exp_data_frames,
)
});
},
);
}

group.finish();
}
}

// Plans and runs a read_group query.
async fn build_and_execute_plan(
planner: &InfluxRpcPlanner,
executor: &Executor,
db: &Db,
predicate: Predicate,
agg: Aggregate,
group: &[&str],
exp_frames: usize,
) {
let plan = planner
.read_group(db, predicate, agg, group)
.expect("built plan successfully");

let results = executor
.to_series_set(plan)
.await
.expect("Running series set plan");

assert_eq!(results.len(), exp_frames);
}

@ -49,11 +49,6 @@ pub enum Error {
#[snafu(display("segment id must be between [1, 1,000,000,000)"))]
SegmentIdOutOfBounds,

#[snafu(display("Server ID must not be 0: {}", source))]
ServerIdInvalid {
source: data_types::server_id::Error,
},

#[snafu(display("unable to compress segment id {}: {}", segment_id, source))]
UnableToCompressData {
segment_id: u64,

@ -76,6 +71,11 @@ pub enum Error {
source: flatbuffers::InvalidFlatbuffer,
},

#[snafu(display("Invalid ReplicatedWrite: {}", source))]
InvalidReplicatedWrite {
source: internal_types::data::ReplicatedWriteError,
},

#[snafu(display("the flatbuffers size is too small; only found {} bytes", bytes))]
FlatbuffersSegmentTooSmall { bytes: usize },

@ -520,14 +520,12 @@ impl Segment {
.writes()
.context(FlatbuffersMissingField { field: "writes" })?;
let mut segment = Self::new_with_capacity(fb_segment.id(), writes.len());
let server_id = ServerId::try_from(fb_segment.server_id()).context(ServerIdInvalid)?;
for w in writes {
let data = w
.payload()
.context(FlatbuffersMissingField { field: "payload" })?
.to_vec();
let rw =
ReplicatedWrite::try_from((data, server_id)).context(InvalidFlatbuffersSegment)?;
let rw = ReplicatedWrite::try_from(data).context(InvalidReplicatedWrite)?;

segment.append(Arc::new(rw))?;
}

@ -15,6 +15,7 @@ use catalog::{
Catalog,
};
pub(crate) use chunk::DbChunk;
use core::num::NonZeroU64;
use data_types::{
chunk::ChunkSummary,
database_rules::DatabaseRules,

@ -25,7 +26,7 @@ use data_types::{
};
use internal_types::{
arrow::sort::sort_record_batch,
entry::{self, ClockValue, Entry, SequencedEntry},
entry::{self, ClockValue, ClockValueError, Entry, SequencedEntry},
selection::Selection,
};
use lifecycle::LifecycleManager;

@ -40,7 +41,7 @@ use read_buffer::Chunk as ReadBufferChunk;
use snafu::{ensure, ResultExt, Snafu};
use std::{
any::Any,
convert::TryInto,
convert::{TryFrom, TryInto},
num::NonZeroUsize,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},

@ -192,10 +193,14 @@ pub enum Error {
SchemaConversion {
source: internal_types::schema::Error,
},

#[snafu(display("Invalid Clock Value: {}", source))]
InvalidClockValue { source: ClockValueError },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;

const STARTING_SEQUENCE: u64 = 1;
const DEFAULT_BACKGROUND_WORKER_PERIOD_MILLIS: u64 = 1000;

/// This is the main IOx Database object. It is the root object of any
/// specific InfluxDB IOx instance

@ -353,7 +358,8 @@ impl Db {
let store = Arc::clone(&object_store);
let write_buffer = write_buffer.map(Mutex::new);
let catalog = Arc::new(Catalog::new());
let system_tables = SystemSchemaProvider::new(Arc::clone(&catalog), Arc::clone(&jobs));
let system_tables =
SystemSchemaProvider::new(&db_name, Arc::clone(&catalog), Arc::clone(&jobs));
let system_tables = Arc::new(system_tables);

let domain = metrics.register_domain("catalog");

@ -830,7 +836,20 @@ impl Db {
) {
info!("started background worker");

let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(1));
fn make_interval(millis: Option<NonZeroU64>) -> tokio::time::Interval {
tokio::time::interval(tokio::time::Duration::from_millis(
millis.map_or(DEFAULT_BACKGROUND_WORKER_PERIOD_MILLIS, NonZeroU64::get),
))
}

let mut period_millis = {
self.rules
.read()
.lifecycle_rules
.background_worker_period_millis
};

let mut interval = make_interval(period_millis);
let mut lifecycle_manager = LifecycleManager::new(Arc::clone(&self));

while !shutdown.is_cancelled() {

@ -838,6 +857,16 @@ impl Db {

lifecycle_manager.check_for_work();

let possibly_updated_period_millis = {
self.rules
.read()
.lifecycle_rules
.background_worker_period_millis
};
if period_millis != possibly_updated_period_millis {
period_millis = possibly_updated_period_millis;
interval = make_interval(period_millis);
}
tokio::select! {
_ = interval.tick() => {},
_ = shutdown.cancelled() => break

@ -856,7 +885,7 @@ impl Db {
pub fn store_entry(&self, entry: Entry) -> Result<()> {
// TODO: build this based on either this or on the write buffer, if configured
let sequenced_entry = SequencedEntry::new_from_entry_bytes(
ClockValue::new(self.next_sequence()),
ClockValue::try_from(self.next_sequence()).context(InvalidClockValue)?,
self.server_id,
entry.data(),
)

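The background worker hunks above make the tick period configurable and rebuild the tokio interval when the lifecycle rules change. A stripped-down sketch of that pattern outside of `Db` (names, the config-reading closure, and the missing shutdown handling are illustrative simplifications):

use std::num::NonZeroU64;

const DEFAULT_PERIOD_MILLIS: u64 = 1000;

fn make_interval(millis: Option<NonZeroU64>) -> tokio::time::Interval {
    tokio::time::interval(tokio::time::Duration::from_millis(
        millis.map_or(DEFAULT_PERIOD_MILLIS, NonZeroU64::get),
    ))
}

async fn worker_loop(
    mut current: Option<NonZeroU64>,
    mut read_period: impl FnMut() -> Option<NonZeroU64>,
) {
    let mut interval = make_interval(current);
    loop {
        // ... do the periodic work here ...

        // If the configured period changed, rebuild the interval so the new
        // value takes effect on the next tick.
        let latest = read_period();
        if latest != current {
            current = latest;
            interval = make_interval(current);
        }
        interval.tick().await;
    }
}
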
@ -269,12 +269,11 @@ impl SchemaProvider for Catalog {

#[cfg(test)]
mod tests {
use std::num::NonZeroU32;

use super::*;
use data_types::server_id::ServerId;
use internal_types::entry::{test_helpers::lp_to_entry, ClockValue};
use query::predicate::PredicateBuilder;
use std::convert::TryFrom;
use tracker::MemRegistry;

fn create_open_chunk(partition: &Arc<RwLock<Partition>>, table: &str, registry: &MemRegistry) {

@ -285,8 +284,8 @@ mod tests {
partition
.create_open_chunk(
batch,
ClockValue::new(0),
ServerId::new(NonZeroU32::new(1).unwrap()),
ClockValue::try_from(5).unwrap(),
ServerId::try_from(1).unwrap(),
registry,
)
.unwrap();

@ -275,7 +275,10 @@ mod tests {
use super::*;
use data_types::server_id::ServerId;
use internal_types::entry::{test_helpers::lp_to_entry, ClockValue};
use std::num::{NonZeroU32, NonZeroUsize};
use std::{
convert::TryFrom,
num::{NonZeroU32, NonZeroUsize},
};
use tracker::MemRegistry;

fn from_secs(secs: i64) -> DateTime<Utc> {

@ -294,8 +297,8 @@ mod tests {
batch,
"",
id,
ClockValue::new(0),
ServerId::new(NonZeroU32::new(1).unwrap()),
ClockValue::try_from(5).unwrap(),
ServerId::try_from(1).unwrap(),
&MemRegistry::new(),
)
.unwrap();

@ -37,13 +37,19 @@ const OPERATIONS: &str = "operations";
|
|||
|
||||
#[derive(Debug)]
|
||||
pub struct SystemSchemaProvider {
|
||||
db_name: String,
|
||||
catalog: Arc<Catalog>,
|
||||
jobs: Arc<JobRegistry>,
|
||||
}
|
||||
|
||||
impl SystemSchemaProvider {
|
||||
pub fn new(catalog: Arc<Catalog>, jobs: Arc<JobRegistry>) -> Self {
|
||||
Self { catalog, jobs }
|
||||
pub fn new(db_name: impl Into<String>, catalog: Arc<Catalog>, jobs: Arc<JobRegistry>) -> Self {
|
||||
let db_name = db_name.into();
|
||||
Self {
|
||||
db_name,
|
||||
catalog,
|
||||
jobs,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -69,7 +75,7 @@ impl SchemaProvider for SystemSchemaProvider {
|
|||
COLUMNS => from_partition_summaries(self.catalog.partition_summaries())
|
||||
.log_if_error("chunks table")
|
||||
.ok()?,
|
||||
OPERATIONS => from_task_trackers(self.jobs.tracked())
|
||||
OPERATIONS => from_task_trackers(&self.db_name, self.jobs.tracked())
|
||||
.log_if_error("operations table")
|
||||
.ok()?,
|
||||
_ => return None,
|
||||
|
@ -185,14 +191,22 @@ fn from_partition_summaries(partitions: Vec<PartitionSummary>) -> Result<RecordB
|
|||
)
|
||||
}
|
||||
|
||||
fn from_task_trackers(jobs: Vec<TaskTracker<Job>>) -> Result<RecordBatch> {
|
||||
fn from_task_trackers(db_name: &str, jobs: Vec<TaskTracker<Job>>) -> Result<RecordBatch> {
|
||||
let jobs = jobs
|
||||
.into_iter()
|
||||
.filter(|job| job.metadata().db_name() == Some(db_name))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let ids = StringArray::from_iter(jobs.iter().map(|job| Some(job.id().to_string())));
|
||||
let statuses = StringArray::from_iter(jobs.iter().map(|job| Some(job.get_status().name())));
|
||||
let cpu_time_used = Time64NanosecondArray::from_iter(
|
||||
jobs.iter()
|
||||
.map(|job| job.get_status().cpu_nanos().map(|n| n as i64)),
|
||||
);
|
||||
let db_names = StringArray::from_iter(jobs.iter().map(|job| job.metadata().db_name()));
|
||||
let wall_time_used = Time64NanosecondArray::from_iter(
|
||||
jobs.iter()
|
||||
.map(|job| job.get_status().wall_nanos().map(|n| n as i64)),
|
||||
);
|
||||
let partition_keys =
|
||||
StringArray::from_iter(jobs.iter().map(|job| job.metadata().partition_key()));
|
||||
let chunk_ids = UInt32Array::from_iter(jobs.iter().map(|job| job.metadata().chunk_id()));
|
||||
|
@ -203,7 +217,7 @@ fn from_task_trackers(jobs: Vec<TaskTracker<Job>>) -> Result<RecordBatch> {
|
|||
Field::new("id", ids.data_type().clone(), false),
|
||||
Field::new("status", statuses.data_type().clone(), false),
|
||||
Field::new("cpu_time_used", cpu_time_used.data_type().clone(), true),
|
||||
Field::new("db_name", db_names.data_type().clone(), true),
|
||||
Field::new("wall_time_used", wall_time_used.data_type().clone(), true),
|
||||
Field::new("partition_key", partition_keys.data_type().clone(), true),
|
||||
Field::new("chunk_id", chunk_ids.data_type().clone(), true),
|
||||
Field::new("description", descriptions.data_type().clone(), true),
|
||||
|
@ -215,7 +229,7 @@ fn from_task_trackers(jobs: Vec<TaskTracker<Job>>) -> Result<RecordBatch> {
|
|||
Arc::new(ids),
|
||||
Arc::new(statuses),
|
||||
Arc::new(cpu_time_used),
|
||||
Arc::new(db_names),
|
||||
Arc::new(wall_time_used),
|
||||
Arc::new(partition_keys),
|
||||
Arc::new(chunk_ids),
|
||||
Arc::new(descriptions),
|
||||
|
|
|
@ -1,5 +1,7 @@
//! Tests for the Influx gRPC queries
#[cfg(test)]
use super::util::run_series_set_plan;

use crate::query_tests::scenarios::*;
use arrow_deps::datafusion::logical_plan::{col, lit};
use async_trait::async_trait;

@ -50,29 +50,29 @@ pub fn dump_series_set(s: SeriesSet) -> Vec<String> {
}

/// Run a series set plan to completion and produce a Vec<String> representation
///
/// # Panics
///
/// Panics if there is an error executing a plan, or if unexpected series set
/// items are returned.
#[cfg(test)]
pub async fn run_series_set_plan(executor: Arc<Executor>, plans: SeriesSetPlans) -> Vec<String> {
// Use a channel sufficiently large to buffer the series
let (tx, mut rx) = mpsc::channel(100);
executor
.to_series_set(plans, tx)
.await
.expect("Running series set plan");
let results = executor.to_series_set(plans).await;

// gather up the sets and compare them
let mut results = vec![];
while let Some(r) = rx.recv().await {
let item = r.expect("unexpected error in execution");
let item = if let SeriesSetItem::Data(series_set) = item {
series_set
} else {
panic!(
"Unexpected result from converting. Expected SeriesSetItem::Data, got: {:?}",
item
)
};

results.push(item);
}
let mut results = results
.unwrap()
.into_iter()
.map(|item| {
if let SeriesSetItem::Data(series_set) = item {
series_set
} else {
panic!(
"Unexpected result from converting. Expected SeriesSetItem::Data, got: {:?}",
item
)
}
})
.collect::<Vec<_>>();

// sort the results so that we can reliably compare
results.sort_by(|r1, r2| {

@ -314,19 +314,19 @@ async fn sql_select_from_system_columns() {
async fn sql_select_from_system_operations() {
test_helpers::maybe_start_logging();
let expected = vec![
"+----+----------+-----------+-------------+---------------+----------+---------------------------------+",
"| id | status | took_time | db_name | partition_key | chunk_id | description |",
"+----+----------+-----------+-------------+---------------+----------+---------------------------------+",
"| 0 | Complete | true | placeholder | 1970-01-01T00 | 0 | Loading chunk to ReadBuffer |",
"| 1 | Complete | true | placeholder | 1970-01-01T00 | 0 | Writing chunk to Object Storage |",
"+----+----------+-----------+-------------+---------------+----------+---------------------------------+",
"+----+----------+---------------+----------------+---------------+----------+---------------------------------+",
"| id | status | took_cpu_time | took_wall_time | partition_key | chunk_id | description |",
"+----+----------+---------------+----------------+---------------+----------+---------------------------------+",
"| 0 | Complete | true | true | 1970-01-01T00 | 0 | Loading chunk to ReadBuffer |",
"| 1 | Complete | true | true | 1970-01-01T00 | 0 | Writing chunk to Object Storage |",
"+----+----------+---------------+----------------+---------------+----------+---------------------------------+",
];

// Check that the cpu time used reported is greater than zero as it isn't
// repeatable
run_sql_test_case!(
TwoMeasurementsManyFieldsLifecycle {},
"SELECT id, status, CAST(cpu_time_used AS BIGINT) > 0 as took_time, db_name, partition_key, chunk_id, description from system.operations",
"SELECT id, status, CAST(cpu_time_used AS BIGINT) > 0 as took_cpu_time, CAST(wall_time_used AS BIGINT) > 0 as took_wall_time, partition_key, chunk_id, description from system.operations",
&expected
);
}

@ -39,6 +39,7 @@ pub fn make_db() -> TestDb {
}
}

/// Used for testing: create a Database with a local store and a specified name
pub fn make_database(server_id: ServerId, object_store: Arc<ObjectStore>, db_name: &str) -> Db {
let exec = Arc::new(Executor::new(1));
let metrics_registry = Arc::new(metrics::MetricRegistry::new());

@ -180,6 +180,7 @@ pub async fn command(url: String, config: Config) -> Result<()> {
drop_non_persisted: command.drop_non_persisted,
persist: command.persist,
immutable: command.immutable,
background_worker_period_millis: Default::default(),
}),

// Default to hourly partitions

@ -0,0 +1,62 @@
//! Entrypoint for interactive SQL repl loop

use observability_deps::tracing::debug;
use snafu::{ResultExt, Snafu};
use structopt::StructOpt;

use influxdb_iox_client::connection::{Builder, Connection};

mod repl;
mod repl_command;

/// Start IOx interactive SQL REPL loop
///
/// Supports command history and interactive editing. History is
/// stored in $HOME/.iox_sql_history.
#[derive(Debug, StructOpt)]
pub struct Config {
// TODO add an option to avoid saving history
// TODO add an option to specify the default database (rather than having to set it via USE DATABASE)
// TODO add an option to specify output formatting (now it is hard coded to use pretty printing)
}

#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Error connecting to {}: {}", url, source))]
Connecting {
url: String,
source: influxdb_iox_client::connection::Error,
},

#[snafu(display("Storage health check failed: {}", source))]
HealthCheck {
source: influxdb_iox_client::health::Error,
},

#[snafu(display("Repl Error: {}", source))]
Repl { source: repl::Error },
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

/// Fire up the interactive REPL
pub async fn command(url: String, config: Config) -> Result<()> {
debug!("Starting interactive SQL prompt with {:?}", config);

let connection = Builder::default()
.build(&url)
.await
.context(Connecting { url: &url })?;

println!("Connected to IOx Server at {}", url);
check_health(connection.clone()).await?;

repl::Repl::new(connection).run().await.context(Repl)
}

async fn check_health(connection: Connection) -> Result<()> {
influxdb_iox_client::health::Client::new(connection)
.check_storage()
.await
.context(HealthCheck)
}

@ -0,0 +1,275 @@
use std::{convert::TryInto, path::PathBuf, sync::Arc, time::Instant};

use arrow_deps::arrow::{
array::{Array, StringArray},
datatypes::{Field, Schema},
record_batch::RecordBatch,
};
use observability_deps::tracing::{debug, info};
use rustyline::{error::ReadlineError, Editor};
use snafu::{ResultExt, Snafu};

use super::repl_command::ReplCommand;

use influxdb_iox_client::{connection::Connection, format::QueryOutputFormat};

#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Error reading command: {}", source))]
Readline { source: ReadlineError },

#[snafu(display("Error loading remote state: {}", source))]
LoadingRemoteState {
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},

#[snafu(display("Error formatting results: {}", source))]
FormattingResults {
source: influxdb_iox_client::format::Error,
},

#[snafu(display("Error parsing command: {}", message))]
ParsingCommand { message: String },

#[snafu(display("Error running remote query: {}", source))]
RunningRemoteQuery {
source: influxdb_iox_client::flight::Error,
},
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

/// (Potentially) cached remote state of the server
struct RemoteState {
db_names: Vec<String>,
}

impl RemoteState {
async fn try_new(
management_client: &mut influxdb_iox_client::management::Client,
) -> Result<Self> {
let db_names = management_client
.list_databases()
.await
.map_err(|e| Box::new(e) as _)
.context(LoadingRemoteState)?;

Ok(Self { db_names })
}
}

/// Captures the state of the repl, gathers commands and executes them
/// one by one
pub struct Repl {
/// Rustyline editor for interacting with user on command line
rl: Editor<()>,

/// Current prompt
prompt: String,

/// Client for interacting with IOx management API
management_client: influxdb_iox_client::management::Client,

/// Client for running sql
flight_client: influxdb_iox_client::flight::Client,

/// database name against which SQL commands are run
current_database: Option<String>,
}

impl Repl {
fn print_help(&self) {
print!("{}", ReplCommand::help())
}

/// Create a new Repl instance, connected to the specified URL
pub fn new(connection: Connection) -> Self {
let management_client = influxdb_iox_client::management::Client::new(connection.clone());
let flight_client = influxdb_iox_client::flight::Client::new(connection);

let mut rl = Editor::<()>::new();
let history_file = history_file();

if let Err(e) = rl.load_history(&history_file) {
debug!(%e, "error loading history file");
}

let prompt = "> ".to_string();

Self {
rl,
prompt,
management_client,
flight_client,
current_database: None,
}
}

/// Read Evaluate Print Loop (interactive command line) for SQL
///
/// Inspired / based on repl.rs from DataFusion
pub async fn run(&mut self) -> Result<()> {
println!("Ready for commands. (Hint: try 'help;')");
loop {
match self.next_command()? {
ReplCommand::Help => {
self.print_help();
}
ReplCommand::ShowDatabases => {
self.list_databases().await?;
}
ReplCommand::UseDatabase { db_name } => {
self.use_database(db_name);
}
ReplCommand::SqlCommand { sql } => {
self.run_sql(sql).await?;
}
ReplCommand::Exit => {
info!("exiting at user request");
return Ok(());
}
}
}
}

/// Parses the next command
fn next_command(&mut self) -> Result<ReplCommand> {
let mut request = "".to_owned();
loop {
match self.rl.readline(&self.prompt) {
Ok(ref line) if is_exit_command(line) && request.is_empty() => {
return Ok(ReplCommand::Exit);
}
Ok(ref line) if line.trim_end().ends_with(';') => {
request.push_str(line.trim_end());
self.rl.add_history_entry(request.clone());

return request
.try_into()
.map_err(|message| Error::ParsingCommand { message });
}
Ok(ref line) => {
request.push_str(line);
request.push(' ');
}
Err(ReadlineError::Eof) => {
debug!("Received Ctrl-D");
return Ok(ReplCommand::Exit);
}
Err(ReadlineError::Interrupted) => {
debug!("Received Ctrl-C");
return Ok(ReplCommand::Exit);
}
// Some sort of real underlying error
Err(e) => {
return Err(Error::Readline { source: e });
}
}
}
}

// print all databases to the output
async fn list_databases(&mut self) -> Result<()> {
let state = self.remote_state().await?;
let db_names = StringArray::from_iter_values(state.db_names.iter().map(|s| s.as_str()));

let schema = Schema::new(vec![Field::new(
"db_name",
db_names.data_type().clone(),
false,
)]);

let record_batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(db_names)])
.expect("creating record batch successfully");

self.print_results(&[record_batch])
}

// Run a command against the currently selected remote database
async fn run_sql(&mut self, sql: String) -> Result<()> {
let db_name = match &self.current_database {
None => {
println!("Error: no database selected.");
println!("Hint: Run USE DATABASE <dbname> to select database");
return Ok(());
}
Some(db_name) => db_name,
};

info!(%db_name, %sql, "Running sql on remote database");

let start = Instant::now();
let batches = scrape_query(&mut self.flight_client, &db_name, &sql).await?;
let end = Instant::now();
self.print_results(&batches)?;

println!("Query execution complete in {:?}", end - start);
Ok(())
}

fn use_database(&mut self, db_name: String) {
info!(%db_name, "setting current database");
self.prompt = format!("{}> ", db_name);
self.current_database = Some(db_name);
}

// TODO make a setting for changing if we cache remote state or not
async fn remote_state(&mut self) -> Result<RemoteState> {
let state = RemoteState::try_new(&mut self.management_client).await?;
Ok(state)
}

/// Prints to the specified output format
fn print_results(&self, batches: &[RecordBatch]) -> Result<()> {
// TODO make query output format configurable
let output_format = QueryOutputFormat::Pretty;
let formatted_results = output_format.format(batches).context(FormattingResults)?;
println!("{}", formatted_results);
Ok(())
}
}

impl Drop for Repl {
fn drop(&mut self) {
let history_file = history_file();

if let Err(e) = self.rl.save_history(&history_file) {
debug!(%e, "error saving history file");
}
}
}

fn is_exit_command(line: &str) -> bool {
let line = line.trim_end().to_lowercase();
line == "quit" || line == "exit"
}

/// Return the location of the history file (defaults to $HOME/".iox_sql_history")
fn history_file() -> PathBuf {
let mut buf = match std::env::var("HOME") {
Ok(home) => PathBuf::from(home),
Err(_) => PathBuf::new(),
};
buf.push(".iox_sql_history");
buf
}

/// Runs the specified `query` and returns the record batches of the result
async fn scrape_query(
client: &mut influxdb_iox_client::flight::Client,
db_name: &str,
query: &str,
) -> Result<Vec<RecordBatch>> {
let mut query_results = client
.perform_query(db_name, query)
.await
.context(RunningRemoteQuery)?;

let mut batches = vec![];

while let Some(data) = query_results.next().await.context(RunningRemoteQuery)? {
batches.push(data);
}

Ok(batches)
}

@ -0,0 +1,235 @@
|
|||
use std::convert::TryInto;
|
||||
|
||||
use observability_deps::tracing::{debug, warn};
|
||||
|
||||
/// Represents the parsed command from the user (which may be over many lines)
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum ReplCommand {
|
||||
Help,
|
||||
ShowDatabases,
|
||||
UseDatabase { db_name: String },
|
||||
SqlCommand { sql: String },
|
||||
Exit,
|
||||
}
|
||||
|
||||
impl TryInto<ReplCommand> for String {
|
||||
type Error = Self;
|
||||
|
||||
#[allow(clippy::if_same_then_else)]
|
||||
fn try_into(self) -> Result<ReplCommand, Self::Error> {
|
||||
debug!(%self, "tokenizing to ReplCommand");
|
||||
|
||||
if self.trim().is_empty() {
|
||||
return Err("No command specified".to_string());
|
||||
}
|
||||
|
||||
// tokenized commands, normalized whitespace but original case
|
||||
let raw_commands = self
|
||||
.trim()
|
||||
// chop off trailing semicolon
|
||||
.strip_suffix(";")
|
||||
.unwrap_or(&self)
|
||||
// tokenize on whitespace
|
||||
.split(' ')
|
||||
.map(|c| c.trim())
|
||||
.filter(|c| !c.is_empty())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// normalized commands (all lower case)
|
||||
let commands = raw_commands
|
||||
.iter()
|
||||
.map(|c| c.to_ascii_lowercase())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
debug!(?raw_commands, ?commands, "processing tokens");
|
||||
|
||||
if !commands.is_empty() && commands[0] == "help" {
|
||||
if commands.len() > 1 {
|
||||
let extra_content = commands[1..].join(" ");
|
||||
warn!(%extra_content, "ignoring tokens after 'help'");
|
||||
}
|
||||
Ok(ReplCommand::Help)
|
||||
} else if commands.len() == 1 && commands[0] == "exit" {
|
||||
Ok(ReplCommand::Exit)
|
||||
} else if commands.len() == 1 && commands[0] == "quit" {
|
||||
Ok(ReplCommand::Exit)
|
||||
} else if commands.len() == 2 && commands[0] == "use" && commands[1] == "database" {
|
||||
// USE DATABASE
|
||||
Err("name not specified. Usage: USE DATABASE <name>".to_string())
|
||||
} else if commands.len() == 3 && commands[0] == "use" && commands[1] == "database" {
|
||||
// USE DATABASE <name>
|
||||
Ok(ReplCommand::UseDatabase {
|
||||
db_name: raw_commands[2].to_string(),
|
||||
})
|
||||
} else if commands.len() == 2 && commands[0] == "use" {
|
||||
// USE <name>
|
||||
Ok(ReplCommand::UseDatabase {
|
||||
db_name: raw_commands[1].to_string(),
|
||||
})
|
||||
} else if commands.len() == 2 && commands[0] == "show" && commands[1] == "databases" {
|
||||
Ok(ReplCommand::ShowDatabases)
|
||||
} else {
|
||||
// Default is to treat the entire string like SQL
|
||||
Ok(ReplCommand::SqlCommand { sql: self })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReplCommand {
|
||||
/// Information for each command
|
||||
pub fn help() -> &'static str {
|
||||
r#"
|
||||
Available commands (not case sensitive):
|
||||
HELP (this one)
|
||||
|
||||
SHOW DATABASES: List databases available on the server
|
||||
|
||||
USE [DATABASE] <name>: Set the current database to name
|
||||
|
||||
[EXIT | QUIT]: Quit this session and exit the program
|
||||
|
||||
# Examples:
|
||||
SHOW DATABASES;
|
||||
USE DATABASE foo;
|
||||
|
||||
# Basic IOx SQL Primer
|
||||
|
||||
;; Explore Schema:
|
||||
SHOW TABLES; ;; Show available tables
|
||||
SHOW COLUMNS FROM my_table; ;; Show columns in the table
|
||||
|
||||
;; Show storage usage across partitions and tables
|
||||
SELECT
|
||||
partition_key, table_name, storage,
|
||||
count(*) as chunk_count,
|
||||
sum(estimated_bytes)/(1024*1024) as size_mb
|
||||
FROM
|
||||
system.chunks
|
||||
GROUP BY
|
||||
partition_key, table_name, storage
|
||||
ORDER BY
|
||||
size_mb DESC
|
||||
LIMIT 20
|
||||
;
|
||||
|
||||
"#
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl TryInto<ReplCommand> for &str {
|
||||
type Error = String;
|
||||
|
||||
fn try_into(self) -> Result<ReplCommand, Self::Error> {
|
||||
self.to_string().try_into()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn sql_cmd(sql: &str) -> Result<ReplCommand, String> {
|
||||
Ok(ReplCommand::SqlCommand {
|
||||
sql: sql.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty() {
|
||||
let expected: Result<ReplCommand, String> = Err("No command specified".to_string());
|
||||
|
||||
assert_eq!("".try_into(), expected);
|
||||
assert_eq!(" ".try_into(), expected);
|
||||
assert_eq!(" \t".try_into(), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn help() {
|
||||
let expected = Ok(ReplCommand::Help);
|
||||
assert_eq!("help;".try_into(), expected);
|
||||
assert_eq!("help".try_into(), expected);
|
||||
assert_eq!(" help".try_into(), expected);
|
||||
assert_eq!(" help ".try_into(), expected);
|
||||
assert_eq!(" HELP ".try_into(), expected);
|
||||
assert_eq!(" Help; ".try_into(), expected);
|
||||
assert_eq!(" help ; ".try_into(), expected);
|
||||
assert_eq!(" help me; ".try_into(), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn show_databases() {
|
||||
let expected = Ok(ReplCommand::ShowDatabases);
|
||||
assert_eq!("show databases".try_into(), expected);
|
||||
assert_eq!("show Databases".try_into(), expected);
|
||||
assert_eq!("show databases;".try_into(), expected);
|
||||
assert_eq!("SHOW DATABASES".try_into(), expected);
|
||||
|
||||
assert_eq!("SHOW DATABASES DD".try_into(), sql_cmd("SHOW DATABASES DD"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn use_database() {
|
||||
let expected = Ok(ReplCommand::UseDatabase {
|
||||
db_name: "Foo".to_string(),
|
||||
});
|
||||
assert_eq!("use Foo".try_into(), expected);
|
||||
assert_eq!("use Database Foo;".try_into(), expected);
|
||||
assert_eq!("use Database Foo ;".try_into(), expected);
|
||||
assert_eq!(" use Database Foo; ".try_into(), expected);
|
||||
assert_eq!(" use Database Foo; ".try_into(), expected);
|
||||
|
||||
// ensure that database name is case sensitive
|
||||
let expected = Ok(ReplCommand::UseDatabase {
|
||||
db_name: "FOO".to_string(),
|
||||
});
|
||||
assert_eq!("use FOO".try_into(), expected);
|
||||
assert_eq!("use DATABASE FOO;".try_into(), expected);
|
||||
assert_eq!("USE DATABASE FOO;".try_into(), expected);
|
||||
|
||||
let expected: Result<ReplCommand, String> =
|
||||
Err("name not specified. Usage: USE DATABASE <name>".to_string());
|
||||
assert_eq!("use Database;".try_into(), expected);
|
||||
assert_eq!("use DATABASE".try_into(), expected);
|
||||
assert_eq!("use database".try_into(), expected);
|
||||
|
||||
let expected = sql_cmd("use database foo bar");
|
||||
assert_eq!("use database foo bar".try_into(), expected);
|
||||
|
||||
let expected = sql_cmd("use database foo BAR");
|
||||
assert_eq!("use database foo BAR".try_into(), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sql_command() {
|
||||
let expected = sql_cmd("SELECT * from foo");
|
||||
assert_eq!("SELECT * from foo".try_into(), expected);
|
||||
// ensure that we aren't messing with capitalization
|
||||
assert_ne!("select * from foo".try_into(), expected);
|
||||
|
||||
let expected = sql_cmd("select * from foo");
|
||||
assert_eq!("select * from foo".try_into(), expected);
|
||||
|
||||
// default to sql command
|
||||
let expected = sql_cmd("blah");
|
||||
assert_eq!("blah".try_into(), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exit() {
|
||||
let expected = Ok(ReplCommand::Exit);
|
||||
assert_eq!("exit".try_into(), expected);
|
||||
assert_eq!("exit;".try_into(), expected);
|
||||
assert_eq!("exit ;".try_into(), expected);
|
||||
assert_eq!("EXIT".try_into(), expected);
|
||||
|
||||
assert_eq!("quit".try_into(), expected);
|
||||
assert_eq!("quit;".try_into(), expected);
|
||||
assert_eq!("quit ;".try_into(), expected);
|
||||
assert_eq!("QUIT".try_into(), expected);
|
||||
|
||||
let expected = sql_cmd("quit dragging");
|
||||
assert_eq!("quit dragging".try_into(), expected);
|
||||
}
|
||||
}
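Aside (not part of the diff): a minimal, hypothetical sketch of the tokenisation that the tests above exercise. Keywords are compared case-insensitively while the raw tokens keep their original case, which is why `USE DATABASE FOO` preserves `FOO` but `Help;` still matches `HELP`. The helper names are illustrative and not the actual IOx functions.

```rust
// Sketch only: mirrors the normalisation implied by the tests above.
// `keywords` is used for case-insensitive keyword matching, while `raw`
// keeps the original casing for database names and SQL text.
fn normalize(input: &str) -> (Vec<String>, Vec<String>) {
    let trimmed = input.trim().trim_end_matches(';').trim();
    let raw: Vec<String> = trimmed.split_whitespace().map(str::to_string).collect();
    let keywords: Vec<String> = raw.iter().map(|s| s.to_ascii_lowercase()).collect();
    (keywords, raw)
}

fn main() {
    let (keywords, raw) = normalize("  USE Database Foo; ");
    assert_eq!(keywords, ["use", "database", "foo"]);
    assert_eq!(raw[2], "Foo"); // database name keeps its case
    println!("{:?} / {:?}", keywords, raw);
}
```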

@ -25,10 +25,8 @@ use generated_types::{
};
use observability_deps::tracing::{error, info};
use query::{
    exec::fieldlist::FieldList,
    exec::seriesset::{Error as SeriesSetError, SeriesSetItem},
    predicate::PredicateBuilder,
    DatabaseStore,
    exec::fieldlist::FieldList, exec::seriesset::Error as SeriesSetError,
    predicate::PredicateBuilder, DatabaseStore,
};
use snafu::{OptionExt, ResultExt, Snafu};
use std::{collections::HashMap, sync::Arc};

@ -210,14 +208,12 @@ impl<T> Storage for StorageService<T>
where
    T: DatabaseStore + 'static,
{
    type ReadFilterStream = ReceiverStream<Result<ReadResponse, Status>>;
    type ReadFilterStream = futures::stream::Iter<std::vec::IntoIter<Result<ReadResponse, Status>>>;

    async fn read_filter(
        &self,
        req: tonic::Request<ReadFilterRequest>,
    ) -> Result<tonic::Response<Self::ReadFilterStream>, Status> {
        let (tx, rx) = mpsc::channel(4);

        let read_filter_request = req.into_inner();

        let db_name = get_database_name(&read_filter_request)?;

@ -230,27 +226,22 @@ where

        info!(%db_name, ?range, predicate=%predicate.loggable(),"read filter");

        read_filter_impl(
            tx.clone(),
            Arc::clone(&self.db_store),
            db_name,
            range,
            predicate,
        )
        .await
        .map_err(|e| e.to_status())?;
        let results = read_filter_impl(Arc::clone(&self.db_store), db_name, range, predicate)
            .await
            .map_err(|e| e.to_status())?
            .into_iter()
            .map(Ok)
            .collect::<Vec<_>>();

        Ok(tonic::Response::new(ReceiverStream::new(rx)))
        Ok(tonic::Response::new(futures::stream::iter(results)))
    }

    type ReadGroupStream = ReceiverStream<Result<ReadResponse, Status>>;
    type ReadGroupStream = futures::stream::Iter<std::vec::IntoIter<Result<ReadResponse, Status>>>;

    async fn read_group(
        &self,
        req: tonic::Request<ReadGroupRequest>,
    ) -> Result<tonic::Response<Self::ReadGroupStream>, Status> {
        let (tx, rx) = mpsc::channel(4);

        let read_group_request = req.into_inner();

        let db_name = get_database_name(&read_group_request)?;

@ -283,8 +274,7 @@ where
        let gby_agg = expr::make_read_group_aggregate(aggregate, group, group_keys)
            .context(ConvertingReadGroupAggregate { aggregate_string })?;

        query_group_impl(
            tx.clone(),
        let results = query_group_impl(
            Arc::clone(&self.db_store),
            db_name,
            range,

@ -292,19 +282,21 @@ where
            gby_agg,
        )
        .await
        .map_err(|e| e.to_status())?;
        .map_err(|e| e.to_status())?
        .into_iter()
        .map(Ok)
        .collect::<Vec<_>>();

        Ok(tonic::Response::new(ReceiverStream::new(rx)))
        Ok(tonic::Response::new(futures::stream::iter(results)))
    }

    type ReadWindowAggregateStream = ReceiverStream<Result<ReadResponse, Status>>;
    type ReadWindowAggregateStream =
        futures::stream::Iter<std::vec::IntoIter<Result<ReadResponse, Status>>>;

    async fn read_window_aggregate(
        &self,
        req: tonic::Request<ReadWindowAggregateRequest>,
    ) -> Result<tonic::Response<Self::ReadGroupStream>, Status> {
        let (tx, rx) = mpsc::channel(4);

        let read_window_aggregate_request = req.into_inner();

        let db_name = get_database_name(&read_window_aggregate_request)?;

@ -329,8 +321,7 @@ where
        let gby_agg = expr::make_read_window_aggregate(aggregate, window_every, offset, window)
            .context(ConvertingWindowAggregate { aggregate_string })?;

        query_group_impl(
            tx.clone(),
        let results = query_group_impl(
            Arc::clone(&self.db_store),
            db_name,
            range,

@ -338,9 +329,12 @@ where
            gby_agg,
        )
        .await
        .map_err(|e| e.to_status())?;
        .map_err(|e| e.to_status())?
        .into_iter()
        .map(Ok)
        .collect::<Vec<_>>();

        Ok(tonic::Response::new(ReceiverStream::new(rx)))
        Ok(tonic::Response::new(futures::stream::iter(results)))
    }

    type TagKeysStream = ReceiverStream<Result<StringValuesResponse, Status>>;

@ -843,14 +837,13 @@ where
    Ok(StringValuesResponse { values })
}

/// Launch async tasks that send the result of executing read_filter to `tx`
/// Materialise the result of executing read_filter.
async fn read_filter_impl<'a, T>(
    tx: mpsc::Sender<Result<ReadResponse, Status>>,
    db_store: Arc<T>,
    db_name: DatabaseName<'static>,
    range: Option<TimestampRange>,
    rpc_predicate: Option<Predicate>,
) -> Result<()>
) -> Result<Vec<ReadResponse>, Error>
where
    T: DatabaseStore + 'static,
{

@ -872,68 +865,42 @@ where
    let db = db_store.db(db_name).context(DatabaseNotFound { db_name })?;
    let executor = db_store.executor();

    // PERF - This used to send responses to the client before execution had
    // completed, but now it doesn't. We may need to revisit this in the future
    // if big queries are causing a significant latency in TTFB.

    // Build the plans
    let series_plan = Planner::new(Arc::clone(&executor))
        .read_filter(db, predicate)
        .await
        .map_err(|e| Box::new(e) as _)
        .context(PlanningFilteringSeries { db_name })?;

    // Spawn task to convert between series sets and the gRPC results
    // and to run the actual plans (so we can return a result to the
    // client before we start sending result)
    let (tx_series, rx_series) = mpsc::channel(4);
    tokio::spawn(async move {
        convert_series_set(rx_series, tx)
            .await
            .log_if_error("Converting series set")
    });
    // Execute the plans.
    let ss_items = executor
        .to_series_set(series_plan)
        .await
        .map_err(|e| Box::new(e) as _)
        .context(FilteringSeries {
            db_name: owned_db_name.as_str(),
        })
        .log_if_error("Running series set plan")?;

    // fire up the plans and start the pipeline flowing
    tokio::spawn(async move {
        executor
            .to_series_set(series_plan, tx_series)
            .await
            .map_err(|e| Box::new(e) as _)
            .context(FilteringSeries {
                db_name: owned_db_name.as_str(),
            })
            .log_if_error("Running series set plan")
    });

    Ok(())
}

/// Receives SeriesSets from rx, converts them to ReadResponse and
/// and sends them to tx
async fn convert_series_set(
    mut rx: mpsc::Receiver<Result<SeriesSetItem, SeriesSetError>>,
    tx: mpsc::Sender<Result<ReadResponse, Status>>,
) -> Result<()> {
    while let Some(series_set) = rx.recv().await {
        let response = series_set
            .context(ComputingSeriesSet)
            .and_then(|series_set| {
                series_set_item_to_read_response(series_set).context(ConvertingSeriesSet)
            })
            .map_err(|e| Status::internal(e.to_string()));

        tx.send(response)
            .await
            .map_err(|e| Box::new(e) as _)
            .context(SendingResults)?
    }
    Ok(())
    // Convert results into API responses
    ss_items
        .into_iter()
        .map(|series_set| series_set_item_to_read_response(series_set).context(ConvertingSeriesSet))
        .collect::<Result<Vec<ReadResponse>, Error>>()
}
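Aside (not part of the diff): the change above replaces the mpsc/`ReceiverStream` plumbing with fully materialised results that are then wrapped by `futures::stream::iter`, matching the new `ReadFilterStream`/`ReadGroupStream` type aliases. A hedged, self-contained sketch of that pattern follows; it assumes only the `futures` crate and uses a placeholder `Status` type instead of `tonic::Status`.

```rust
use futures::stream::{self, StreamExt};

#[allow(dead_code)]
#[derive(Debug)]
struct Status(String); // placeholder for the real gRPC error type

// Minimal sketch of the "materialise, then wrap" pattern used above:
// eagerly computed results become an already-complete stream.
fn into_response_stream(
    results: Vec<String>,
) -> futures::stream::Iter<std::vec::IntoIter<Result<String, Status>>> {
    stream::iter(results.into_iter().map(Ok).collect::<Vec<_>>())
}

fn main() {
    let stream = into_response_stream(vec!["frame 1".into(), "frame 2".into()]);
    // Drain the stream; with tonic this would instead be handed to Response::new.
    let frames: Vec<_> = futures::executor::block_on(stream.collect::<Vec<_>>());
    assert_eq!(frames.len(), 2);
    println!("{:?}", frames);
}
```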

/// Launch async tasks that send the result of executing read_group to `tx`
async fn query_group_impl<T>(
    tx: mpsc::Sender<Result<ReadResponse, Status>>,
    db_store: Arc<T>,
    db_name: DatabaseName<'static>,
    range: Option<TimestampRange>,
    rpc_predicate: Option<Predicate>,
    gby_agg: GroupByAndAggregate,
) -> Result<()>
) -> Result<Vec<ReadResponse>, Error>
where
    T: DatabaseStore + 'static,
{

@ -972,29 +939,25 @@ where
        .map_err(|e| Box::new(e) as _)
        .context(PlanningGroupSeries { db_name })?;

    // Spawn task to convert between series sets and the gRPC results
    // and to run the actual plans (so we can return a result to the
    // client before we start sending result)
    let (tx_series, rx_series) = mpsc::channel(4);
    tokio::spawn(async move {
        convert_series_set(rx_series, tx)
            .await
            .log_if_error("Converting grouped series set")
    });
    // PERF - This used to send responses to the client before execution had
    // completed, but now it doesn't. We may need to revisit this in the future
    // if big queries are causing a significant latency in TTFB.

    // fire up the plans and start the pipeline flowing
    tokio::spawn(async move {
        executor
            .to_series_set(grouped_series_set_plan, tx_series)
            .await
            .map_err(|e| Box::new(e) as _)
            .context(GroupingSeries {
                db_name: owned_db_name.as_str(),
            })
            .log_if_error("Running Grouped SeriesSet Plan")
    });
    // Execute the plans
    let ss_items = executor
        .to_series_set(grouped_series_set_plan)
        .await
        .map_err(|e| Box::new(e) as _)
        .context(GroupingSeries {
            db_name: owned_db_name.as_str(),
        })
        .log_if_error("Running Grouped SeriesSet Plan")?;

    Ok(())
    // Convert plans to API responses
    ss_items
        .into_iter()
        .map(|series_set| series_set_item_to_read_response(series_set).context(ConvertingSeriesSet))
        .collect::<Result<Vec<ReadResponse>, Error>>()
}

/// Return field names, restricted via optional measurement, timestamp and

src/main.rs

@ -30,6 +30,7 @@ mod commands {
    pub mod run;
    pub mod server;
    pub mod server_remote;
    pub mod sql;
    pub mod stats;
    pub mod tracing;
}

@ -53,6 +54,9 @@ Examples:
# Run the InfluxDB IOx server:
influxdb_iox

# Run the interactive SQL prompt
influxdb_iox sql

# Display all server settings
influxdb_iox run --help

@ -147,6 +151,7 @@ enum Command {
    Stats(commands::stats::Config),
    Server(commands::server::Config),
    Operation(commands::operations::Config),
    Sql(commands::sql::Config),
}

fn main() -> Result<(), std::io::Error> {

@ -235,6 +240,13 @@ fn main() -> Result<(), std::io::Error> {
                std::process::exit(ReturnCode::Failure as _)
            }
        }
        Command::Sql(config) => {
            let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
            if let Err(e) = commands::sql::command(host, config).await {
                eprintln!("{}", e);
                std::process::exit(ReturnCode::Failure as _)
            }
        }
    }
    });

@ -1,4 +1,4 @@
use super::scenario::{create_readable_database, rand_name, Scenario};
use super::scenario::{collect_query, create_readable_database, rand_name, Scenario};
use crate::common::server_fixture::ServerFixture;
use arrow_deps::assert_table_eq;

@ -17,16 +17,12 @@ pub async fn test() {

    let mut client = server_fixture.flight_client();

    let mut query_results = client
    let query_results = client
        .perform_query(scenario.database_name(), sql_query)
        .await
        .unwrap();

    let mut batches = vec![];

    while let Some(data) = query_results.next().await.unwrap() {
        batches.push(data);
    }
    let batches = collect_query(query_results).await;

    assert_table_eq!(expected_read_data, &batches);
}

@ -7,6 +7,8 @@ pub mod operations_cli;
pub mod read_api;
pub mod read_cli;
pub mod scenario;
mod sql_cli;
pub mod storage_api;
mod system_tables;
pub mod write_api;
pub mod write_cli;

@ -2,6 +2,7 @@ use std::{sync::Arc, time::SystemTime};

use generated_types::google::protobuf::Empty;
use generated_types::influxdata::iox::management::v1::*;
use influxdb_iox_client::flight::PerformQuery;
use rand::{
    distributions::{Alphanumeric, Standard},
    thread_rng, Rng,

@ -365,3 +366,12 @@ pub async fn create_two_partition_database(
        .await
.expect("write succeded");
|
||||
}

/// Collect the results of a query into a vector of record batches
pub async fn collect_query(mut query_results: PerformQuery) -> Vec<RecordBatch> {
    let mut batches = vec![];
    while let Some(data) = query_results.next().await.unwrap() {
        batches.push(data);
    }
    batches
}
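Aside (not part of the diff): `collect_query` simply drains a `next()`-style handle into a `Vec`. A self-contained sketch of the same shape, with a hypothetical `FakeQuery` standing in for `PerformQuery` (which needs a running IOx server); it assumes the `futures` crate for `block_on`.

```rust
// Hypothetical stand-in for PerformQuery: yields items until exhausted.
struct FakeQuery {
    remaining: Vec<&'static str>,
}

impl FakeQuery {
    async fn next(&mut self) -> Result<Option<&'static str>, String> {
        Ok(self.remaining.pop())
    }
}

// Same shape as `collect_query` above: drain everything into a Vec.
async fn collect_fake(mut query: FakeQuery) -> Vec<&'static str> {
    let mut batches = vec![];
    while let Some(batch) = query.next().await.unwrap() {
        batches.push(batch);
    }
    batches
}

fn main() {
    let query = FakeQuery { remaining: vec!["batch-2", "batch-1"] };
    let batches = futures::executor::block_on(collect_fake(query));
    assert_eq!(batches, ["batch-1", "batch-2"]);
}
```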

@ -0,0 +1,129 @@
use assert_cmd::Command;
use predicates::prelude::*;

use crate::common::server_fixture::ServerFixture;

use super::scenario::{create_readable_database, create_two_partition_database, rand_name};

#[tokio::test]
async fn test_error_connecting() {
    let addr = "http://hope_this_addr_does_not_exist";
    Command::cargo_bin("influxdb_iox")
        .unwrap()
        .arg("sql")
        .arg("--host")
        .arg(addr)
        .write_stdin("exit")
        .assert()
        .failure()
        .stderr(predicate::str::contains(
            "Error connecting to http://hope_this_addr_does_not_exist",
        ));
}

#[tokio::test]
async fn test_basic() {
    let fixture = ServerFixture::create_shared().await;
    let addr = fixture.grpc_base();
    Command::cargo_bin("influxdb_iox")
        .unwrap()
        .arg("sql")
        .arg("--host")
        .arg(addr)
        .write_stdin("exit")
        .assert()
        .success()
        .stdout(
            predicate::str::contains("Ready for commands")
                .and(predicate::str::contains("Connected to IOx Server")),
        );
}

#[tokio::test]
async fn test_help() {
    let fixture = ServerFixture::create_shared().await;
    let addr = fixture.grpc_base();
    Command::cargo_bin("influxdb_iox")
        .unwrap()
        .arg("sql")
        .arg("--host")
        .arg(addr)
        .write_stdin("help;")
        .assert()
        .success()
        .stdout(
            predicate::str::contains("# Basic IOx SQL Primer").and(predicate::str::contains(
                "SHOW DATABASES: List databases available on the server",
            )),
        );
}

#[tokio::test]
async fn test_exit() {
    let fixture = ServerFixture::create_shared().await;
    let addr = fixture.grpc_base();
    Command::cargo_bin("influxdb_iox")
        .unwrap()
        .arg("sql")
        .arg("--host")
        .arg(addr)
        // help should not be run as it is after the exit command
        .write_stdin("exit;\nhelp;")
        .assert()
        .success()
        .stdout(predicate::str::contains("# Basic IOx SQL Primer").not());
}

#[tokio::test]
async fn test_sql_show_databases() {
    let fixture = ServerFixture::create_shared().await;
    let addr = fixture.grpc_base();

    let db_name1 = rand_name();
    create_readable_database(&db_name1, fixture.grpc_channel()).await;

    let db_name2 = rand_name();
    create_readable_database(&db_name2, fixture.grpc_channel()).await;

    Command::cargo_bin("influxdb_iox")
        .unwrap()
        .arg("sql")
        .arg("--host")
        .arg(addr)
        .write_stdin("show databases;")
        .assert()
        .success()
        .stdout(
            predicate::str::contains("| db_name")
                .and(predicate::str::contains(&db_name1))
                .and(predicate::str::contains(&db_name2)),
        );
}

#[tokio::test]
async fn test_sql_use_database() {
    let fixture = ServerFixture::create_shared().await;
    let addr = fixture.grpc_base();

    let db_name = rand_name();
    create_two_partition_database(&db_name, fixture.grpc_channel()).await;

    let expected_output = r#"
+------+---------+----------+---------------------+-------+
| host | running | sleeping | time                | total |
+------+---------+----------+---------------------+-------+
| foo  | 4       | 514      | 2020-06-23 06:38:30 | 519   |
+------+---------+----------+---------------------+-------+
"#
    .trim();

    Command::cargo_bin("influxdb_iox")
        .unwrap()
        .arg("sql")
        .arg("--host")
        .arg(addr)
        .write_stdin(format!("use {};\n\nselect * from cpu;", db_name))
        .assert()
        .success()
        .stdout(predicate::str::contains(expected_output));
}
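Aside (not part of the diff): the new tests above drive the compiled binary through `assert_cmd` and assert on its output with `predicates`. A hedged, minimal sketch of that pattern, using `echo` instead of `influxdb_iox` so it runs without a server fixture (assumes a Unix-like `echo` on PATH and both crates as dev-dependencies):

```rust
use assert_cmd::Command;
use predicates::prelude::*;

#[test]
fn sketch_cli_assertion_pattern() {
    // Same shape as the tests above: run a binary, pass arguments,
    // then assert on the exit status and captured output.
    Command::new("echo")
        .arg("Ready for commands")
        .assert()
        .success()
        .stdout(predicate::str::contains("Ready for commands"));
}
```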

@ -0,0 +1,67 @@
use crate::common::server_fixture::ServerFixture;
use arrow_deps::assert_table_eq;

use super::scenario::{collect_query, create_readable_database, rand_name};

#[tokio::test]
async fn test_operations() {
    let fixture = ServerFixture::create_shared().await;

    let mut management_client = fixture.management_client();
    let mut write_client = fixture.write_client();
    let mut operations_client = fixture.operations_client();

    let db_name1 = rand_name();
    let db_name2 = rand_name();
    create_readable_database(&db_name1, fixture.grpc_channel()).await;
    create_readable_database(&db_name2, fixture.grpc_channel()).await;

    // write only into db_name1
    let partition_key = "cpu";
    let table_name = "cpu";
    let lp_lines = vec!["cpu,region=west user=23.2 100"];

    write_client
        .write(&db_name1, lp_lines.join("\n"))
        .await
.expect("write succeded");
|
||||

    // Move the chunk to read buffer
    let operation = management_client
        .close_partition_chunk(&db_name1, partition_key, table_name, 0)
        .await
        .expect("new partition chunk");

    let operation_id = operation.name.parse().expect("not an integer");
    operations_client
        .wait_operation(operation_id, Some(std::time::Duration::from_secs(1)))
        .await
        .expect("failed to wait operation");

    let mut client = fixture.flight_client();
    let sql_query = "select chunk_id, status, description from system.operations";

    let query_results = client.perform_query(&db_name1, sql_query).await.unwrap();

    let batches = collect_query(query_results).await;

    // parameterize on db_name1

    let expected_read_data = vec![
        "+----------+----------+-----------------------------+",
        "| chunk_id | status   | description                 |",
        "+----------+----------+-----------------------------+",
        "| 0        | Complete | Loading chunk to ReadBuffer |",
        "+----------+----------+-----------------------------+",
    ];

    assert_table_eq!(expected_read_data, &batches);

    // Should not see jobs from db1 when querying db2
    let query_results = client.perform_query(&db_name2, sql_query).await.unwrap();

    let batches = collect_query(query_results).await;
    let expected_read_data = vec!["++", "||", "++", "++"];

    assert_table_eq!(expected_read_data, &batches);
}
Binary file not shown.

@ -159,7 +159,7 @@ impl TaskStatus {
        }
    }

    /// If the job has completed, returns the total amount of CPU time
    /// If the job is running or completed, returns the total amount of CPU time
    /// spent executing futures
    pub fn cpu_nanos(&self) -> Option<usize> {
        match self {

@ -168,6 +168,16 @@ impl TaskStatus {
            Self::Complete { cpu_nanos, .. } => Some(*cpu_nanos),
        }
    }

    /// If the job has completed, returns the total amount of wall clock time
    /// spent executing futures
    pub fn wall_nanos(&self) -> Option<usize> {
        match self {
            Self::Creating => None,
            Self::Running { .. } => None,
            Self::Complete { wall_nanos, .. } => Some(*wall_nanos),
        }
    }
}
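Aside (not part of the diff): the new `wall_nanos` accessor pairs with `cpu_nanos` above. A hypothetical helper (not in this change) showing how a caller might combine the two once a task reports `Complete`:

```rust
// Hypothetical helper: fraction of wall-clock time spent on CPU for a
// completed job, given the two Option accessors shown above.
fn cpu_utilisation(cpu_nanos: Option<usize>, wall_nanos: Option<usize>) -> Option<f64> {
    match (cpu_nanos, wall_nanos) {
        (Some(cpu), Some(wall)) if wall > 0 => Some(cpu as f64 / wall as f64),
        _ => None, // still Creating/Running, or no wall-clock time recorded
    }
}

fn main() {
    // e.g. a job that spent 250ns on CPU over 1000ns of wall-clock time
    assert_eq!(cpu_utilisation(Some(250), Some(1_000)), Some(0.25));
    assert_eq!(cpu_utilisation(None, None), None); // job not complete yet
}
```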

/// A Tracker can be used to monitor/cancel/wait for a set of associated futures