fix: Move variables within format strings. Thanks clippy!

Changes made automatically using `cargo clippy --fix`.
pull/24376/head
Carol (Nichols || Goulding) 2023-01-30 17:20:05 -05:00
parent fbfbe1adb4
commit 30fea67701
No known key found for this signature in database
GPG Key ID: E907EE5A736F87D4
211 changed files with 817 additions and 1129 deletions

View File

@ -330,7 +330,7 @@ mod tests {
/// Build a test RNG seeded from the OS entropy source.
///
/// The seed is printed so a failing, randomized test run can be reproduced
/// by re-seeding with the logged value.
fn make_rng() -> StdRng {
    let seed = OsRng::default().next_u64();
    // Single log line (the pre-clippy duplicate was diff residue).
    println!("Seed: {seed}");
    StdRng::seed_from_u64(seed)
}

View File

@ -56,9 +56,9 @@ fn array_value_to_string(column: &ArrayRef, row: usize) -> Result<String> {
dur_column
.value(row)
.try_into()
.map_err(|e| ArrowError::InvalidArgumentError(format!("{:?}", e)))?,
.map_err(|e| ArrowError::InvalidArgumentError(format!("{e:?}")))?,
);
Ok(format!("{:?}", duration))
Ok(format!("{duration:?}"))
}
_ => {
// fallback to arrow's default printing for other types
@ -188,8 +188,7 @@ mod tests {
let actual: Vec<&str> = table.lines().collect();
assert_eq!(
expected, actual,
"Expected:\n\n{:#?}\nActual:\n\n{:#?}\n",
expected, actual
"Expected:\n\n{expected:#?}\nActual:\n\n{actual:#?}\n"
);
}

View File

@ -37,15 +37,13 @@ fn optimize_dict_col(
) -> Result<ArrayRef> {
if key_type != &DataType::Int32 {
return Err(ArrowError::NotYetImplemented(format!(
"truncating non-Int32 dictionaries not supported: {}",
key_type
"truncating non-Int32 dictionaries not supported: {key_type}"
)));
}
if value_type != &DataType::Utf8 {
return Err(ArrowError::NotYetImplemented(format!(
"truncating non-string dictionaries not supported: {}",
value_type
"truncating non-string dictionaries not supported: {value_type}"
)));
}

View File

@ -264,7 +264,7 @@ mod tests {
base,
};
let assert_fuzzy_eq = |a: f64, b: f64| assert!((b - a).abs() < 0.0001, "{} != {}", a, b);
let assert_fuzzy_eq = |a: f64, b: f64| assert!((b - a).abs() < 0.0001, "{a} != {b}");
// Create a static rng that takes the minimum of the range
let rng = Box::new(StepRng::new(0, 0));

View File

@ -409,7 +409,7 @@ where
/// # Panic
/// Panics when the member with the specified ID is unknown (or was already unregistered).
fn unregister_member(&mut self, id: &str) {
    // A single remove: calling `remove` twice (as the leftover duplicate
    // assert did) would always panic, since the second remove returns None.
    assert!(self.members.remove(id).is_some(), "Member '{id}' unknown");
}
/// Add used resource to the pool.

View File

@ -87,9 +87,7 @@ where
let size_bytes = (seen.m() + 7) / 8;
assert!(
size_bytes <= BOUND_SIZE_BYTES,
"size of bloom filter should be <= {} bytes but is {} bytes",
BOUND_SIZE_BYTES,
size_bytes,
"size of bloom filter should be <= {BOUND_SIZE_BYTES} bytes but is {size_bytes} bytes",
);
seen

View File

@ -18,7 +18,7 @@ pub enum CompactorAlgoVersion {
impl Display for CompactorAlgoVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
write!(f, "{self:?}")
}
}

View File

@ -28,10 +28,10 @@ impl std::str::FromStr for SocketAddr {
if let Some(addr) = addrs.next() {
Ok(Self(addr))
} else {
Err(format!("Found no addresses for '{}'", s))
Err(format!("Found no addresses for '{s}'"))
}
}
Err(e) => Err(format!("Cannot parse socket address '{}': {}", s, e)),
Err(e) => Err(format!("Cannot parse socket address '{s}': {e}")),
}
}
}

View File

@ -97,7 +97,7 @@ impl From<tonic::transport::Error> for Error {
use std::error::Error;
let details = source
.source()
.map(|e| format!(" ({})", e))
.map(|e| format!(" ({e})"))
.unwrap_or_else(|| "".to_string());
Self::TransportError { source, details }
@ -271,7 +271,7 @@ mod tests {
.unwrap()
.into_http_connection();
let url = format!("{}/the_api", url);
let url = format!("{url}/the_api");
println!("Sending to {url}");
let m = mockito::mock("POST", "/the_api")

View File

@ -1170,7 +1170,7 @@ mod tests {
/// return metrics reported by compaction
fn extract_byte_metrics(&self) -> ExtractedByteMetrics {
let shard_id = self.candidate_partition.shard_id();
let attributes = Attributes::from([("shard_id", format!("{}", shard_id).into())]);
let attributes = Attributes::from([("shard_id", format!("{shard_id}").into())]);
let (sample_count, buckets_with_counts) =
if let Some(observer) = self.metric.get_observer(&attributes) {
@ -1339,7 +1339,7 @@ mod tests {
];
for order in file_orders {
println!("Testing order {:?}", order);
println!("Testing order {order:?}");
let test_setup = TestSetup::new_for_sort().await;
let parquet_files = test_setup.input_files();

View File

@ -99,7 +99,7 @@ mod tests {
.iter()
.map(|f| (f.id.get(), f.compaction_level))
.collect();
println!("{:?}", files_and_levels);
println!("{files_and_levels:?}");
assert_eq!(
files_and_levels,
vec![

View File

@ -131,7 +131,7 @@ impl Config {
.expect("retry forever");
if topic.is_none() {
panic!("Topic {} not found", topic_name);
panic!("Topic {topic_name} not found");
}
let topic = topic.unwrap();
@ -151,10 +151,7 @@ impl Config {
match shard {
Some(shard) => shard.id,
None => {
panic!(
"Topic {} and Shard Index {} not found",
topic_name, shard_index
)
panic!("Topic {topic_name} and Shard Index {shard_index} not found")
}
}
}

View File

@ -692,7 +692,7 @@ impl std::fmt::Display for ColumnType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = self.as_str();
write!(f, "{}", s)
write!(f, "{s}")
}
}
@ -1374,7 +1374,7 @@ impl DeletePredicate {
if !out.is_empty() {
write!(&mut out, " AND ").expect("writing to a string shouldn't fail");
}
write!(&mut out, "{}", expr).expect("writing to a string shouldn't fail");
write!(&mut out, "{expr}").expect("writing to a string shouldn't fail");
}
out
}
@ -1763,9 +1763,7 @@ impl ColumnSummary {
let total_count = self.total_count();
assert!(
total_count <= len,
"trying to shrink column stats from {} to {}",
total_count,
len
"trying to shrink column stats from {total_count} to {len}"
);
let delta = len - total_count;
self.stats.update_for_nulls(delta);
@ -2363,7 +2361,7 @@ pub struct TimestampMinMax {
impl TimestampMinMax {
/// Create a new TimestampMinMax.
///
/// # Panics
/// Panics if `min > max`.
pub fn new(min: i64, max: i64) -> Self {
    assert!(min <= max, "expected min ({min}) <= max ({max})");
    Self { min, max }
}
@ -2415,13 +2413,13 @@ mod tests {
// Random chunk IDs use UUID-format
let id_random = ChunkId::new();
let inner: Uuid = id_random.get();
assert_eq!(format!("{:?}", id_random), format!("ChunkId({})", inner));
assert_eq!(format!("{}", id_random), format!("ChunkId({})", inner));
assert_eq!(format!("{id_random:?}"), format!("ChunkId({inner})"));
assert_eq!(format!("{id_random}"), format!("ChunkId({inner})"));
// Deterministic IDs use integer format
let id_test = ChunkId::new_test(42);
assert_eq!(format!("{:?}", id_test), "ChunkId(42)");
assert_eq!(format!("{}", id_test), "ChunkId(42)");
assert_eq!(format!("{id_test:?}"), "ChunkId(42)");
assert_eq!(format!("{id_test}"), "ChunkId(42)");
}
#[test]
@ -3346,7 +3344,7 @@ mod tests {
];
for (name, range) in cases {
println!("case: {}", name);
println!("case: {name}");
assert!(!range.contains(i64::MIN));
assert!(!range.contains(i64::MIN + 1));
assert!(range.contains(MIN_NANO_TIME));

View File

@ -382,7 +382,7 @@ mod tests {
let ts_predicate_expr = make_range_expr(101, 202, "time");
let expected_string =
"TimestampNanosecond(101, None) <= time AND time < TimestampNanosecond(202, None)";
let actual_string = format!("{:?}", ts_predicate_expr);
let actual_string = format!("{ts_predicate_expr:?}");
assert_eq!(actual_string, expected_string);
}

View File

@ -376,7 +376,7 @@ pub mod test_util {
match (a, b) {
(DmlOperation::Write(a), DmlOperation::Write(b)) => assert_writes_eq(a, b),
(DmlOperation::Delete(a), DmlOperation::Delete(b)) => assert_eq!(a, b),
(a, b) => panic!("a != b, {:?} vs {:?}", a, b),
(a, b) => panic!("a != b, {a:?} vs {b:?}"),
}
}
@ -384,7 +384,7 @@ pub mod test_util {
/// Assert that `a` is a write operation whose contents equal `b`.
///
/// # Panics
/// Panics when `a` is not a `DmlOperation::Write` or the writes differ.
pub fn assert_write_op_eq(a: &DmlOperation, b: &DmlWrite) {
    match a {
        DmlOperation::Write(a) => assert_writes_eq(a, b),
        // Single catch-all arm; the leftover duplicate was unreachable.
        _ => panic!("unexpected operation: {a:?}"),
    }
}
@ -411,8 +411,7 @@ pub mod test_util {
assert_eq!(
pretty_format_batches(&[a_batch.to_arrow(Projection::All).unwrap()]).unwrap(),
pretty_format_batches(&[b_batch.to_arrow(Projection::All).unwrap()]).unwrap(),
"batches for table \"{}\" differ",
table_id
"batches for table \"{table_id}\" differ"
);
}
}
@ -421,7 +420,7 @@ pub mod test_util {
/// Assert that `a` is a delete operation equal to `b`.
///
/// # Panics
/// Panics when `a` is not a `DmlOperation::Delete` or the deletes differ.
pub fn assert_delete_op_eq(a: &DmlOperation, b: &DmlDelete) {
    match a {
        DmlOperation::Delete(a) => assert_eq!(a, b),
        // Single catch-all arm; the leftover duplicate was unreachable.
        _ => panic!("unexpected operation: {a:?}"),
    }
}

View File

@ -512,8 +512,7 @@ mod tests {
assert!(
tname.starts_with("Test DedicatedExecutor"),
"Invalid thread name: {}",
tname,
"Invalid thread name: {tname}",
);
25usize

View File

@ -291,10 +291,7 @@ pub struct AlreadyExists {
impl AlreadyExists {
pub fn new(resource_type: ResourceType, resource_name: String) -> Self {
let description = format!(
"Resource {}/{} already exists",
resource_type, resource_name
);
let description = format!("Resource {resource_type}/{resource_name} already exists");
Self {
resource_type,
@ -369,7 +366,7 @@ pub struct NotFound {
impl NotFound {
pub fn new(resource_type: ResourceType, resource_name: String) -> Self {
let description = format!("Resource {}/{} not found", resource_type, resource_name);
let description = format!("Resource {resource_type}/{resource_name} not found");
Self {
resource_type,

View File

@ -10,14 +10,14 @@ use snafu::{ResultExt, Snafu};
/// Wrap a DataFusion serialization error into a `FieldViolation` for `field`.
fn expr_to_bytes_violation(field: impl Into<String>, e: DataFusionError) -> FieldViolation {
    FieldViolation {
        field: field.into(),
        // One description field only; the duplicated field was a compile error.
        description: format!("Error converting Expr to bytes: {e}"),
    }
}
/// Wrap a DataFusion deserialization error into a `FieldViolation` for `field`.
fn expr_from_bytes_violation(field: impl Into<String>, e: DataFusionError) -> FieldViolation {
    FieldViolation {
        field: field.into(),
        // One description field only; the duplicated field was a compile error.
        description: format!("Error creating Expr from bytes: {e}"),
    }
}
@ -168,7 +168,7 @@ impl TryFrom<proto::Predicate> for Predicate {
// try to convert to ValueExpr
expr.try_into().map_err(|e| FieldViolation {
field: "expr".into(),
description: format!("Internal: Serialized expr a valid ValueExpr: {:?}", e),
description: format!("Internal: Serialized expr a valid ValueExpr: {e:?}"),
})
})
.collect::<Result<Vec<ValueExpr>, FieldViolation>>()?;

View File

@ -234,7 +234,7 @@ pub const ANY_TYPE_PREFIX: &str = "type.googleapis.com";
/// This is the full Protobuf package and message name prefixed by
/// "type.googleapis.com/"
pub fn protobuf_type_url(protobuf_type: &str) -> String {
format!("{}/{}", ANY_TYPE_PREFIX, protobuf_type)
format!("{ANY_TYPE_PREFIX}/{protobuf_type}")
}
/// Protobuf file descriptor containing all generated types.

View File

@ -148,8 +148,7 @@ mod tests {
output.merge(test.right);
assert_eq!(
&output, test.expected,
"Mismatch\n\nOutput:\n{:#?}\n\nTest:\n{:#?}",
output, test
"Mismatch\n\nOutput:\n{output:#?}\n\nTest:\n{test:#?}"
);
}
}

View File

@ -6,7 +6,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let request = tonic::Request::new(TestRequest { question: 41 });
let response = client.test_unary(request).await?;
println!("RESPONSE={:?}", response);
println!("RESPONSE={response:?}");
Ok(())
}

View File

@ -52,7 +52,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let addr = "[::1]:50051".parse().unwrap();
let service = TestService;
println!("TestService listening on {}", addr);
println!("TestService listening on {addr}");
// Create a binary log sink that writes length delimited binary log entries.
let file = std::fs::File::create("/tmp/grpcgo_binarylog.bin")?;
@ -63,7 +63,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
// The default NoReflection predicate filters out all gRPC reflection chatter.
let binlog_layer = binlog_layer.with_predicate(NoReflection);
// You can provide a custom logger.
let binlog_layer = binlog_layer.with_error_logger(|e| eprintln!("grpc binlog error: {:?}", e));
let binlog_layer = binlog_layer.with_error_logger(|e| eprintln!("grpc binlog error: {e:?}"));
Server::builder()
.layer(binlog_layer)

View File

@ -47,7 +47,7 @@ impl Sink for DebugSink {
type Error = ();
fn write(&self, data: GrpcLogEntry, _error_logger: impl ErrorLogger<Self::Error>) {
    // Debug sink: dump each log entry to stderr exactly once.
    eprintln!("{data:?}");
}
}

View File

@ -31,7 +31,7 @@ impl Fixture {
let addr: SocketAddr = "127.0.0.1:0".parse()?;
let listener = tokio::net::TcpListener::bind(addr).await?;
let local_addr = listener.local_addr()?;
let local_addr = format!("http://{}", local_addr);
let local_addr = format!("http://{local_addr}");
tokio::spawn(async move {
Server::builder()
@ -66,7 +66,7 @@ impl Drop for Fixture {
let (tmp_tx, _) = tokio::sync::oneshot::channel();
let shutdown_tx = std::mem::replace(&mut self.shutdown_tx, tmp_tx);
if let Err(e) = shutdown_tx.send(()) {
eprintln!("error shutting down text fixture: {:?}", e);
eprintln!("error shutting down text fixture: {e:?}");
}
}
}

View File

@ -488,7 +488,7 @@ mod tests {
};
let join_handle = tokio::task::spawn(server);
let connection = Builder::default()
.build(format!("http://{}", bind_addr))
.build(format!("http://{bind_addr}"))
.await
.expect("failed to connect to server");
(connection, join_handle, requests)

View File

@ -47,14 +47,10 @@ mod tests {
let token = "some-token";
let mock_server = mock("POST", "/api/v2/buckets")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
format!(
r#"{{"orgID":"{}","name":"{}","retentionRules":[]}}"#,
org_id, bucket
)
.as_str(),
format!(r#"{{"orgID":"{org_id}","name":"{bucket}","retentionRules":[]}}"#).as_str(),
)
.create();

View File

@ -148,7 +148,7 @@ mod tests {
let token = "some-token";
let mock_server = mock("GET", BASE_PATH)
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.create();
let client = Client::new(mockito::server_url(), token);
@ -163,8 +163,8 @@ mod tests {
let token = "some-token";
let org_id = "some-org_id";
let mock_server = mock("GET", format!("{}?orgID={}", BASE_PATH, org_id).as_str())
.match_header("Authorization", format!("Token {}", token).as_str())
let mock_server = mock("GET", format!("{BASE_PATH}?orgID={org_id}").as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.create();
let client = Client::new(mockito::server_url(), token);
@ -179,8 +179,8 @@ mod tests {
let token = "some-token";
let label_id = "some-id";
let mock_server = mock("GET", format!("{}/{}", BASE_PATH, label_id).as_str())
.match_header("Authorization", format!("Token {}", token).as_str())
let mock_server = mock("GET", format!("{BASE_PATH}/{label_id}").as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.create();
let client = Client::new(mockito::server_url(), token);
@ -199,12 +199,11 @@ mod tests {
properties.insert("some-key".to_string(), "some-value".to_string());
let mock_server = mock("POST", BASE_PATH)
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
format!(
r#"{{"orgID":"{}","name":"{}","properties":{{"some-key":"some-value"}}}}"#,
org_id, name
r#"{{"orgID":"{org_id}","name":"{name}","properties":{{"some-key":"some-value"}}}}"#
)
.as_str(),
)
@ -224,9 +223,9 @@ mod tests {
let name = "some-user";
let mock_server = mock("POST", BASE_PATH)
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(format!(r#"{{"orgID":"{}","name":"{}"}}"#, org_id, name).as_str())
.match_body(format!(r#"{{"orgID":"{org_id}","name":"{name}"}}"#).as_str())
.create();
let client = Client::new(mockito::server_url(), token);
@ -244,15 +243,11 @@ mod tests {
let mut properties = HashMap::new();
properties.insert("some-key".to_string(), "some-value".to_string());
let mock_server = mock("PATCH", format!("{}/{}", BASE_PATH, label_id).as_str())
.match_header("Authorization", format!("Token {}", token).as_str())
let mock_server = mock("PATCH", format!("{BASE_PATH}/{label_id}").as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
format!(
r#"{{"name":"{}","properties":{{"some-key":"some-value"}}}}"#,
name
)
.as_str(),
format!(r#"{{"name":"{name}","properties":{{"some-key":"some-value"}}}}"#).as_str(),
)
.create();
@ -270,8 +265,8 @@ mod tests {
let token = "some-token";
let label_id = "some-label_id";
let mock_server = mock("PATCH", format!("{}/{}", BASE_PATH, label_id).as_str())
.match_header("Authorization", format!("Token {}", token).as_str())
let mock_server = mock("PATCH", format!("{BASE_PATH}/{label_id}").as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body("{}")
.create();
@ -288,8 +283,8 @@ mod tests {
let token = "some-token";
let label_id = "some-label_id";
let mock_server = mock("DELETE", format!("{}/{}", BASE_PATH, label_id).as_str())
.match_header("Authorization", format!("Token {}", token).as_str())
let mock_server = mock("DELETE", format!("{BASE_PATH}/{label_id}").as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.create();
let client = Client::new(mockito::server_url(), token);

View File

@ -155,7 +155,7 @@ mod tests {
let token = "some-token";
let mock_server = mock("GET", "/api/v2/query/suggestions")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.create();
let client = Client::new(mockito::server_url(), token);
@ -178,7 +178,7 @@ mod tests {
)
.as_str(),
)
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.create();
let client = Client::new(mockito::server_url(), token);
@ -194,7 +194,7 @@ mod tests {
let org = "some-org";
let query: Option<Query> = Some(Query::new("some-influx-query-string".to_string()));
let mock_server = mock("POST", "/api/v2/query")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Accepting-Encoding", "identity")
.match_header("Content-Type", "application/json")
.match_query(Matcher::UrlEncoded("org".into(), org.into()))
@ -219,7 +219,7 @@ mod tests {
let query: Option<Query> = None;
let mock_server = mock("POST", "/api/v2/query")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Accepting-Encoding", "identity")
.match_header("Content-Type", "application/json")
.match_query(Matcher::UrlEncoded("org".into(), org.into()))
@ -242,7 +242,7 @@ mod tests {
let token = "some-token";
let query: Option<Query> = Some(Query::new("some-influx-query-string".to_string()));
let mock_server = mock("POST", "/api/v2/query/analyze")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
serde_json::to_string(&query.clone().unwrap_or_default())
@ -263,7 +263,7 @@ mod tests {
let token = "some-token";
let query: Option<Query> = None;
let mock_server = mock("POST", "/api/v2/query/analyze")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
serde_json::to_string(&query.clone().unwrap_or_default())
@ -285,7 +285,7 @@ mod tests {
let language_request: Option<LanguageRequest> =
Some(LanguageRequest::new("some-influx-query-string".to_string()));
let mock_server = mock("POST", "/api/v2/query/ast")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
serde_json::to_string(&language_request.clone().unwrap_or_default())
@ -306,7 +306,7 @@ mod tests {
let token = "some-token";
let language_request: Option<LanguageRequest> = None;
let mock_server = mock("POST", "/api/v2/query/ast")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
serde_json::to_string(&language_request.clone().unwrap_or_default())
@ -328,7 +328,7 @@ mod tests {
let org = "some-org";
let query: Option<Query> = Some(Query::new("some-influx-query-string".to_string()));
let mock_server = mock("POST", "/api/v2/query")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Accepting-Encoding", "identity")
.match_header("Content-Type", "application/json")
.match_query(Matcher::UrlEncoded("org".into(), org.into()))

View File

@ -143,8 +143,7 @@ mod tests {
.match_header("Content-Type", "application/json")
.match_body(
format!(
r#"{{"username":"{}","org":"{}","bucket":"{}","password":"{}","retentionPeriodHrs":{}}}"#,
username, org, bucket, password, retention_period_hrs
r#"{{"username":"{username}","org":"{org}","bucket":"{bucket}","password":"{password}","retentionPeriodHrs":{retention_period_hrs}}}"#
).as_str(),
)
.create();
@ -175,12 +174,11 @@ mod tests {
let retention_period_hrs = 1;
let mock_server = mock("POST", "/api/v2/setup/user")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
format!(
r#"{{"username":"{}","org":"{}","bucket":"{}","password":"{}","retentionPeriodHrs":{}}}"#,
username, org, bucket, password, retention_period_hrs
r#"{{"username":"{username}","org":"{org}","bucket":"{bucket}","password":"{password}","retentionPeriodHrs":{retention_period_hrs}}}"#
).as_str(),
)
.create();
@ -210,11 +208,8 @@ mod tests {
let mock_server = mock("POST", "/api/v2/setup")
.match_header("Content-Type", "application/json")
.match_body(
format!(
r#"{{"username":"{}","org":"{}","bucket":"{}"}}"#,
username, org, bucket,
)
.as_str(),
format!(r#"{{"username":"{username}","org":"{org}","bucket":"{bucket}"}}"#,)
.as_str(),
)
.create();
@ -235,14 +230,11 @@ mod tests {
let bucket = "some-bucket";
let mock_server = mock("POST", "/api/v2/setup/user")
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_header("Content-Type", "application/json")
.match_body(
format!(
r#"{{"username":"{}","org":"{}","bucket":"{}"}}"#,
username, org, bucket,
)
.as_str(),
format!(r#"{{"username":"{username}","org":"{org}","bucket":"{bucket}"}}"#,)
.as_str(),
)
.create();

View File

@ -74,9 +74,9 @@ mod tests {
let mock_server = mock(
"POST",
format!("/api/v2/write?bucket={}&org={}", bucket, org).as_str(),
format!("/api/v2/write?bucket={bucket}&org={org}").as_str(),
)
.match_header("Authorization", format!("Token {}", token).as_str())
.match_header("Authorization", format!("Token {token}").as_str())
.match_body(
"\
cpu,host=server01 usage=0.5

View File

@ -150,7 +150,7 @@ impl Client {
let auth_header = if token.is_empty() {
None
} else {
Some(format!("Token {}", token))
Some(format!("Token {token}"))
};
Self {

View File

@ -287,9 +287,9 @@ impl WriteFieldValue for FieldValue {
match self {
Bool(v) => write!(w, "{}", if *v { "t" } else { "f" }),
F64(v) => write!(w, "{}", v),
I64(v) => write!(w, "{}i", v),
U64(v) => write!(w, "{}u", v),
F64(v) => write!(w, "{v}"),
I64(v) => write!(w, "{v}i"),
U64(v) => write!(w, "{v}u"),
String(v) => {
w.write_all(br#"""#)?;
escape_and_write_value(v, FIELD_VALUE_STRING_DELIMITERS, &mut w)?;
@ -310,7 +310,7 @@ impl WriteTimestamp for i64 {
where
W: io::Write,
{
write!(w, "{}", self)
write!(w, "{self}")
}
}
@ -332,7 +332,7 @@ where
for (idx, delim) in value.match_indices(escaping_specification) {
let s = &value[last..idx];
write!(w, r#"{}\{}"#, s, delim)?;
write!(w, r#"{s}\{delim}"#)?;
last = idx + delim.len();
}

View File

@ -173,21 +173,21 @@ impl TestServer {
fn new() -> Self {
let ready = Mutex::new(ServerState::Started);
let http_port = NEXT_PORT.fetch_add(1, SeqCst);
let http_base = format!("http://127.0.0.1:{}", http_port);
let http_base = format!("http://127.0.0.1:{http_port}");
let temp_dir = test_helpers::tmp_dir().unwrap();
let mut log_path = temp_dir.path().to_path_buf();
log_path.push(format!("influxdb_server_fixture_{}.log", http_port));
log_path.push(format!("influxdb_server_fixture_{http_port}.log"));
let mut bolt_path = temp_dir.path().to_path_buf();
bolt_path.push(format!("influxd_{}.bolt", http_port));
bolt_path.push(format!("influxd_{http_port}.bolt"));
let mut engine_path = temp_dir.path().to_path_buf();
engine_path.push(format!("influxd_{}_engine", http_port));
engine_path.push(format!("influxd_{http_port}_engine"));
println!("****************");
println!("Server Logging to {:?}", log_path);
println!("Server Logging to {log_path:?}");
println!("****************");
let log_file = File::create(log_path).expect("Opening log file");
@ -201,7 +201,7 @@ impl TestServer {
let (server_process, docker_name) = if local {
let cmd = Command::new("influxd")
.arg("--http-bind-address")
.arg(format!(":{}", http_port))
.arg(format!(":{http_port}"))
.arg("--bolt-path")
.arg(bolt_path)
.arg("--engine-path")
@ -214,7 +214,7 @@ impl TestServer {
(cmd, None)
} else {
let ci_image = "quay.io/influxdb/rust:ci";
let container_name = format!("influxdb2_{}", http_port);
let container_name = format!("influxdb2_{http_port}");
Command::new("docker")
.arg("container")
@ -222,7 +222,7 @@ impl TestServer {
.arg("--name")
.arg(&container_name)
.arg("--publish")
.arg(format!("{}:8086", http_port))
.arg(format!("{http_port}:8086"))
.arg("--rm")
.arg("--pull")
.arg("always")
@ -272,11 +272,11 @@ impl TestServer {
loop {
match client.get(&url).send().await {
Ok(resp) => {
println!("Successfully got a response from HTTP: {:?}", resp);
println!("Successfully got a response from HTTP: {resp:?}");
return;
}
Err(e) => {
println!("Waiting for HTTP server to be up: {}", e);
println!("Waiting for HTTP server to be up: {e}");
}
}
interval.tick().await;
@ -287,14 +287,14 @@ impl TestServer {
match capped_check.await {
Ok(_) => {
println!("Successfully started {}", self);
println!("Successfully started {self}");
*ready = ServerState::Ready;
}
Err(e) => {
// tell others that this server had some problem
*ready = ServerState::Error;
std::mem::drop(ready);
panic!("Server was not ready in required time: {}", e);
panic!("Server was not ready in required time: {e}");
}
}
@ -324,7 +324,7 @@ impl TestServer {
Err(e) => {
*ready = ServerState::Error;
std::mem::drop(ready);
panic!("Could not onboard: {}", e);
panic!("Could not onboard: {e}");
}
}
}

View File

@ -78,22 +78,22 @@ impl Display for QualifiedMeasurementName {
database: None,
retention_policy: None,
name,
} => write!(f, "{}", name),
} => write!(f, "{name}"),
Self {
database: Some(db),
retention_policy: None,
name,
} => write!(f, "{}..{}", db, name),
} => write!(f, "{db}..{name}"),
Self {
database: None,
retention_policy: Some(rp),
name,
} => write!(f, "{}.{}", rp, name),
} => write!(f, "{rp}.{name}"),
Self {
database: Some(db),
retention_policy: Some(rp),
name,
} => write!(f, "{}.{}.{}", db, rp, name),
} => write!(f, "{db}.{rp}.{name}"),
}
}
}
@ -847,7 +847,7 @@ mod tests {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
    // Head element first, then each tail element comma-separated;
    // a single write per element (the old-style duplicate doubled output).
    Display::fmt(self.head(), f)?;
    for arg in self.tail() {
        write!(f, ", {arg}")?;
    }
    Ok(())
}
@ -888,7 +888,7 @@ mod tests {
if let Some(first) = self.head() {
Display::fmt(first, f)?;
for arg in self.tail() {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
}

View File

@ -60,19 +60,19 @@ impl Display for CreateDatabaseStatement {
f.write_str(" WITH")?;
if let Some(v) = self.duration {
write!(f, " DURATION {}", v)?;
write!(f, " DURATION {v}")?;
}
if let Some(v) = self.replication {
write!(f, " REPLICATION {}", v)?;
write!(f, " REPLICATION {v}")?;
}
if let Some(v) = self.shard_duration {
write!(f, " SHARD DURATION {}", v)?;
write!(f, " SHARD DURATION {v}")?;
}
if let Some(v) = &self.retention_name {
write!(f, " NAME {}", v)?;
write!(f, " NAME {v}")?;
}
}
Ok(())

View File

@ -34,12 +34,12 @@ impl Display for DeleteStatement {
match self {
Self::FromWhere { from, condition } => {
write!(f, " {}", from)?;
write!(f, " {from}")?;
if let Some(where_clause) = condition {
write!(f, " {}", where_clause)?;
write!(f, " {where_clause}")?;
}
}
Self::Where(where_clause) => write!(f, " {}", where_clause)?,
Self::Where(where_clause) => write!(f, " {where_clause}")?,
};
Ok(())

View File

@ -53,7 +53,7 @@ impl Display for ExplainStatement {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
    // Layout: "EXPLAIN [<options> ]<select>", options written once.
    f.write_str("EXPLAIN ")?;
    if let Some(options) = &self.options {
        write!(f, "{options} ")?;
    }
    Display::fmt(&self.select, f)
}

View File

@ -137,29 +137,29 @@ impl Display for Expr {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::VarRef { name, data_type } => {
write!(f, "{}", name)?;
write!(f, "{name}")?;
if let Some(d) = data_type {
write!(f, "::{}", d)?;
write!(f, "::{d}")?;
}
}
Self::BindParameter(v) => write!(f, "{}", v)?,
Self::Literal(v) => write!(f, "{}", v)?,
Self::Binary { lhs, op, rhs } => write!(f, "{} {} {}", lhs, op, rhs)?,
Self::Nested(e) => write!(f, "({})", e)?,
Self::BindParameter(v) => write!(f, "{v}")?,
Self::Literal(v) => write!(f, "{v}")?,
Self::Binary { lhs, op, rhs } => write!(f, "{lhs} {op} {rhs}")?,
Self::Nested(e) => write!(f, "({e})")?,
Self::Call { name, args } => {
write!(f, "{}(", name)?;
write!(f, "{name}(")?;
if !args.is_empty() {
let args = args.as_slice();
write!(f, "{}", args[0])?;
for arg in &args[1..] {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
}
write!(f, ")")?;
}
Self::Wildcard(Some(dt)) => write!(f, "*::{}", dt)?,
Self::Wildcard(Some(dt)) => write!(f, "*::{dt}")?,
Self::Wildcard(None) => f.write_char('*')?,
Self::Distinct(ident) => write!(f, "DISTINCT {}", ident)?,
Self::Distinct(ident) => write!(f, "DISTINCT {ident}")?,
}
Ok(())

View File

@ -98,8 +98,8 @@ impl Display for ConditionalExpression {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
    // One arm per variant; the duplicated old-style arms were unreachable.
    match self {
        Self::Expr(v) => fmt::Display::fmt(v, f),
        Self::Binary { lhs, op, rhs } => write!(f, "{lhs} {op} {rhs}"),
        Self::Grouped(v) => write!(f, "({v})"),
    }
}
}

View File

@ -120,7 +120,7 @@ mod test {
let mut calls = Vec::new();
let mut call_no = 0;
super::walk_expression::<()>(expr, &mut |n| {
calls.push(format!("{}: {:?}", call_no, n));
calls.push(format!("{call_no}: {n:?}"));
call_no += 1;
std::ops::ControlFlow::Continue(())
});
@ -137,7 +137,7 @@ mod test {
walk_expression_mut::<()>(expr, &mut |e| {
match e {
ExpressionMut::Arithmetic(n) => match n {
Expr::VarRef { name, .. } => *name = format!("c_{}", name).into(),
Expr::VarRef { name, .. } => *name = format!("c_{name}").into(),
Expr::Literal(Literal::Integer(v)) => *v *= 10,
Expr::Literal(Literal::Regex(v)) => *v = format!("c_{}", v.0).into(),
_ => {}
@ -160,7 +160,7 @@ mod test {
let mut calls = Vec::new();
let mut call_no = 0;
super::walk_expr::<()>(&expr, &mut |n| {
calls.push(format!("{}: {:?}", call_no, n));
calls.push(format!("{call_no}: {n:?}"));
call_no += 1;
std::ops::ControlFlow::Continue(())
});
@ -178,7 +178,7 @@ mod test {
let mut calls = Vec::new();
let mut call_no = 0;
super::walk_expr_mut::<()>(&mut expr, &mut |n| {
calls.push(format!("{}: {:?}", call_no, n));
calls.push(format!("{call_no}: {n:?}"));
call_no += 1;
std::ops::ControlFlow::Continue(())
});
@ -194,7 +194,7 @@ mod test {
let (_, mut expr) = arithmetic_expression("foo + bar + 5").unwrap();
walk_expr_mut::<()>(&mut expr, &mut |e| {
match e {
Expr::VarRef { name, .. } => *name = format!("c_{}", name).into(),
Expr::VarRef { name, .. } => *name = format!("c_{name}").into(),
Expr::Literal(Literal::Integer(v)) => *v *= 10,
_ => {}
}

View File

@ -22,9 +22,9 @@ impl<I: Display> Display for Error<I> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Syntax { input: _, message } => {
write!(f, "Syntax error: {}", message)
write!(f, "Syntax error: {message}")
}
Self::Nom(_, kind) => write!(f, "nom error: {:?}", kind),
Self::Nom(_, kind) => write!(f, "nom error: {kind:?}"),
}
}
}

View File

@ -103,17 +103,17 @@ impl From<Regex> for Literal {
impl Display for Literal {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Integer(v) => write!(f, "{}", v),
Self::Unsigned(v) => write!(f, "{}", v),
Self::Float(v) => write!(f, "{}", v),
Self::Integer(v) => write!(f, "{v}"),
Self::Unsigned(v) => write!(f, "{v}"),
Self::Float(v) => write!(f, "{v}"),
Self::String(v) => {
f.write_char('\'')?;
write_escaped!(f, v, '\n' => "\\n", '\\' => "\\\\", '\'' => "\\'", '"' => "\\\"");
f.write_char('\'')
}
Self::Boolean(v) => write!(f, "{}", if *v { "true" } else { "false" }),
Self::Duration(v) => write!(f, "{}", v),
Self::Regex(v) => write!(f, "{}", v),
Self::Duration(v) => write!(f, "{v}"),
Self::Regex(v) => write!(f, "{v}"),
Self::Timestamp(ts) => write!(f, "{}", ts.to_rfc3339()),
}
}
@ -274,7 +274,7 @@ impl Display for Duration {
for (div, unit) in DIVISORS.iter().filter(|(div, _)| v > *div) {
let units = i / div;
if units > 0 {
write!(f, "{}{}", units, unit)?;
write!(f, "{units}{unit}")?;
i -= units * div;
}
}

View File

@ -75,39 +75,39 @@ impl Display for SelectStatement {
write!(f, "SELECT {} {}", self.fields, self.from)?;
if let Some(where_clause) = &self.condition {
write!(f, " {}", where_clause)?;
write!(f, " {where_clause}")?;
}
if let Some(group_by) = &self.group_by {
write!(f, " {}", group_by)?;
write!(f, " {group_by}")?;
}
if let Some(fill_clause) = &self.fill {
write!(f, " {}", fill_clause)?;
write!(f, " {fill_clause}")?;
}
if let Some(order_by) = &self.order_by {
write!(f, " {}", order_by)?;
write!(f, " {order_by}")?;
}
if let Some(limit) = &self.limit {
write!(f, " {}", limit)?;
write!(f, " {limit}")?;
}
if let Some(offset) = &self.offset {
write!(f, " {}", offset)?;
write!(f, " {offset}")?;
}
if let Some(slimit) = &self.series_limit {
write!(f, " {}", slimit)?;
write!(f, " {slimit}")?;
}
if let Some(soffset) = &self.series_offset {
write!(f, " {}", soffset)?;
write!(f, " {soffset}")?;
}
if let Some(tz_clause) = &self.timezone {
write!(f, " {}", tz_clause)?;
write!(f, " {tz_clause}")?;
}
Ok(())
@ -180,7 +180,7 @@ impl Display for MeasurementSelection {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::Name(ref name) => fmt::Display::fmt(name, f),
Self::Subquery(ref subquery) => write!(f, "({})", subquery),
Self::Subquery(ref subquery) => write!(f, "({subquery})"),
}
}
}
@ -207,9 +207,9 @@ pub type FromMeasurementClause = ZeroOrMore<MeasurementSelection>;
impl Display for FromMeasurementClause {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
if let Some(first) = self.head() {
write!(f, "FROM {}", first)?;
write!(f, "FROM {first}")?;
for arg in self.tail() {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
}
@ -232,9 +232,9 @@ pub type GroupByClause = ZeroOrMore<Dimension>;
impl Display for GroupByClause {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
if let Some(first) = self.head() {
write!(f, "GROUP BY {}", first)?;
write!(f, "GROUP BY {first}")?;
for arg in self.tail() {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
}
@ -317,8 +317,8 @@ impl Display for Dimension {
Self::Time {
interval,
offset: Some(offset),
} => write!(f, "TIME({}, {})", interval, offset),
Self::Time { interval, .. } => write!(f, "TIME({})", interval),
} => write!(f, "TIME({interval}, {offset})"),
Self::Time { interval, .. } => write!(f, "TIME({interval})"),
Self::Tag(v) => Display::fmt(v, f),
Self::Regex(v) => Display::fmt(v, f),
Self::Wildcard => f.write_char('*'),
@ -439,7 +439,7 @@ impl Display for Field {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(&self.expr, f)?;
if let Some(alias) = &self.alias {
write!(f, " AS {}", alias)?;
write!(f, " AS {alias}")?;
}
Ok(())
}
@ -492,7 +492,7 @@ impl Display for FieldList {
if let Some(first) = self.head() {
Display::fmt(first, f)?;
for arg in self.tail() {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
}

View File

@ -35,19 +35,19 @@ impl fmt::Display for ShowFieldKeysStatement {
f.write_str("SHOW FIELD KEYS")?;
if let Some(ref on_clause) = self.database {
write!(f, " {}", on_clause)?;
write!(f, " {on_clause}")?;
}
if let Some(ref expr) = self.from {
write!(f, " {}", expr)?;
write!(f, " {expr}")?;
}
if let Some(ref limit) = self.limit {
write!(f, " {}", limit)?;
write!(f, " {limit}")?;
}
if let Some(ref offset) = self.offset {
write!(f, " {}", offset)?;
write!(f, " {offset}")?;
}
Ok(())

View File

@ -36,8 +36,8 @@ impl fmt::Display for ExtendedOnClause {
f.write_str("ON ")?;
match self {
Self::Database(db) => write!(f, "{}", db),
Self::DatabaseRetentionPolicy(db, rp) => write!(f, "{}.{}", db, rp),
Self::Database(db) => write!(f, "{db}"),
Self::DatabaseRetentionPolicy(db, rp) => write!(f, "{db}.{rp}"),
Self::AllDatabases => write!(f, "*"),
Self::AllDatabasesAndRetentionPolicies => write!(f, "*.*"),
}
@ -98,23 +98,23 @@ impl fmt::Display for ShowMeasurementsStatement {
write!(f, "SHOW MEASUREMENTS")?;
if let Some(ref on_clause) = self.on {
write!(f, " {}", on_clause)?;
write!(f, " {on_clause}")?;
}
if let Some(ref with_clause) = self.with_measurement {
write!(f, " {}", with_clause)?;
write!(f, " {with_clause}")?;
}
if let Some(ref where_clause) = self.condition {
write!(f, " {}", where_clause)?;
write!(f, " {where_clause}")?;
}
if let Some(ref limit) = self.limit {
write!(f, " {}", limit)?;
write!(f, " {limit}")?;
}
if let Some(ref offset) = self.offset {
write!(f, " {}", offset)?;
write!(f, " {offset}")?;
}
Ok(())
@ -135,8 +135,8 @@ impl fmt::Display for WithMeasurementClause {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.write_str("WITH MEASUREMENT ")?;
match self {
Self::Equals(ref name) => write!(f, "= {}", name),
Self::Regex(ref re) => write!(f, "=~ {}", re),
Self::Equals(ref name) => write!(f, "= {name}"),
Self::Regex(ref re) => write!(f, "=~ {re}"),
}
}
}

View File

@ -21,7 +21,7 @@ impl Display for ShowRetentionPoliciesStatement {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "SHOW RETENTION POLICIES")?;
if let Some(ref database) = self.database {
write!(f, " {}", database)?;
write!(f, " {database}")?;
}
Ok(())
}

View File

@ -40,23 +40,23 @@ impl fmt::Display for ShowTagKeysStatement {
write!(f, "SHOW TAG KEYS")?;
if let Some(ref on_clause) = self.database {
write!(f, " {}", on_clause)?;
write!(f, " {on_clause}")?;
}
if let Some(ref expr) = self.from {
write!(f, " {}", expr)?;
write!(f, " {expr}")?;
}
if let Some(ref cond) = self.condition {
write!(f, " {}", cond)?;
write!(f, " {cond}")?;
}
if let Some(ref limit) = self.limit {
write!(f, " {}", limit)?;
write!(f, " {limit}")?;
}
if let Some(ref offset) = self.offset {
write!(f, " {}", offset)?;
write!(f, " {offset}")?;
}
Ok(())

View File

@ -50,25 +50,25 @@ impl Display for ShowTagValuesStatement {
write!(f, "SHOW TAG VALUES")?;
if let Some(ref on_clause) = self.database {
write!(f, " {}", on_clause)?;
write!(f, " {on_clause}")?;
}
if let Some(ref from_clause) = self.from {
write!(f, " {}", from_clause)?;
write!(f, " {from_clause}")?;
}
write!(f, " {}", self.with_key)?;
if let Some(ref where_clause) = self.condition {
write!(f, " {}", where_clause)?;
write!(f, " {where_clause}")?;
}
if let Some(ref limit) = self.limit {
write!(f, " {}", limit)?;
write!(f, " {limit}")?;
}
if let Some(ref offset) = self.offset {
write!(f, " {}", offset)?;
write!(f, " {offset}")?;
}
Ok(())
@ -122,7 +122,7 @@ impl Display for InList {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(self.head(), f)?;
for arg in self.tail() {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
Ok(())
}
@ -148,11 +148,11 @@ impl Display for WithKeyClause {
f.write_str("WITH KEY ")?;
match self {
Self::Eq(v) => write!(f, "= {}", v),
Self::NotEq(v) => write!(f, "!= {}", v),
Self::EqRegex(v) => write!(f, "=~ {}", v),
Self::NotEqRegex(v) => write!(f, "=! {}", v),
Self::In(list) => write!(f, "IN ({})", list),
Self::Eq(v) => write!(f, "= {v}"),
Self::NotEq(v) => write!(f, "!= {v}"),
Self::EqRegex(v) => write!(f, "=~ {v}"),
Self::NotEqRegex(v) => write!(f, "=! {v}"),
Self::In(list) => write!(f, "IN ({list})"),
}
}
}

View File

@ -70,7 +70,7 @@ impl Display for ShowFromClause {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "FROM {}", self.head())?;
for arg in self.tail() {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
Ok(())
}
@ -94,7 +94,7 @@ impl Display for DeleteFromClause {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "FROM {}", self.head())?;
for arg in self.tail() {
write!(f, ", {}", arg)?;
write!(f, ", {arg}")?;
}
Ok(())
}

View File

@ -1241,13 +1241,13 @@ mod test {
fn push_pre(self, name: &str, n: impl Debug) -> Self {
let mut s = self.0;
s.push(format!("pre_visit_{}: {:?}", name, n));
s.push(format!("pre_visit_{name}: {n:?}"));
Self(s)
}
fn push_post(self, name: &str, n: impl Debug) -> Self {
let mut s = self.0;
s.push(format!("post_visit_{}: {:?}", name, n));
s.push(format!("post_visit_{name}: {n:?}"));
Self(s)
}
}

View File

@ -1178,11 +1178,11 @@ mod test {
}
fn push_pre(&mut self, name: &str, n: impl Debug) {
self.0.push(format!("pre_visit_{}: {:?}", name, n));
self.0.push(format!("pre_visit_{name}: {n:?}"));
}
fn push_post(&mut self, name: &str, n: impl Debug) {
self.0.push(format!("post_visit_{}: {:?}", name, n));
self.0.push(format!("post_visit_{name}: {n:?}"));
}
}

View File

@ -66,7 +66,7 @@ pub struct ValidationErrors(Vec<ValidationError>);
impl Display for ValidationErrors {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.iter().fold(Ok(()), |result, e| {
result.and_then(|_| writeln!(f, "- {}", e))
result.and_then(|_| writeln!(f, "- {e}"))
})
}
}
@ -167,7 +167,7 @@ pub async fn command(connection: Connection, config: Config) -> Result<(), Schem
// note that this will also apply the schema override, if the user provided one
let merged_tsm_schema = merger.merge().map_err(SchemaCommandError::Merging)?;
// just print the merged schema for now; we'll do more with this in future PRs
println!("Merged schema:\n{:?}", merged_tsm_schema);
println!("Merged schema:\n{merged_tsm_schema:?}");
// don't proceed unless we produce a valid merged schema
if let Err(errors) = validate_schema(&merged_tsm_schema) {

View File

@ -64,7 +64,7 @@ pub async fn command(connection: Connection, config: Config) -> Result<()> {
let formatted_result = format.format(&batches)?;
println!("{}", formatted_result);
println!("{formatted_result}");
Ok(())
}

View File

@ -93,7 +93,7 @@ pub async fn command(connection: Connection, config: Config) -> Result<()> {
let formatted_result = format.format(&batches)?;
println!("{}", formatted_result);
println!("{formatted_result}");
Ok(())
}

View File

@ -282,10 +282,7 @@ async fn load_schema(
.map(|k| k.to_string())
.collect::<Vec<_>>()
.join(", ");
println!(
"table {} with columns {} loaded into local catalog",
table_name, column_names
);
println!("table {table_name} with columns {column_names} loaded into local catalog");
}
let full_inserted_schema = get_schema_by_name(&namespace.name, repos.as_mut()).await?;
@ -343,11 +340,11 @@ async fn load_parquet_files(
let uuid = Uuid::parse_str(&p.object_store_id).expect("object store id should be valid");
let parquet_file = match repos.parquet_files().get_by_object_store_id(uuid).await? {
Some(f) => {
println!("found file {} in catalog", uuid);
println!("found file {uuid} in catalog");
f
}
None => {
println!("creating file {} in catalog", uuid);
println!("creating file {uuid} in catalog");
let params = ParquetFileParams {
shard_id: partition_mapping.shard_id,
namespace_id,

View File

@ -129,7 +129,7 @@ pub async fn main(
grpc_bind_address,
server_type,
} = service;
let server_type_name = format!("{:?}", server_type);
let server_type_name = format!("{server_type:?}");
let handle = tokio::spawn(async move {
let trace_exporter = common_state.trace_exporter();
info!(?grpc_bind_address, ?server_type, "Binding gRPC services");

View File

@ -219,14 +219,14 @@ impl Repl {
ReplCommand::ShowNamespaces => {
self.list_namespaces()
.await
.map_err(|e| println!("{}", e))
.map_err(|e| println!("{e}"))
.ok();
}
ReplCommand::UseNamespace { db_name } => {
self.use_namespace(db_name);
}
ReplCommand::SqlCommand { sql } => {
self.run_sql(sql).await.map_err(|e| println!("{}", e)).ok();
self.run_sql(sql).await.map_err(|e| println!("{e}")).ok();
}
ReplCommand::Exit => {
info!("exiting at user request");
@ -323,7 +323,7 @@ impl Repl {
let total_rows: usize = batches.into_iter().map(|b| b.num_rows()).sum();
if total_rows > 1 {
format!("{} rows", total_rows)
format!("{total_rows} rows")
} else if total_rows == 0 {
"no rows".to_string()
} else {
@ -333,14 +333,14 @@ impl Repl {
fn use_namespace(&mut self, db_name: String) {
info!(%db_name, "setting current namespace");
println!("You are now in remote mode, querying namespace {}", db_name);
println!("You are now in remote mode, querying namespace {db_name}");
self.set_query_engine(QueryEngine::Remote(db_name));
}
fn set_query_engine(&mut self, query_engine: QueryEngine) {
self.prompt = match &query_engine {
QueryEngine::Remote(db_name) => {
format!("{}> ", db_name)
format!("{db_name}> ")
}
};
self.query_engine = Some(query_engine)
@ -363,7 +363,7 @@ impl Repl {
.output_format
.format(batches)
.context(FormattingResultsSnafu)?;
println!("{}", formatted_results);
println!("{formatted_results}");
Ok(())
}
}

View File

@ -389,8 +389,7 @@ mod test_super {
let got = parse_range(input).unwrap();
assert_eq!(
got, exp,
"got {:?} for input {:?}, expected {:?}",
got, input, exp
"got {got:?} for input {input:?}, expected {exp:?}"
);
}
}

View File

@ -42,7 +42,7 @@ pub type Result<T, E = Error> = std::result::Result<T, E>;
pub fn pretty_print_frames(frames: &[Data]) -> Result<()> {
let rbs = frames_to_record_batches(frames)?;
for (k, rb) in rbs {
println!("\n_measurement: {}", k);
println!("\n_measurement: {k}");
println!("rows: {:?}\n", &rb.num_rows());
print_batches(&[rb]).context(ArrowSnafu)?;
}

View File

@ -118,11 +118,11 @@ pub async fn command(connection: Connection, config: Config) -> Result<()> {
match res {
Ok(Ok(lp_data)) => Some(lp_data),
Ok(Err(e)) => {
eprintln!("WARNING: ignoring error : {}", e);
eprintln!("WARNING: ignoring error : {e}");
None
}
Err(e) => {
eprintln!("WARNING: ignoring task fail: {}", e);
eprintln!("WARNING: ignoring task fail: {e}");
None
}
}

View File

@ -231,13 +231,12 @@ fn main() -> Result<(), std::io::Error> {
builder = builder.header(key, value);
// Emit trace id information
println!("Trace ID set to {}", trace_id);
println!("Trace ID set to {trace_id}");
}
if let Some(token) = config.token.as_ref() {
let key = http::header::HeaderName::from_str("Authorization").unwrap();
let value =
http::header::HeaderValue::from_str(&format!("Token {}", token)).unwrap();
let value = http::header::HeaderValue::from_str(&format!("Token {token}")).unwrap();
debug!(name=?key, value=?value, "Setting token header");
builder = builder.header(key, value);
}
@ -245,7 +244,7 @@ fn main() -> Result<(), std::io::Error> {
match builder.build(&host).await {
Ok(connection) => connection,
Err(e) => {
eprintln!("Error connecting to {}: {}", host, e);
eprintln!("Error connecting to {host}: {e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -255,7 +254,7 @@ fn main() -> Result<(), std::io::Error> {
match r {
Ok(guard) => guard,
Err(e) => {
eprintln!("Initializing logs failed: {}", e);
eprintln!("Initializing logs failed: {e}");
std::process::exit(ReturnCode::Failure as _);
}
}
@ -265,7 +264,7 @@ fn main() -> Result<(), std::io::Error> {
None => {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
if let Err(e) = all_in_one::command(config.all_in_one_config).await {
eprintln!("Server command failed: {}", e);
eprintln!("Server command failed: {e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -273,7 +272,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::remote::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -281,7 +280,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard =
handle_init_logs(init_logs_and_tracing(log_verbose_count, &config));
if let Err(e) = commands::run::command(*config).await {
eprintln!("Server command failed: {}", e);
eprintln!("Server command failed: {e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -289,7 +288,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::sql::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -297,28 +296,28 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::storage::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
Some(Command::Catalog(config)) => {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
if let Err(e) = commands::catalog::command(config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
Some(Command::Compactor(config)) => {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
if let Err(e) = commands::compactor::command(*config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
Some(Command::Debug(config)) => {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
if let Err(e) = commands::debug::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -326,7 +325,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::write::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -334,7 +333,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::query::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -342,7 +341,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::query_ingester::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -350,7 +349,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::import::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -358,7 +357,7 @@ fn main() -> Result<(), std::io::Error> {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection().await;
if let Err(e) = commands::namespace::command(connection, config).await {
eprintln!("{}", e);
eprintln!("{e}");
std::process::exit(ReturnCode::Failure as _)
}
}
@ -392,18 +391,13 @@ fn get_runtime(num_threads: Option<usize>) -> Result<Runtime, std::io::Error> {
match num_threads {
None => Runtime::new(),
Some(num_threads) => {
println!(
"Setting number of threads to '{}' per command line request",
num_threads
);
println!("Setting number of threads to '{num_threads}' per command line request");
let thread_counter = Arc::new(AtomicUsize::new(1));
match num_threads {
0 => {
let msg = format!(
"Invalid num-threads: '{}' must be greater than zero",
num_threads
);
let msg =
format!("Invalid num-threads: '{num_threads}' must be greater than zero");
Err(std::io::Error::new(kind, msg))
}
1 => Builder::new_current_thread().enable_all().build(),
@ -431,7 +425,7 @@ fn load_dotenv() {
// be applied when initialising the Config struct.
}
Err(e) => {
eprintln!("FATAL Error loading config from: {}", e);
eprintln!("FATAL Error loading config from: {e}");
eprintln!("Aborting");
std::process::exit(1);
}
@ -453,7 +447,7 @@ unsafe extern "C" fn signal_handler(sig: i32) {
use std::process::abort;
let name = std::thread::current()
.name()
.map(|n| format!(" for thread \"{}\"", n))
.map(|n| format!(" for thread \"{n}\""))
.unwrap_or_else(|| "".to_owned());
eprintln!(
"Signal {}, Stack trace{}\n{:?}",
@ -510,8 +504,7 @@ where
Ok(Self { key, value })
}
None => Err(format!(
"Invalid key value pair - expected 'KEY:VALUE' got '{}'",
s
"Invalid key value pair - expected 'KEY:VALUE' got '{s}'"
)),
}
}

View File

@ -12,7 +12,7 @@ async fn smoke() {
let org = rand_name();
let bucket = rand_name();
let namespace = format!("{}_{}", org, bucket);
let namespace = format!("{org}_{bucket}");
let table_name = "test_table";
// Set up all_in_one ====================================
@ -22,7 +22,7 @@ async fn smoke() {
let all_in_one = ServerFixture::create(test_config).await;
// Write some data into the v2 HTTP API ==============
let lp = format!("{},tag1=A,tag2=B val=42i 123456", table_name);
let lp = format!("{table_name},tag1=A,tag2=B val=42i 123456");
let response = write_to_router(lp, org, bucket, all_in_one.router_http_base()).await;
assert_eq!(
@ -32,7 +32,7 @@ async fn smoke() {
);
// run query
let sql = format!("select * from {}", table_name);
let sql = format!("select * from {table_name}");
let batches = run_sql(sql, namespace, all_in_one.querier_grpc_connection()).await;
let expected = [
@ -52,7 +52,7 @@ async fn ephemeral_mode() {
let org = rand_name();
let bucket = rand_name();
let namespace = format!("{}_{}", org, bucket);
let namespace = format!("{org}_{bucket}");
let table_name = "test_table";
// Set up all_in_one ====================================
@ -67,7 +67,7 @@ async fn ephemeral_mode() {
.now()
.timestamp_nanos()
.to_string();
let lp = format!("{},tag1=A,tag2=B val=42i {}", table_name, now);
let lp = format!("{table_name},tag1=A,tag2=B val=42i {now}");
let response = write_to_router(lp, org, bucket, all_in_one.router_http_base()).await;
assert_eq!(
@ -78,7 +78,7 @@ async fn ephemeral_mode() {
// run query
// do not select time becasue it changes every time
let sql = format!("select tag1, tag2, val from {}", table_name);
let sql = format!("select tag1, tag2, val from {table_name}");
let batches = run_sql(sql, namespace, all_in_one.querier_grpc_connection()).await;
let expected = [

View File

@ -141,13 +141,13 @@ async fn parquet_to_lp() {
.stdout
.clone();
println!("Got output {:?}", output);
println!("Got output {output:?}");
// test writing to output file as well
// Ensure files are actually wrote to the filesystem
let output_file =
tempfile::NamedTempFile::new().expect("Error making temp file");
println!("Writing to {:?}", output_file);
println!("Writing to {output_file:?}");
// convert to line protocol (to a file)
Command::cargo_bin("influxdb_iox")
@ -165,9 +165,7 @@ async fn parquet_to_lp() {
let file_contents = String::from_utf8_lossy(&file_contents);
assert!(
predicate::str::contains(line_protocol).eval(&file_contents),
"Could not file {} in {}",
line_protocol,
file_contents
"Could not file {line_protocol} in {file_contents}"
);
}
.boxed()
@ -347,7 +345,7 @@ async fn schema_cli() {
];
for (addr_type, addr) in addrs {
println!("Trying address {}: {}", addr_type, addr);
println!("Trying address {addr_type}: {addr}");
// Validate the output of the schema CLI command
Command::cargo_bin("influxdb_iox")
@ -588,7 +586,7 @@ async fn wait_for_query_result(
let assert = match assert.try_success() {
Err(e) => {
println!("Got err running command: {}, retrying", e);
println!("Got err running command: {e}, retrying");
continue;
}
Ok(a) => a,
@ -596,20 +594,17 @@ async fn wait_for_query_result(
match assert.try_stdout(predicate::str::contains(expected)) {
Err(e) => {
println!("No match: {}, retrying", e);
println!("No match: {e}, retrying");
}
Ok(r) => {
println!("Success: {:?}", r);
println!("Success: {r:?}");
return;
}
}
// sleep and try again
tokio::time::sleep(Duration::from_secs(1)).await
}
panic!(
"Did not find expected output {} within {:?}",
expected, max_wait_time
);
panic!("Did not find expected output {expected} within {max_wait_time:?}");
}
/// Test the namespace cli command

View File

@ -22,13 +22,12 @@ async fn flightsql_adhoc_query() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::Custom(Box::new(move |state: &mut StepTestState| {
async move {
let sql = format!("select * from {}", table_name);
let sql = format!("select * from {table_name}");
let expected = vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -124,13 +123,12 @@ async fn flightsql_prepared_query() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::Custom(Box::new(move |state: &mut StepTestState| {
async move {
let sql = format!("select * from {}", table_name);
let sql = format!("select * from {table_name}");
let expected = vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -195,9 +193,8 @@ async fn flightsql_jdbc() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::Custom(Box::new(move |state: &mut StepTestState| {
// satisfy the borrow checker

View File

@ -14,9 +14,8 @@ async fn influxql_returns_error() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::InfluxQLExpectingError {
query: "SHOW TAG KEYS".into(),
@ -45,12 +44,11 @@ async fn influxql_select_returns_results() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::InfluxQLQuery {
query: format!("select tag1, val from {}", table_name),
query: format!("select tag1, val from {table_name}"),
expected: vec![
"+--------------------------------+------+-----+",
"| time | tag1 | val |",

View File

@ -88,7 +88,7 @@ async fn ingester_flight_api() {
let mut cluster = MiniCluster::create_non_shared2(database_url).await;
// Write some data into the v2 HTTP API ==============
let lp = format!("{},tag1=A,tag2=B val=42i 123456", table_name);
let lp = format!("{table_name},tag1=A,tag2=B val=42i 123456");
let response = cluster.write_to_router(lp).await;
assert_eq!(response.status(), StatusCode::NO_CONTENT);
@ -123,12 +123,7 @@ async fn ingester_flight_api() {
.iter()
.enumerate()
.for_each(|(i, b)| {
assert_eq!(
schema,
b.schema(),
"Schema mismatch for returned batch {}",
i
);
assert_eq!(schema, b.schema(), "Schema mismatch for returned batch {i}");
});
// Ensure the ingester UUID is the same in the next query
@ -140,7 +135,7 @@ async fn ingester_flight_api() {
// Populate the ingester with some data so it returns a successful
// response containing the UUID.
let lp = format!("{},tag1=A,tag2=B val=42i 123456", table_name);
let lp = format!("{table_name},tag1=A,tag2=B val=42i 123456");
let response = cluster.write_to_router(lp).await;
assert_eq!(response.status(), StatusCode::NO_CONTENT);

View File

@ -23,9 +23,7 @@ pub async fn test_metrics() {
.count();
assert!(
catalog_op_metrics_count >= 180,
"Expected at least 180 catalog op metrics, got: {}\n\n{}",
catalog_op_metrics_count,
metrics
"Expected at least 180 catalog op metrics, got: {catalog_op_metrics_count}\n\n{metrics}"
);
let process_metrics_count = metrics_lines
@ -34,8 +32,7 @@ pub async fn test_metrics() {
.count();
assert!(
process_metrics_count >= 1,
"Expected `process_start_time_seconds` metric but found none: \n\n{}",
metrics
"Expected `process_start_time_seconds` metric but found none: \n\n{metrics}"
);
})),
],

View File

@ -23,7 +23,7 @@ async fn querier_namespace_client() {
.await;
// Write some data into the v2 HTTP API ==============
let lp = format!("{},tag1=A,tag2=B val=42i 123456", table_name);
let lp = format!("{table_name},tag1=A,tag2=B val=42i 123456");
let response = cluster.write_to_router(lp).await;
assert_eq!(response.status(), StatusCode::NO_CONTENT);

View File

@ -44,7 +44,7 @@ mod with_kafka {
StepTest::new(
&mut cluster,
vec![
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123456", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag1=A,tag2=B val=42i 123456")),
Step::WaitForPersisted,
Step::WriteLineProtocol(String::from("other_table,tag1=A,tag2=B val=42i 123456")),
Step::WaitForPersisted,
@ -54,7 +54,7 @@ mod with_kafka {
state.cluster_mut().restart_ingester().boxed()
})),
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -95,16 +95,15 @@ mod with_kafka {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::WaitForReadable,
Step::AssertNotPersisted,
Step::Custom(Box::new(move |state: &mut StepTestState| {
async move {
// Ingester panics but querier will retry.
let sql = format!("select * from {} where tag2='B'", table_name);
let sql = format!("select * from {table_name} where tag2='B'");
let batches = run_sql(
sql,
state.cluster().namespace(),
@ -180,12 +179,12 @@ mod with_kafka {
"x".repeat(10_000),
)),
Step::WaitForPersisted,
Step::WriteLineProtocol(format!("{},tag=A val=3i 3", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag=A val=3i 3")),
Step::WaitForReadable,
Step::AssertLastNotPersisted,
// circuit breaker will prevent ingester from being queried, so we only get the persisted data
Step::Query {
sql: format!("select tag,val,time from {} where tag='A'", table_name),
sql: format!("select tag,val,time from {table_name} where tag='A'"),
expected: vec![
"+-----+-----+--------------------------------+",
"| tag | val | time |",
@ -208,10 +207,8 @@ mod with_kafka {
// wait for circuit breaker to close circuits again
tokio::time::timeout(Duration::from_secs(10), async {
loop {
let sql = format!(
"select tag,val,time from {} where tag='A'",
table_name
);
let sql =
format!("select tag,val,time from {table_name} where tag='A'");
let batches = run_sql(
sql,
state.cluster().namespace(),
@ -339,11 +336,11 @@ mod with_kafka {
&mut cluster,
vec![
// create persisted chunk with a single tag column
Step::WriteLineProtocol(format!("{},tag=A val=\"foo\" 1", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag=A val=\"foo\" 1")),
Step::WaitForPersisted,
// query to prime the querier caches with partition sort key
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+-----+--------------------------------+-----+",
"| tag | time | val |",
@ -353,7 +350,7 @@ mod with_kafka {
],
},
// create 2nd chunk with an additional tag column (which will be included in the partition sort key)
Step::WriteLineProtocol(format!("{},tag=A,tag2=B val=\"bar\" 1\n", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag=A,tag2=B val=\"bar\" 1\n")),
Step::WaitForPersisted,
// in the original bug the querier would now panic with:
//
@ -361,10 +358,7 @@ mod with_kafka {
//
// Note that we cannot query tag2 because the schema is cached for a while.
Step::Query {
sql: format!(
"select tag, val from {} where tag='A' order by val",
table_name
),
sql: format!("select tag, val from {table_name} where tag='A' order by val"),
expected: vec![
"+-----+-----+",
"| tag | val |",
@ -537,15 +531,13 @@ mod with_kafka {
StepTest::new(
&mut cluster,
vec![
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123457", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag1=A,tag2=B val=42i 123457")),
Step::WaitForReadable,
// SQL query
Step::Custom(Box::new(move |state: &mut StepTestState| {
async move {
let sql = format!(
"select tag1, sum(val) as val from {} group by tag1",
table_name
);
let sql =
format!("select tag1, sum(val) as val from {table_name} group by tag1");
let err = try_run_sql(
&sql,
state.cluster().namespace(),
@ -677,12 +669,11 @@ mod kafkaless_rpc_write {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -717,9 +708,8 @@ mod kafkaless_rpc_write {
vec![
Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
// This should_panic if the ingester setup is correct
Step::WaitForPersisted2 {
@ -745,13 +735,13 @@ mod kafkaless_rpc_write {
&mut cluster,
vec![
Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123456", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag1=A,tag2=B val=42i 123456")),
// Wait for data to be persisted to parquet
Step::WaitForPersisted2 {
expected_increase: 1,
},
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -792,9 +782,8 @@ mod kafkaless_rpc_write {
vec![
Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
// Wait for data to be persisted to parquet
Step::WaitForPersisted2 {
@ -803,7 +792,7 @@ mod kafkaless_rpc_write {
Step::Custom(Box::new(move |state: &mut StepTestState| {
async move {
// query returns no results
let sql = format!("select * from {} where time > '2023-01-12'", table_name);
let sql = format!("select * from {table_name} where time > '2023-01-12'");
let querier_connection =
state.cluster().querier().querier_grpc_connection();
let namespace = state.cluster().namespace();
@ -866,12 +855,12 @@ mod kafkaless_rpc_write {
&mut cluster,
vec![
Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123456", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag1=A,tag2=B val=42i 123456")),
Step::WaitForPersisted2 {
expected_increase: 1,
},
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -902,13 +891,13 @@ mod kafkaless_rpc_write {
let steps = vec![
Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123456", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag1=A,tag2=B val=42i 123456")),
// Wait for data to be persisted to parquet
Step::WaitForPersisted2 {
expected_increase: 1,
},
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -919,7 +908,7 @@ mod kafkaless_rpc_write {
},
// second query, should be the same result
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -930,14 +919,14 @@ mod kafkaless_rpc_write {
},
Step::RecordNumParquetFiles,
// write another parquet file that has non duplicated data
Step::WriteLineProtocol(format!("{},tag1=B,tag2=A val=43i 789101112", table_name)),
Step::WriteLineProtocol(format!("{table_name},tag1=B,tag2=A val=43i 789101112")),
// Wait for data to be persisted to parquet
Step::WaitForPersisted2 {
expected_increase: 1,
},
// query should correctly see the data in the second parquet file
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",

View File

@ -59,7 +59,7 @@ where
T: std::fmt::Display,
{
v.iter()
.map(|item| format!("{}", item))
.map(|item| format!("{item}"))
.collect::<Vec<_>>()
.join(",")
}

View File

@ -26,8 +26,7 @@ async fn capabilities() {
assert_eq!(
capabilities_response.caps.len(),
4,
"Response: {:?}",
capabilities_response
"Response: {capabilities_response:?}"
);
}
.boxed()

View File

@ -331,7 +331,7 @@ async fn do_read_filter_test(
async move {
let mut storage_client = state.cluster().querier_storage_client();
println!("Sending read_filter request with {:#?}", request_builder);
println!("Sending read_filter request with {request_builder:#?}");
let read_filter_request =
request_builder.source(state.cluster()).build_read_filter();
@ -341,8 +341,7 @@ async fn do_read_filter_test(
assert_eq!(
expected_frames, actual_frames,
"\n\nExpected:\n{:#?}\n\nActual:\n{:#?}\n\n",
expected_frames, actual_frames,
"\n\nExpected:\n{expected_frames:#?}\n\nActual:\n{actual_frames:#?}\n\n",
);
}
.boxed()

View File

@ -324,7 +324,7 @@ async fn do_read_group_test(
.into_grpc_connection();
let mut storage_client = StorageClient::new(grpc_connection);
println!("Sending read_group request with {:#?}", request_builder);
println!("Sending read_group request with {request_builder:#?}");
let read_group_request =
request_builder.source(state.cluster()).build_read_group();
@ -334,8 +334,7 @@ async fn do_read_group_test(
assert_eq!(
expected_frames, actual_frames,
"\n\nExpected:\n{:#?}\n\nActual:\n{:#?}\n\n",
expected_frames, actual_frames,
"\n\nExpected:\n{expected_frames:#?}\n\nActual:\n{actual_frames:#?}\n\n",
);
}
.boxed()

View File

@ -97,7 +97,7 @@ async fn do_read_window_aggregate_test(
.source(state.cluster())
.build_read_window_aggregate();
println!("Sending read_window_aggregate request {:#?}", request);
println!("Sending read_window_aggregate request {request:#?}");
let response = storage_client.read_window_aggregate(request).await.unwrap();
let responses: Vec<_> = response.into_inner().try_collect().await.unwrap();
@ -111,8 +111,7 @@ async fn do_read_window_aggregate_test(
assert_eq!(
expected_frames, actual_frames,
"\n\nExpected:\n{:#?}\nActual:\n{:#?}",
expected_frames, actual_frames,
"\n\nExpected:\n{expected_frames:#?}\nActual:\n{actual_frames:#?}",
);
}
.boxed()

View File

@ -63,7 +63,7 @@ async fn basic_multi_ingesters() {
// pick 100 table names to spread across both ingesters
let lp_data = (0..100)
.map(|i| format!("table_{},tag1=A,tag2=B val={}i 123456", i, i))
.map(|i| format!("table_{i},tag1=A,tag2=B val={i}i 123456"))
.collect::<Vec<_>>()
.join("\n");
@ -83,8 +83,7 @@ async fn basic_multi_ingesters() {
ShardStatus::Persisted | ShardStatus::Readable
)
}),
"Not all shards were readable or persisted. Combined responses: {:?}",
combined_response
"Not all shards were readable or persisted. Combined responses: {combined_response:?}"
);
}
.boxed()
@ -114,21 +113,20 @@ async fn basic_multi_ingesters() {
.into_iter()
// read all the data back out
.chain((0..100).map(|i| Step::VerifiedQuery {
sql: format!("select * from table_{}", i),
sql: format!("select * from table_{i}"),
verify: Box::new(move |batches: Vec<RecordBatch>| {
println!("Verifing contents of table_{}", i);
println!("Verifing contents of table_{i}");
// results look like this:
// "+------+------+--------------------------------+-----+",
// "| tag1 | tag2 | time | val |",
// "+------+------+--------------------------------+-----+",
// "| A | B | 1970-01-01T00:00:00.000123456Z | val |",
// "+------+------+--------------------------------+-----+",
assert_eq!(batches.len(), 1, "{:?}", batches);
assert_eq!(batches.len(), 1, "{batches:?}");
assert_eq!(
batches[0].schema().fields()[3].name(),
"val",
"{:?}",
batches
"{batches:?}"
);
let array = as_primitive_array::<Int64Type>(batches[0].column(3));
assert_eq!(array.len(), 1);
@ -162,14 +160,14 @@ async fn get_multi_ingester_readable_combined_response(
match combined_response {
Ok(combined_response) => {
if all_readable(&combined_response) {
println!("All data is readable: {:?}", combined_response);
println!("All data is readable: {combined_response:?}");
return combined_response;
} else {
println!("retrying, not yet readable: {:?}", combined_response);
println!("retrying, not yet readable: {combined_response:?}");
}
}
Err(e) => {
println!("retrying, error getting token status: {}", e);
println!("retrying, error getting token status: {e}");
}
}
interval.tick().await;

View File

@ -16,12 +16,11 @@ pub async fn test_tracing_sql() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",
@ -61,9 +60,8 @@ pub async fn test_tracing_storage_api() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::Custom(Box::new(move |state: &mut StepTestState| {
let cluster = state.cluster();
@ -116,12 +114,11 @@ pub async fn test_tracing_create_trace() {
&mut cluster,
vec![
Step::WriteLineProtocol(format!(
"{},tag1=A,tag2=B val=42i 123456\n\
{},tag1=A,tag2=C val=43i 123457",
table_name, table_name
"{table_name},tag1=A,tag2=B val=42i 123456\n\
{table_name},tag1=A,tag2=C val=43i 123457"
)),
Step::Query {
sql: format!("select * from {}", table_name),
sql: format!("select * from {table_name}"),
expected: vec![
"+------+------+--------------------------------+-----+",
"| tag1 | tag2 | time | val |",

View File

@ -171,7 +171,7 @@ impl Error {
let description = description.into();
Self::InvalidArgument(ServerError {
message: format!("Invalid argument for '{}': {}", field_name, description),
message: format!("Invalid argument for '{field_name}': {description}"),
details: Some(FieldViolation {
field: field_name,
description,

View File

@ -130,7 +130,7 @@ impl Client {
let (org_id, bucket_id) = split_namespace(namespace.as_ref()).map_err(|e| {
Error::invalid_argument(
"namespace",
format!("Could not find valid org_id and bucket_id: {}", e),
format!("Could not find valid org_id and bucket_id: {e}"),
)
})?;

View File

@ -195,7 +195,7 @@ where
/// can be changed when performing the request that carries the LP body.
/// Setting the unit is outside of the scope of a line protocol builder.
pub fn timestamp(self, ts: i64) -> LineProtocolBuilder<B, AfterTimestamp> {
self.write(format_args!(" {}", ts))
self.write(format_args!(" {ts}"))
}
/// Closing a line is required before starting a new one or finishing building the batch.
@ -226,7 +226,7 @@ where
use std::io::Write;
// MutBuf's Write adapter is infallible.
let mut writer = self.buf.writer();
write!(&mut writer, "{}", args).unwrap();
write!(&mut writer, "{args}").unwrap();
LineProtocolBuilder {
buf: writer.into_inner(),
_marker: PhantomData,
@ -254,7 +254,7 @@ impl<'a, const N: usize> fmt::Display for Escaped<'a, N> {
if self.special_characters.contains(&ch) || ch == '\\' {
write!(f, "\\")?;
}
write!(f, "{}", ch)?;
write!(f, "{ch}")?;
}
Ok(())
}
@ -305,25 +305,25 @@ impl FieldValue for &str {
impl FieldValue for f64 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self)
write!(f, "{self}")
}
}
impl FieldValue for bool {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self)
write!(f, "{self}")
}
}
impl FieldValue for i64 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}i", self)
write!(f, "{self}i")
}
}
impl FieldValue for u64 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}u", self)
write!(f, "{self}u")
}
}

View File

@ -225,12 +225,12 @@ impl<'a> Display for ParsedLine<'a> {
}
first = false;
escape_and_write_value(f, field_name.as_str(), FIELD_KEY_DELIMITERS)?;
write!(f, "={}", field_value)?;
write!(f, "={field_value}")?;
}
}
if let Some(timestamp) = self.timestamp {
write!(f, " {}", timestamp)?
write!(f, " {timestamp}")?
}
Ok(())
}
@ -281,8 +281,7 @@ impl<'a> Series<'a> {
let mut series_base = self.measurement.to_string();
for (tag_key, tag_value) in self.tag_set.unwrap_or_default() {
use std::fmt::Write;
write!(&mut series_base, ",{}={}", tag_key, tag_value)
.expect("Could not append string");
write!(&mut series_base, ",{tag_key}={tag_value}").expect("Could not append string");
}
series_base
}
@ -307,8 +306,7 @@ impl<'a> Series<'a> {
let mut series_base = self.measurement.to_string();
for (tag_key, tag_value) in unique_sorted_tag_set {
use std::fmt::Write;
write!(&mut series_base, ",{}={}", tag_key, tag_value)
.expect("Could not append string");
write!(&mut series_base, ",{tag_key}={tag_value}").expect("Could not append string");
}
Ok(series_base)
@ -363,11 +361,11 @@ impl<'a> FieldValue<'a> {
impl<'a> Display for FieldValue<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::I64(v) => write!(f, "{}i", v),
Self::U64(v) => write!(f, "{}u", v),
Self::F64(v) => write!(f, "{}", v),
Self::I64(v) => write!(f, "{v}i"),
Self::U64(v) => write!(f, "{v}u"),
Self::F64(v) => write!(f, "{v}"),
Self::String(v) => escape_and_write_value(f, v, FIELD_VALUE_STRING_DELIMITERS),
Self::Boolean(v) => write!(f, "{}", v),
Self::Boolean(v) => write!(f, "{v}"),
}
}
}
@ -1062,7 +1060,7 @@ fn escape_and_write_value(
for (idx, delim) in value.match_indices(escaping_specification) {
let s = &value[last..idx];
write!(f, r#"{}\{}"#, s, delim)?;
write!(f, r#"{s}\{delim}"#)?;
last = idx + delim.len();
}
@ -1454,8 +1452,7 @@ mod test {
assert!(
matches!(parsed, Err(super::Error::CannotParseEntireLine { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
}
@ -1529,40 +1526,35 @@ mod test {
let parsed = parse(input);
assert!(
matches!(parsed, Err(super::Error::CannotParseEntireLine { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
let input = "m0 field=-1.234456e+ 1615869152385000000";
let parsed = parse(input);
assert!(
matches!(parsed, Err(super::Error::CannotParseEntireLine { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
let input = "m0 field=-1.234456E 1615869152385000000";
let parsed = parse(input);
assert!(
matches!(parsed, Err(super::Error::CannotParseEntireLine { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
let input = "m0 field=-1.234456E+ 1615869152385000000";
let parsed = parse(input);
assert!(
matches!(parsed, Err(super::Error::CannotParseEntireLine { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
let input = "m0 field=-1.234456E-";
let parsed = parse(input);
assert!(
matches!(parsed, Err(super::Error::CannotParseEntireLine { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
}
@ -1585,8 +1577,7 @@ mod test {
assert!(
matches!(parsed, Err(super::Error::IntegerValueInvalid { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
}
@ -1597,8 +1588,7 @@ mod test {
assert!(
matches!(parsed, Err(super::Error::UIntegerValueInvalid { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
}
@ -1729,8 +1719,7 @@ bar value2=2i 123"#;
assert!(
matches!(parsed, Err(super::Error::TimestampValueInvalid { .. })),
"Wrong error: {:?}",
parsed,
"Wrong error: {parsed:?}",
);
}
@ -2273,10 +2262,7 @@ her"#,
let actual_value = if let FieldValue::F64(v) = field_value {
*v
} else {
panic!(
"field {} had value {:?}, expected F64",
field_name, field_value
);
panic!("field {field_name} had value {field_value:?}, expected F64");
};
assert!(approximately_equal(expected_value, actual_value));

View File

@ -49,7 +49,7 @@ pub struct OrgAndBucket {
impl OrgAndBucket {
/// Create a new `OrgAndBucket` from the provided `org_id` and `bucket_id`
pub fn new(org_id: NonZeroU64, bucket_id: NonZeroU64) -> Self {
let db_name = format!("{:016x}_{:016x}", org_id, bucket_id);
let db_name = format!("{org_id:016x}_{bucket_id:016x}");
Self {
org_id,

View File

@ -141,7 +141,7 @@ mod tests {
#[test]
fn encode_multi_compressed() {
let src_strings: Vec<_> = (0..10).map(|i| format!("value {}", i)).collect();
let src_strings: Vec<_> = (0..10).map(|i| format!("value {i}")).collect();
let src: Vec<_> = src_strings.iter().map(|s| s.as_bytes()).collect();
let mut dst = vec![];
@ -215,7 +215,7 @@ mod tests {
.iter()
.map(|s| std::str::from_utf8(s).unwrap())
.collect();
let expected: Vec<_> = (0..10).map(|i| format!("value {}", i)).collect();
let expected: Vec<_> = (0..10).map(|i| format!("value {i}")).collect();
assert_eq!(dst_as_strings, expected);
}

View File

@ -100,8 +100,7 @@ mod tests {
assert_eq!(
&dst[0] >> 4,
Encoding::Rle as u8,
"didn't use rle on {:?}",
src
"didn't use rle on {src:?}"
);
let mut got = vec![];
decode(&dst, &mut got).expect("failed to decode");

View File

@ -303,8 +303,7 @@ fn parse_tsm_field_key_value(rem_key: impl Iterator<Item = u8>) -> Result<String
_ => ParsingFieldKeySnafu {
details: format!(
"Delimiter not found before end of stream reached. \
Still in state {:?}",
state
Still in state {state:?}"
),
}
.fail(),
@ -893,13 +892,11 @@ mod tests {
Ok(field_key) => {
assert_eq!(
field_key, expected_field_key,
"Unexpected field key parsing '{}'",
input
"Unexpected field key parsing '{input}'"
);
}
Err(e) => panic!(
"Unexpected error while parsing field key '{}', got '{}', expected '{}'",
input, e, expected_field_key
"Unexpected error while parsing field key '{input}', got '{e}', expected '{expected_field_key}'"
),
}
}
@ -910,20 +907,16 @@ mod tests {
match result {
Ok(field_key) => {
panic!(
"Unexpected success parsing field key '{}'. \
Expected error '{}', got '{}'",
input, expected_error, field_key
"Unexpected success parsing field key '{input}'. \
Expected error '{expected_error}', got '{field_key}'"
);
}
Err(err) => {
let err_str = err.to_string();
assert!(
err_str.contains(expected_error),
"Did not find expected error while parsing '{}'. \
Expected '{}' but actual error was '{}'",
input,
expected_error,
err_str
"Did not find expected error while parsing '{input}'. \
Expected '{expected_error}' but actual error was '{err_str}'"
);
}
}
@ -942,19 +935,17 @@ mod tests {
match result {
Ok(tag_key) => {
assert_eq!(tag_key, expected_tag_key, "while parsing input '{}'", input);
assert_eq!(tag_key, expected_tag_key, "while parsing input '{input}'");
}
Err(err) => {
panic!(
"Got error '{}', expected parsed tag key: '{:?}' while parsing '{}'",
err, expected_tag_key, input
"Got error '{err}', expected parsed tag key: '{expected_tag_key:?}' while parsing '{input}'"
);
}
}
assert_eq!(
remaining_input, expected_remaining_input,
"remaining input was not correct while parsing input '{}'",
input
"remaining input was not correct while parsing input '{input}'"
);
}
@ -972,24 +963,20 @@ mod tests {
match result {
Ok(tag_key) => {
panic!(
"Got parsed key {:?}, expected failure {} while parsing input '{}'",
tag_key, expected_error, input
"Got parsed key {tag_key:?}, expected failure {expected_error} while parsing input '{input}'"
);
}
Err(err) => {
let err_str = err.to_string();
assert!(
err_str.contains(expected_error),
"Did not find expected error '{}' in actual error '{}'",
expected_error,
err_str
"Did not find expected error '{expected_error}' in actual error '{err_str}'"
);
}
}
assert_eq!(
remaining_input, expected_remaining_input,
"remaining input was not correct while parsing input '{}'",
input
"remaining input was not correct while parsing input '{input}'"
);
}
@ -1008,22 +995,19 @@ mod tests {
Ok(tag_value) => {
assert_eq!(
tag_value, expected_tag_value,
"while parsing input '{}'",
input
"while parsing input '{input}'"
);
}
Err(err) => {
panic!(
"Got error '{}', expected parsed tag_value: '{:?}' while parsing input '{}",
err, expected_tag_value, input
"Got error '{err}', expected parsed tag_value: '{expected_tag_value:?}' while parsing input '{input}"
);
}
}
assert_eq!(
remaining_input, expected_remaining_input,
"remaining input was not correct while parsing input '{}'",
input
"remaining input was not correct while parsing input '{input}'"
);
}
@ -1041,25 +1025,21 @@ mod tests {
match result {
Ok(tag_value) => {
panic!(
"Got parsed tag_value {:?}, expected failure {} while parsing input '{}'",
tag_value, expected_error, input
"Got parsed tag_value {tag_value:?}, expected failure {expected_error} while parsing input '{input}'"
);
}
Err(err) => {
let err_str = err.to_string();
assert!(
err_str.contains(expected_error),
"Did not find expected error '{}' in actual error '{}'",
expected_error,
err_str
"Did not find expected error '{expected_error}' in actual error '{err_str}'"
);
}
}
assert_eq!(
remaining_input, expected_remaining_input,
"remaining input was not correct while parsing input '{}'",
input
"remaining input was not correct while parsing input '{input}'"
);
}
}

View File

@ -41,7 +41,7 @@ impl TryFrom<u8> for BlockType {
3 => Ok(Self::Str),
4 => Ok(Self::Unsigned),
_ => Err(TsmError {
description: format!("{:?} is invalid block type", value),
description: format!("{value:?} is invalid block type"),
}),
}
}
@ -120,7 +120,7 @@ impl error::Error for TsmError {
impl From<io::Error> for TsmError {
fn from(e: io::Error) -> Self {
Self {
description: format!("TODO - io error: {} ({:?})", e, e),
description: format!("TODO - io error: {e} ({e:?})"),
}
}
}
@ -128,7 +128,7 @@ impl From<io::Error> for TsmError {
impl From<std::str::Utf8Error> for TsmError {
fn from(e: std::str::Utf8Error) -> Self {
Self {
description: format!("TODO - utf8 error: {} ({:?})", e, e),
description: format!("TODO - utf8 error: {e} ({e:?})"),
}
}
}
@ -141,7 +141,7 @@ mod tests {
fn influx_id() {
let id = InfluxId::new_str("20aa9b0").unwrap();
assert_eq!(id, InfluxId(34_253_232));
assert_eq!(format!("{}", id), "00000000020aa9b0");
assert_eq!(format!("{id}"), "00000000020aa9b0");
}
#[test]

View File

@ -299,12 +299,12 @@ impl Display for MeasurementTable {
for (tagset, field_key_blocks) in &self.tag_set_fields_blocks {
write!(f, "\t")?;
for (key, value) in tagset {
write!(f, "{}={} ", key, value)?;
write!(f, "{key}={value} ")?;
}
writeln!(f, "\n\tField Keys:")?;
for (field_key, blocks) in field_key_blocks {
writeln!(f, "\t{}", field_key)?;
writeln!(f, "\t{field_key}")?;
for block in blocks {
writeln!(
f,

View File

@ -669,7 +669,7 @@ where
}
}
None => Err(TsmError {
description: format!("cannot decode block {:?} with no associated decoder", block),
description: format!("cannot decode block {block:?} with no associated decoder"),
}),
}
}
@ -741,7 +741,7 @@ mod tests {
String::from_utf8_lossy(entry.key.as_slice())
);
}
Err(e) => panic!("{:?} {:?}", e, got_blocks),
Err(e) => panic!("{e:?} {got_blocks:?}"),
}
}
@ -797,7 +797,7 @@ mod tests {
assert_eq!(ts.len(), 509);
assert_eq!(values.len(), 509);
}
other => panic!("should not have decoded {:?}", other),
other => panic!("should not have decoded {other:?}"),
}
}
}

View File

@ -282,7 +282,7 @@ mod test {
"<=" => RPCComparison::Lte,
"~" => RPCComparison::Regex,
"!~" => RPCComparison::NotRegex,
_ => panic!("invalid comparator string: {:?}", cmp),
_ => panic!("invalid comparator string: {cmp:?}"),
}
}
@ -296,7 +296,7 @@ mod test {
// N.B, does not support spaces in tag keys or values.
fn make_tag_expr(input: &str) -> RPCNode {
let parts = input.split_whitespace().collect::<Vec<_>>();
assert_eq!(parts.len(), 3, "invalid input string: {:?}", input);
assert_eq!(parts.len(), 3, "invalid input string: {input:?}");
let comparison = rpc_op_from_str(parts[1]);
let is_regex =
@ -378,7 +378,7 @@ mod test {
let ops = vec!["=", "!=", ">", ">=", "<", "<=", "~", "!~"];
let exprs = ops
.into_iter()
.map(|op| format!("server {} 'abc'", op))
.map(|op| format!("server {op} 'abc'"))
.collect::<Vec<_>>();
for expr_str in exprs {
@ -458,19 +458,19 @@ mod test {
for op in ops {
let exprs = vec![
(
make_sql_expr(&format!("server::field {} 100", op)),
make_sql_expr(&format!("server::field {op} 100")),
make_field_expr_i64("server", op, 100_i64),
),
(
make_sql_expr(&format!("server::field {} 100.0", op)),
make_sql_expr(&format!("server::field {op} 100.0")),
make_field_expr_f64("server", op, 100.0),
),
(
make_sql_expr(&format!("server::field {} true", op)),
make_sql_expr(&format!("server::field {op} true")),
make_field_expr_bool("server", op, true),
),
(
make_sql_expr(&format!("server::field {} 'Mice'", op)),
make_sql_expr(&format!("server::field {op} 'Mice'")),
make_field_expr_str("server", op, "Mice".to_owned()),
),
];

View File

@ -159,7 +159,7 @@ where
self.query_request_limit_rejected.inc(1);
return Err(Error::RequestLimit)?;
}
Err(e) => panic!("request limiter error: {}", e),
Err(e) => panic!("request limiter error: {e}"),
};
let ticket = request.into_inner();

View File

@ -336,8 +336,7 @@ impl PartitionData {
.expect("persisting batch must contain sequence numbers");
assert_eq!(
persisting_max, sequence_number,
"marking {:?} as persisted but persisting batch max is {:?}",
sequence_number, persisting_max
"marking {sequence_number:?} as persisted but persisting batch max is {persisting_max:?}"
);
// Additionally assert the persisting batch is ordered strictly before

View File

@ -621,7 +621,7 @@ impl Persister for IngesterData {
.expect("retry forever");
// Record metrics
let attributes = Attributes::from([("shard_id", format!("{}", shard_id).into())]);
let attributes = Attributes::from([("shard_id", format!("{shard_id}").into())]);
self.persisted_file_size_bytes
.recorder(attributes)
.record(file_size as u64);

View File

@ -308,7 +308,7 @@ impl IngestHandler for IngestHandlerImpl {
self.query_request_limit_rejected.inc(1);
return Err(crate::querier_handler::Error::RequestLimit);
}
Err(e) => panic!("request limiter error: {}", e),
Err(e) => panic!("request limiter error: {e}"),
};
// TEMP(alamb): Log details about what was requested

View File

@ -242,7 +242,7 @@ mod tests {
Some(42) => break,
// The mock is configured to return 42 - any other value
// is incorrect.
Some(v) => panic!("observed unexpected value {}", v),
Some(v) => panic!("observed unexpected value {v}"),
None => tokio::time::sleep(Duration::from_millis(10)).await,
}
}

View File

@ -295,9 +295,9 @@ mod tests {
) -> Observation {
metrics
.get_instrument::<Metric<T>>(name)
.unwrap_or_else(|| panic!("did not find metric {}", name))
.unwrap_or_else(|| panic!("did not find metric {name}"))
.get_observer(attrs)
.unwrap_or_else(|| panic!("failed to match {} attributes", name))
.unwrap_or_else(|| panic!("failed to match {name} attributes"))
.observe()
}

View File

@ -372,7 +372,7 @@ impl TestContext {
self.metrics
.get_instrument::<Metric<T>>(name)
.unwrap_or_else(|| panic!("failed to find metric {}", name))
.unwrap_or_else(|| panic!("failed to find metric {name}"))
.get_observer(&attrs)
.unwrap_or_else(|| {
panic!(

View File

@ -50,8 +50,7 @@ impl NamespaceNameResolver {
.await?
.unwrap_or_else(|| {
panic!(
"resolving namespace name for non-existent namespace id {}",
namespace_id
"resolving namespace name for non-existent namespace id {namespace_id}"
)
})
.name

View File

@ -50,8 +50,7 @@ impl TableNameResolver {
.await?
.unwrap_or_else(|| {
panic!(
"resolving table name for non-existent table id {}",
table_id
"resolving table name for non-existent table id {table_id}"
)
})
.name

View File

@ -161,7 +161,7 @@ where
self.query_request_limit_rejected.inc(1);
return Err(Error::RequestLimit)?;
}
Err(e) => panic!("request limiter error: {}", e),
Err(e) => panic!("request limiter error: {e}"),
};
let ticket = request.into_inner();

View File

@ -2312,8 +2312,7 @@ pub(crate) mod test_helpers {
assert_eq!(
tombstones_ids, expected_ids,
"\ntombstones: {:#?}\nexpected: {:#?}\nparquet_file: {:#?}",
tombstones, expected, parquet_file
"\ntombstones: {tombstones:#?}\nexpected: {expected:#?}\nparquet_file: {parquet_file:#?}"
);
repos
@ -3043,8 +3042,7 @@ pub(crate) mod test_helpers {
assert_eq!(
level_0_ids, expected_ids,
"\nlevel 0: {:#?}\nexpected: {:#?}",
level_0, expected,
"\nlevel 0: {level_0:#?}\nexpected: {expected:#?}",
);
// drop the namespace to avoid the created data in this tests from affecting other tests
@ -3278,8 +3276,7 @@ pub(crate) mod test_helpers {
assert_eq!(
level_1_ids, expected_ids,
"\nlevel 1: {:#?}\nexpected: {:#?}",
level_1, expected,
"\nlevel 1: {level_1:#?}\nexpected: {expected:#?}",
);
// drop the namespace to avoid the created data in this tests from affecting other tests
@ -3334,8 +3331,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// Across all shards
let partitions = repos
@ -3345,8 +3341,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// The DB has 1 partition, partition_1, but it does not have any files
@ -3362,8 +3357,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// Across all shards
let partitions = repos
@ -3373,8 +3367,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// The partition_1 has one deleted file
@ -3411,8 +3404,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// Across all shards
let partitions = repos
@ -3422,8 +3414,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// A hot_partition with one cold file and one hot file
@ -3456,8 +3447,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// Across all shards
let partitions = repos
@ -3467,8 +3457,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// An already_compacted_partition that has only one non-deleted level 2 file, should never
@ -3496,8 +3485,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// Across all shards
let partitions = repos
@ -3507,8 +3495,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert!(
partitions.is_empty(),
"Expected no partitions, instead got {:#?}",
partitions,
"Expected no partitions, instead got {partitions:#?}",
);
// The partition_1 has one non-deleted level 0 file created 38 hours ago
@ -5207,8 +5194,7 @@ pub(crate) mod test_helpers {
expected_ids.sort();
assert_eq!(
level_0_ids, expected_ids,
"\nlevel 0: {:#?}\nexpected: {:#?}",
level_0, expected,
"\nlevel 0: {level_0:#?}\nexpected: {expected:#?}",
);
// Make parquet_file compaction level 1, attempt to mark the nonexistent file; operation
@ -5232,8 +5218,7 @@ pub(crate) mod test_helpers {
expected_ids.sort();
assert_eq!(
level_0_ids, expected_ids,
"\nlevel 0: {:#?}\nexpected: {:#?}",
level_0, expected,
"\nlevel 0: {level_0:#?}\nexpected: {expected:#?}",
);
// Level 1 parquet files for a shard should only contain parquet_file
@ -5250,8 +5235,7 @@ pub(crate) mod test_helpers {
expected_ids.sort();
assert_eq!(
level_1_ids, expected_ids,
"\nlevel 1: {:#?}\nexpected: {:#?}",
level_1, expected,
"\nlevel 1: {level_1:#?}\nexpected: {expected:#?}",
);
// remove namespace to avoid it from affecting later tests

View File

@ -403,7 +403,7 @@ async fn new_raw_pool(
.execute(&mut *c)
.await?;
}
let search_path_query = format!("SET search_path TO {},public;", schema_name);
let search_path_query = format!("SET search_path TO {schema_name},public;");
c.execute(sqlx::query(&search_path_query)).await?;
// Ensure explicit timezone selection, instead of deferring to
@ -2532,7 +2532,7 @@ mod tests {
// Create the test schema
pg.pool
.execute(format!("CREATE SCHEMA {};", schema_name).as_str())
.execute(format!("CREATE SCHEMA {schema_name};").as_str())
.await
.expect("failed to create test schema");
@ -2540,8 +2540,7 @@ mod tests {
pg.pool
.execute(
format!(
"GRANT USAGE ON SCHEMA {} TO public; GRANT CREATE ON SCHEMA {} TO public;",
schema_name, schema_name
"GRANT USAGE ON SCHEMA {schema_name} TO public; GRANT CREATE ON SCHEMA {schema_name} TO public;"
)
.as_str(),
)
@ -2879,7 +2878,7 @@ mod tests {
// fetch dsn from envvar
let test_dsn = std::env::var("TEST_INFLUXDB_IOX_CATALOG_DSN").unwrap();
create_db(&test_dsn).await;
eprintln!("TEST_DSN={}", test_dsn);
eprintln!("TEST_DSN={test_dsn}");
// create a temp file to store the initial dsn
let mut dsn_file = NamedTempFile::new().expect("create temp file");
@ -2889,7 +2888,7 @@ mod tests {
const TEST_APPLICATION_NAME: &str = "test_application_name";
let dsn_good = format!("dsn-file://{}", dsn_file.path().display());
eprintln!("dsn_good={}", dsn_good);
eprintln!("dsn_good={dsn_good}");
// create a hot swap pool with test application name and dsn file pointing to tmp file.
// we will later update this file and the pool should be replaced.
@ -2919,7 +2918,7 @@ mod tests {
.write_all(test_dsn.as_bytes())
.expect("write temp file");
new_dsn_file
.write_all(format!("?application_name={}", TEST_APPLICATION_NAME_NEW).as_bytes())
.write_all(format!("?application_name={TEST_APPLICATION_NAME_NEW}").as_bytes())
.expect("write temp file");
new_dsn_file
.persist(dsn_file.path())

Some files were not shown because too many files have changed in this diff Show More