Merge pull request #817 from influxdata/er/refactor/clippy_clone_lint

refactor: apply clippy clone lint
kodiakhq[bot] 2021-02-16 13:53:11 +00:00 committed by GitHub
commit 72546f75be
8 changed files with 29 additions and 28 deletions
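For readers unfamiliar with the lint: clippy's clone_on_ref_ptr flags .clone() called via method syntax on a reference-counted pointer (Rc, Arc, and their Weak variants), because that spelling reads like a deep copy of the pointee. The fully qualified Arc::clone(&x) form this PR switches to makes the cheap, reference-count-only copy explicit. A minimal standalone sketch of the pattern (illustrative only, not code from this repository):

use std::sync::Arc;

fn main() {
    let data = Arc::new(vec![1, 2, 3]);

    // With #![warn(clippy::clone_on_ref_ptr)] enabled, clippy flags this
    // spelling because it reads as if the Vec itself were duplicated:
    //     let handle = data.clone();

    // The qualified form makes it obvious that only the Arc's reference
    // count is bumped; the underlying Vec is shared, not copied.
    let handle = Arc::clone(&data);

    assert_eq!(Arc::strong_count(&data), 2);
    drop(handle);
    assert_eq!(Arc::strong_count(&data), 1);
}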

View File

@@ -165,7 +165,7 @@ impl Buffer {
             let segment = mem::replace(&mut self.open_segment, Segment::new(next_id));
             let segment = Arc::new(segment);
-            self.closed_segments.push(segment.clone());
+            self.closed_segments.push(Arc::clone(&segment));
             closed_segment = Some(segment);
         }
@@ -192,7 +192,7 @@ impl Buffer {
                 writes.reverse();
                 return writes;
             }
-            writes.push(w.clone());
+            writes.push(Arc::clone(&w));
         }
         for s in self.closed_segments.iter().rev() {
@@ -201,7 +201,7 @@ impl Buffer {
                     writes.reverse();
                     return writes;
                 }
-                writes.push(w.clone());
+                writes.push(Arc::clone(&w));
             }
         }
@@ -225,7 +225,7 @@ impl Buffer {
                     writes.reverse();
                     return writes;
                 }
-                writes.push(w.clone());
+                writes.push(Arc::clone(&w));
             }
         }
@@ -237,7 +237,7 @@ impl Buffer {
                     writes.reverse();
                     return writes;
                 }
-                writes.push(w.clone());
+                writes.push(Arc::clone(&w));
            }
        }
    }

View File

@@ -128,7 +128,7 @@ pub(crate) struct CreateDatabaseHandle<'a> {
 impl<'a> CreateDatabaseHandle<'a> {
     pub(crate) fn commit(self) {
-        self.config.commit(&self.name, self.db.clone())
+        self.config.commit(&self.name, Arc::clone(&self.db))
     }
 }

View File

@@ -145,7 +145,7 @@ impl Db {
         self.read_buffer
             .chunk_ids(partition_key)
             .into_iter()
-            .map(|chunk_id| DBChunk::new_rb(self.read_buffer.clone(), partition_key, chunk_id))
+            .map(|chunk_id| DBChunk::new_rb(Arc::clone(&self.read_buffer), partition_key, chunk_id))
             .collect()
     }
@@ -176,7 +176,7 @@ impl Db {
             .context(ReadBufferDrop)?;
         Ok(DBChunk::new_rb(
-            self.read_buffer.clone(),
+            Arc::clone(&self.read_buffer),
             partition_key,
             chunk_id,
         ))
@@ -222,7 +222,7 @@ impl Db {
         }
         Ok(DBChunk::new_rb(
-            self.read_buffer.clone(),
+            Arc::clone(&self.read_buffer),
             partition_key,
             mb_chunk.id,
         ))

View File

@@ -249,7 +249,7 @@ impl PartitionChunk for DBChunk {
         let schema: Schema = self.table_schema(table_name, selection.clone()).await?;
         Ok(Box::pin(MutableBufferChunkStream::new(
-            chunk.clone(),
+            Arc::clone(&chunk),
             schema.as_arrow(),
             table_name,
         )))

View File

@@ -93,7 +93,7 @@ impl MutableBufferChunkStream {
 impl RecordBatchStream for MutableBufferChunkStream {
     fn schema(&self) -> SchemaRef {
-        self.schema.clone()
+        Arc::clone(&self.schema)
     }
 }
@@ -127,7 +127,7 @@ impl ReadFilterResultsStream {
 impl RecordBatchStream for ReadFilterResultsStream {
     fn schema(&self) -> SchemaRef {
-        self.schema.clone()
+        Arc::clone(&self.schema)
     }
 }
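Aside: SchemaRef in Arrow is a type alias for Arc<Schema>, which is why Arc::clone applies here even though the field's type name doesn't spell out the Arc. A minimal sketch with hypothetical stand-ins (Stream and the inline Schema are not the real types):

use std::sync::Arc;

// Arrow's SchemaRef is a type alias for Arc<Schema>; minimal stand-in:
struct Schema {
    fields: Vec<String>,
}
type SchemaRef = Arc<Schema>;

// Hypothetical stream mirroring the RecordBatchStream::schema impls above.
struct Stream {
    schema: SchemaRef,
}

impl Stream {
    fn schema(&self) -> SchemaRef {
        // The field is an Arc under the alias, so the qualified form applies.
        Arc::clone(&self.schema)
    }
}

fn main() {
    let stream = Stream {
        schema: Arc::new(Schema {
            fields: vec!["time".to_string()],
        }),
    };
    // Both handles point at the same Schema allocation.
    let schema = stream.schema();
    assert_eq!(schema.fields.len(), 1);
}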

View File

@@ -63,7 +63,8 @@
 #![warn(
     missing_debug_implementations,
     clippy::explicit_iter_loop,
-    clippy::use_self
+    clippy::use_self,
+    clippy::clone_on_ref_ptr
 )]
 pub mod buffer;
@@ -251,8 +252,8 @@ impl<M: ConnectionManager> Server<M> {
             .common_prefixes
             .into_iter()
             .map(|mut path| {
-                let store = self.store.clone();
-                let config = self.config.clone();
+                let store = Arc::clone(&self.store);
+                let config = Arc::clone(&self.config);
                 path.set_file_name(DB_RULES_FILE_NAME);
@@ -353,13 +354,13 @@ impl<M: ConnectionManager> Server<M> {
             // succeed while a WAL buffer write fails, which would then
             // return an error. A single lock is probably undesirable, but
             // we need to figure out what semantics we want.
-            wal_buffer.append(write.clone()).context(WalError)?
+            wal_buffer.append(Arc::clone(&write)).context(WalError)?
         };
         if let Some(segment) = segment {
             if persist {
                 let writer_id = self.require_id()?;
-                let store = self.store.clone();
+                let store = Arc::clone(&self.store);
                 segment
                     .persist_bytes_in_background(
                         &self.segment_persistence_registry,
@@ -478,7 +479,7 @@ where
     }
     fn executor(&self) -> Arc<Executor> {
-        self.executor.clone()
+        Arc::clone(&self.executor)
     }
 }
@@ -607,7 +608,7 @@ mod tests {
    async fn create_database_persists_rules() {
        let manager = TestConnectionManager::new();
        let store = Arc::new(ObjectStore::new_in_memory(InMemory::new()));
-        let server = Server::new(manager, store.clone());
+        let server = Server::new(manager, Arc::clone(&store));
        server.set_id(1);
        let name = "bananas";
@@ -782,7 +783,7 @@ mod tests {
        let remote_id = "serverA";
        manager
            .remotes
-            .insert(remote_id.to_string(), remote.clone());
+            .insert(remote_id.to_string(), Arc::clone(&remote));
        let store = Arc::new(ObjectStore::new_in_memory(InMemory::new()));
@@ -841,7 +842,7 @@ partition_key:
        let remote_id = "serverA";
        manager
            .remotes
-            .insert(remote_id.to_string(), remote.clone());
+            .insert(remote_id.to_string(), Arc::clone(&remote));
        let store = Arc::new(ObjectStore::new_in_memory(InMemory::new()));
@@ -904,7 +905,7 @@ partition_key:
        let manager = TestConnectionManager::new();
        let store = Arc::new(ObjectStore::new_in_memory(InMemory::new()));
-        let server = Server::new(manager, store.clone());
+        let server = Server::new(manager, Arc::clone(&store));
        server.set_id(1);
        let db_name = "my_db";
        let rules = DatabaseRules {
@@ -974,7 +975,7 @@ partition_key:
        type RemoteServer = TestRemoteServer;
        async fn remote_server(&self, id: &str) -> Result<Arc<TestRemoteServer>, Self::Error> {
-            Ok(self.remotes.get(id).unwrap().clone())
+            Ok(Arc::clone(&self.remotes.get(id).unwrap()))
        }
    }

View File

@@ -300,7 +300,7 @@ where
    );
    let snapshot = Arc::new(snapshot);
-    let return_snapshot = snapshot.clone();
+    let return_snapshot = Arc::clone(&snapshot);
    tokio::spawn(async move {
        info!(
@@ -355,7 +355,7 @@ impl Seek for MemWriter {
 impl TryClone for MemWriter {
     fn try_clone(&self) -> std::io::Result<Self> {
         Ok(Self {
-            mem: self.mem.clone(),
+            mem: Arc::clone(&self.mem),
         })
     }
 }
@@ -393,12 +393,12 @@ mem,host=A,region=west used=45 1
        let mut data_path = store.new_path();
        data_path.push_dir("data");
-        let chunk = db.chunks("1970-01-01T00").await[0].clone();
+        let chunk = Arc::clone(&db.chunks("1970-01-01T00").await[0]);
        let snapshot = snapshot_chunk(
            metadata_path.clone(),
            data_path,
-            store.clone(),
+            Arc::clone(&store),
            "testaroo",
            chunk,
            Some(tx),

View File

@@ -38,7 +38,7 @@ pub struct TrackerRegistry<T> {
 impl<T> Clone for TrackerRegistry<T> {
     fn clone(&self) -> Self {
         Self {
-            inner: self.inner.clone(),
+            inner: Arc::clone(&self.inner),
         }
     }
 }
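One more detail worth noting: because the lint is enabled through the crate-level #![warn(...)] attribute (see the lib.rs hunk above), it also fires inside hand-written Clone impls such as this TrackerRegistry one. Writing Clone by hand also avoids the T: Clone bound that #[derive(Clone)] would impose, even though cloning only copies the Arc handle. A hedged sketch of the shape, with Registry as a hypothetical stand-in for TrackerRegistry:

use std::sync::{Arc, Mutex};

// Hypothetical registry mirroring the TrackerRegistry pattern above.
// Clone is written by hand so that T need not be Clone: cloning the
// registry only clones the shared handle, never the tracked values.
#[derive(Debug)]
pub struct Registry<T> {
    inner: Arc<Mutex<Vec<T>>>,
}

impl<T> Clone for Registry<T> {
    fn clone(&self) -> Self {
        Self {
            // self.inner.clone() would trip clippy::clone_on_ref_ptr here;
            // the qualified call documents the reference-count-only copy.
            inner: Arc::clone(&self.inner),
        }
    }
}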