chore: use upstream versions of some workarounds (#2057)

* chore: use upstream versions of some workarounds

* docs: update docstring

Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
Andrew Lamb 2021-07-20 04:53:46 -04:00 committed by GitHub
parent 8e5d5928cf
commit 2c20528c69
4 changed files with 5 additions and 30 deletions


@@ -786,12 +786,12 @@ macro_rules! assert_column_eq {
 #[cfg(test)]
 mod test {
+    use arrow::compute::SortOptions;
     use InfluxColumnType::*;
     use InfluxFieldType::*;

     use super::{builder::SchemaBuilder, *};
     use crate::schema::merge::SchemaMerger;
-    use crate::schema::sort::SortOptions;

     fn make_field(
         name: &str,


@@ -1,5 +1,6 @@
 use std::{fmt::Display, str::FromStr};

+use arrow::compute::SortOptions;
 use indexmap::{map::Iter, IndexMap};
 use itertools::Itertools;
 use snafu::Snafu;
@@ -23,24 +24,6 @@ pub enum Error {

 pub type Result<T, E = Error> = std::result::Result<T, E>;
-
-/// Temporary - <https://github.com/apache/arrow-rs/pull/425>
-#[derive(Debug, Clone, Copy, Eq, PartialEq)]
-pub struct SortOptions {
-    /// Whether to sort in descending order
-    pub descending: bool,
-    /// Whether to sort nulls first
-    pub nulls_first: bool,
-}
-
-impl Default for SortOptions {
-    fn default() -> Self {
-        Self {
-            descending: false,
-            nulls_first: true,
-        }
-    }
-}

 #[derive(Debug, Clone, Copy, Eq, PartialEq)]
 pub struct ColumnSort {
     /// Position of this column in the sort key
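The deleted struct was a stopgap copy of arrow's sort options, kept locally until apache/arrow-rs#425 landed; with that change released, the crate imports arrow::compute::SortOptions directly. A minimal sketch of using the upstream type as a drop-in replacement — the field names and defaults shown match the removed local copy, and the derived Copy/Eq/PartialEq/Debug traits are what the workaround existed to provide:

use arrow::compute::SortOptions;

fn main() {
    // Upstream defaults match the removed local Default impl:
    // ascending order, nulls sorted first.
    let default_opts = SortOptions::default();
    assert!(!default_opts.descending);
    assert!(default_opts.nulls_first);

    // Construct explicitly, using the same two fields as the removed struct.
    let opts = SortOptions {
        descending: true,
        nulls_first: false,
    };

    // PartialEq is derived upstream, so direct comparison works.
    assert_ne!(opts, default_opts);
}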


@@ -268,8 +268,9 @@ struct ScanPlan<C: QueryChunk + 'static> {
 #[cfg(test)]
 mod test {
+    use arrow::compute::SortOptions;
     use arrow_util::assert_batches_eq;
-    use internal_types::schema::{merge::SchemaMerger, sort::SortOptions};
+    use internal_types::schema::merge::SchemaMerger;

     use crate::{
         exec::{Executor, ExecutorType},


@@ -339,21 +339,12 @@ impl RecordBatchDeduplicator {
 }

 /// Create a new record batch from offset --> len
-///
-/// <https://github.com/apache/arrow-rs/issues/460> for adding this upstream
 fn slice_record_batch(
     batch: &RecordBatch,
     offset: usize,
     len: usize,
 ) -> ArrowResult<RecordBatch> {
-    let schema = batch.schema();
-    let new_columns: Vec<_> = batch
-        .columns()
-        .iter()
-        .map(|old_column| old_column.slice(offset, len))
-        .collect();
-    let batch = RecordBatch::try_new(schema, new_columns)?;
+    let batch = batch.slice(offset, len);

     // At time of writing, `concat_batches` concatenates the
     // contents of dictionaries as well; Do a post pass to remove the
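With <https://github.com/apache/arrow-rs/issues/460> resolved upstream, RecordBatch::slice replaces the hand-rolled column-by-column copy that this hunk deletes. A minimal, self-contained sketch of the upstream call — the schema and values here are made up for illustration:

use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::error::Result as ArrowResult;
use arrow::record_batch::RecordBatch;

fn main() -> ArrowResult<()> {
    // One Int64 column with five rows.
    let schema = Arc::new(Schema::new(vec![Field::new("a", DataType::Int64, false)]));
    let column: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 3, 4, 5]));
    let batch = RecordBatch::try_new(schema, vec![column])?;

    // Zero-copy slice: offset 1, length 3 keeps rows [2, 3, 4].
    let sliced = batch.slice(1, 3);
    assert_eq!(sliced.num_rows(), 3);
    Ok(())
}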