refactor: make heappy an optional feature, add to build image

pull/24376/head
Andrew Lamb 2021-08-13 08:11:32 -05:00
parent 36e84f85ef
commit 697de875ca
5 changed files with 48 additions and 29 deletions


@@ -192,8 +192,8 @@ jobs:
name: Build benches
command: cargo test --workspace --benches --no-run
- run:
name: Build with object store + exporter support
command: cargo build --features="aws,gcp,azure,jaeger,otlp"
name: Build with object store + exporter support + HEAP profiling
command: cargo build --features="aws,gcp,azure,jaeger,otlp,heappy"
- cache_save
# Lint protobufs.


@@ -59,7 +59,7 @@ datafusion = { path = "datafusion" }
data_types = { path = "data_types" }
entry = { path = "entry" }
generated_types = { path = "generated_types" }
heappy = { git = "https://github.com/mkmik/heappy", rev = "39530c987aecda0b2d76503e8b5bb33d8a93f384" , features = ["enable_heap_profiler"]}
heappy = { git = "https://github.com/mkmik/heappy", rev = "39530c987aecda0b2d76503e8b5bb33d8a93f384", features = ["enable_heap_profiler", "jemalloc_shim"], optional = true}
influxdb_iox_client = { path = "influxdb_iox_client", features = ["format"] }
influxdb_line_protocol = { path = "influxdb_line_protocol" }
@@ -147,3 +147,5 @@ gcp = ["object_store/gcp"] # Optional GCP object store support
aws = ["object_store/aws"] # Optional AWS / S3 object store support
jaeger = ["trogging/jaeger"] # Enable optional jaeger tracing support
otlp = ["trogging/otlp"] # Enable optional open telemetry collector
# heappy is also an optional feature; not on by default as it adds
# runtime overhead on all allocations (calls to malloc)


@@ -10,13 +10,15 @@
//! Long term, we expect to create IOx specific api in terms of
//! database names and may remove this quasi /v2 API.
#[cfg(feature = "heappy")]
mod heappy;
// Influx crates
use super::planner::Planner;
use data_types::{
names::{org_and_bucket_to_database, OrgBucketMappingError},
DatabaseName,
};
use heappy::{self, HeapReport};
use influxdb_iox_client::format::QueryOutputFormat;
use influxdb_line_protocol::parse_lines;
use query::{exec::ExecutorType, QueryDatabase};
@@ -800,29 +802,6 @@ async fn dump_rsprof(seconds: u64, frequency: i32) -> pprof::Result<pprof::Repor
guard.report().build()
}
#[cfg(feature = "heappy")]
async fn dump_heappy_rsprof(seconds: u64, frequency: i32) -> Result<HeapReport> {
let guard = heappy::HeapProfilerGuard::new(frequency as usize);
info!(
"start heappy profiling {} seconds with frequency {} /s",
seconds, frequency
);
tokio::time::sleep(Duration::from_secs(seconds)).await;
info!(
"done heappy profiling {} seconds with frequency {} /s",
seconds, frequency
);
Ok(guard.report())
}
#[cfg(not(feature = "heappy"))]
async fn dump_heappy_rsprof(_seconds: u64, _frequency: i32) -> Result<HeapReport> {
HeappyIsNotCompiled {}.fail()
}
#[derive(Debug, Deserialize)]
struct PProfArgs {
#[serde(default = "PProfArgs::default_seconds")]
@@ -877,6 +856,8 @@ async fn pprof_profile<M: ConnectionManager + Send + Sync + Debug + 'static>(
Ok(Response::new(Body::from(body)))
}
// If heappy support is enabled, call it
#[cfg(feature = "heappy")]
#[tracing::instrument(level = "debug")]
async fn pprof_heappy_profile<M: ConnectionManager + Send + Sync + Debug + 'static>(
req: Request<Body>,
@@ -885,12 +866,13 @@ async fn pprof_heappy_profile<M: ConnectionManager + Send + Sync + Debug + 'stat
let query: PProfArgs =
serde_urlencoded::from_str(query_string).context(InvalidQueryString { query_string })?;
let report: HeapReport = dump_heappy_rsprof(query.seconds, query.frequency.get()).await?;
let report = self::heappy::dump_heappy_rsprof(query.seconds, query.frequency.get()).await;
let mut body: Vec<u8> = Vec::new();
// render flamegraph when opening in the browser
// otherwise render as protobuf; works great with: go tool pprof http://..../debug/pprof/allocs
// otherwise render as protobuf;
// works great with: go tool pprof http://..../debug/pprof/allocs
if req
.headers()
.get_all("Accept")
@@ -909,6 +891,16 @@ async fn pprof_heappy_profile<M: ConnectionManager + Send + Sync + Debug + 'stat
Ok(Response::new(Body::from(body)))
}
// Return error if heappy not enabled
#[cfg(not(feature = "heappy"))]
#[tracing::instrument(level = "debug")]
async fn pprof_heappy_profile<M: ConnectionManager + Send + Sync + Debug + 'static>(
_req: Request<Body>,
) -> Result<Response<Body>, ApplicationError> {
HeappyIsNotCompiled {}.fail()
}
pub async fn serve<M>(
addr: AddrIncoming,
application: Arc<ApplicationState>,


@@ -0,0 +1,24 @@
//! Memory profiling support using heappy
//!
//! Compiled only when the "heappy" feature is enabled
use heappy::{self, HeapReport};
use observability_deps::tracing::info;
use tokio::time::Duration;
pub(crate) async fn dump_heappy_rsprof(seconds: u64, frequency: i32) -> HeapReport {
let guard = heappy::HeapProfilerGuard::new(frequency as usize);
info!(
"start heappy profiling {} seconds with frequency {} /s",
seconds, frequency
);
tokio::time::sleep(Duration::from_secs(seconds)).await;
info!(
"done heappy profiling {} seconds with frequency {} /s",
seconds, frequency
);
guard.report()
}
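
For context, a minimal sketch of driving this helper's underlying API directly, outside the HTTP handler. The function name and the 99/5-second values are illustrative only, it assumes the binary was built with the heappy feature, and rendering of the returned HeapReport (flamegraph vs. pprof protobuf) is left to the caller, as in the handler above.

```rust
// Illustrative sketch, not part of this commit: capture a heap profile for a
// fixed window and return the report for the caller to render.
#[cfg(feature = "heappy")]
pub async fn sample_heap_report() -> heappy::HeapReport {
    // The frequency argument mirrors the handler's; its exact semantics are
    // defined by heappy::HeapProfilerGuard::new.
    let guard = heappy::HeapProfilerGuard::new(99);
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
    // Stop profiling and build the report from the captured samples.
    guard.report()
}
```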


@@ -34,6 +34,7 @@ mod object_store;
pub mod influxdb_ioxd;
#[cfg(not(feature = "heappy"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
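
Reading the two ends of the change together: Cargo.toml turns on heappy's jemalloc_shim feature, and the explicit jemalloc #[global_allocator] above is now compiled only when heappy is off. A rough sketch of that wiring follows; the idea that the shim interposes on allocations itself (and would therefore clash with a second global allocator declaration) is an inference from the feature name, and tikv_jemallocator is assumed to be the jemalloc wrapper behind the existing Jemalloc alias.

```rust
// Sketch of the allocator gating (assumptions noted in the text above).
// Default build: jemalloc is installed explicitly as the global allocator.
#[cfg(not(feature = "heappy"))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

// `--features heappy` build: no explicit #[global_allocator] here; heappy's
// jemalloc_shim is expected to provide the allocation hooks instead, and a
// second global allocator declaration would conflict with it.
```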