chore: port sqlx-hotswap-pool over from conductor (#3750)
* chore: port sqlx-hotswap-pool over from conductor

Co-authored-by: Marko Mikulicic <mkm@influxdata.com>

* chore: workspace hack fixes

* fix: unique schema per test db connection

* fix: adjust search path in catalog pg tests to see if it fixes test schema issue

* fix: actually fixed sqlx hotswap pool test

Co-authored-by: Marko Mikulicic <mkm@influxdata.com>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>

parent 0b3f76462d
commit a66e29e5b3
Cargo.lock

@@ -2120,6 +2120,7 @@ dependencies = [
  "schema",
  "snafu",
  "sqlx",
+ "sqlx-hotswap-pool",
  "test_helpers",
  "tokio",
  "uuid",
@@ -4864,6 +4865,20 @@ dependencies = [
  "whoami",
 ]

+[[package]]
+name = "sqlx-hotswap-pool"
+version = "0.0.0"
+dependencies = [
+ "dotenv",
+ "either",
+ "futures",
+ "rand",
+ "sqlx",
+ "tokio",
+ "tokio-stream",
+ "workspace-hack",
+]
+
 [[package]]
 name = "sqlx-macros"
 version = "0.5.10"
@@ -4876,6 +4891,7 @@ dependencies = [
  "once_cell",
  "proc-macro2",
  "quote",
+ "serde_json",
  "sha2",
  "sqlx-core",
  "sqlx-rt",
@@ -6034,6 +6050,9 @@ dependencies = [
  "serde_json",
  "sha2",
  "smallvec",
+ "sqlx",
+ "sqlx-core",
+ "sqlx-macros",
  "syn",
  "tokio",
  "tokio-stream",
Cargo.toml

@@ -51,6 +51,7 @@ members = [
     "schema",
     "server",
     "server_benchmarks",
+    "sqlx-hotswap-pool",
     "test_helpers",
     "time",
     "trace",
iox_catalog/Cargo.toml

@@ -7,15 +7,16 @@ edition = "2021"
 [dependencies] # In alphabetical order
 async-trait = "0.1.42"
 futures = "0.3"
+influxdb_line_protocol = { path = "../influxdb_line_protocol" }
+mutable_batch = { path = "../mutable_batch" }
 observability_deps = { path = "../observability_deps" }
+schema = { path = "../schema" }
 snafu = "0.7"
 sqlx = { version = "0.5", features = [ "runtime-tokio-native-tls" , "postgres", "uuid" ] }
+sqlx-hotswap-pool = { path = "../sqlx-hotswap-pool" }
 tokio = { version = "1.13", features = ["io-util", "macros", "parking_lot", "rt-multi-thread", "time"] }
-influxdb_line_protocol = { path = "../influxdb_line_protocol" }
-workspace-hack = { path = "../workspace-hack"}
 uuid = { version = "0.8", features = ["v4"] }
-mutable_batch = { path = "../mutable_batch" }
-schema = { path = "../schema" }
+workspace-hack = { path = "../workspace-hack"}

 [dev-dependencies] # In alphabetical order
 dotenv = "0.15.0"
iox_catalog/src/postgres.rs

@@ -10,7 +10,8 @@ use crate::interface::{
 };
 use async_trait::async_trait;
 use observability_deps::tracing::{info, warn};
-use sqlx::{migrate::Migrator, postgres::PgPoolOptions, Executor, Pool, Postgres, Row};
+use sqlx::{migrate::Migrator, postgres::PgPoolOptions, Acquire, Executor, Postgres, Row};
+use sqlx_hotswap_pool::HotSwapPool;
 use std::time::Duration;
 use uuid::Uuid;

@@ -25,7 +26,7 @@ static MIGRATOR: Migrator = sqlx::migrate!();
 /// PostgreSQL catalog.
 #[derive(Debug)]
 pub struct PostgresCatalog {
-    pool: Pool<Postgres>,
+    pool: HotSwapPool<Postgres>,
 }

 // struct to get return value from "select count(*) ..." query
@@ -66,6 +67,9 @@ impl PostgresCatalog {
         // name for cross-correlation between Conductor logs & database connections.
         info!(application_name=%app_name, "connected to catalog store");

+        // Upgrade the pool to a hot swap pool
+        let pool = HotSwapPool::new(pool);
+
         Ok(Self { pool })
     }
 }
@@ -80,7 +84,7 @@ pub struct PostgresTxn {
 #[allow(clippy::large_enum_variant)]
 enum PostgresTxnInner {
     Txn(Option<sqlx::Transaction<'static, Postgres>>),
-    Oneshot(Pool<Postgres>),
+    Oneshot(HotSwapPool<Postgres>),
 }

 impl<'c> Executor<'c> for &'c mut PostgresTxnInner {
@@ -943,7 +947,7 @@ mod tests {
         crate::interface::test_helpers::test_catalog(postgres).await;
     }

-    async fn clear_schema(pool: &Pool<Postgres>) {
+    async fn clear_schema(pool: &HotSwapPool<Postgres>) {
         sqlx::query("delete from processed_tombstone;")
             .execute(pool)
             .await
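With `PostgresCatalog` now holding a `HotSwapPool`, a credential refresher can build a new pool and swap it in without restarting the process. That refresh loop lives in Conductor and is not part of this diff; the following is a minimal hypothetical sketch, where `fetch_rotated_dsn` stands in for whatever provider (AWS IAM database auth, Vault, ...) hands out fresh credentials:

```rust
use std::time::Duration;

use sqlx::{postgres::PgPoolOptions, Pool, Postgres};
use sqlx_hotswap_pool::HotSwapPool;

// Hypothetical helper: returns a DSN carrying freshly rotated credentials.
// Not part of this PR.
async fn fetch_rotated_dsn() -> String {
    todo!("ask the credential provider for a new connection string")
}

// Periodically build a new pool with fresh credentials and swap it in.
// Every clone of `pool` (e.g. the one inside PostgresCatalog) sees the new
// pool on its next operation.
async fn refresh_credentials_loop(pool: HotSwapPool<Postgres>) -> Result<(), sqlx::Error> {
    loop {
        tokio::time::sleep(Duration::from_secs(600)).await;
        let new_pool: Pool<Postgres> = PgPoolOptions::new()
            .connect(&fetch_rotated_dsn().await)
            .await?;
        pool.replace(new_pool);
    }
}
```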
sqlx-hotswap-pool/Cargo.toml

@@ -0,0 +1,21 @@
+[package]
+name = "sqlx-hotswap-pool"
+authors = ["Marko Mikulicic <mkm@influxdata.com>"]
+version = "0.0.0"
+edition = "2021"
+description = "Workaround for the lack of dynamic credential update support in sqlx"
+
+# Prevent this from being published to crates.io!
+publish = false
+
+[dependencies]
+sqlx = { version = "0.5.9", features = ["runtime-tokio-native-tls", "postgres", "json", "tls"] }
+either = "1.6.1"
+tokio = { version = "1.13", features = ["rt-multi-thread", "macros", "parking_lot"] }
+tokio-stream = { version = "0.1", default_features = false }
+futures = "0.3"
+workspace-hack = { path = "../workspace-hack"}
+
+[dev-dependencies]
+dotenv = "0.15.0"
+rand = { version = "0.8", features = ["small_rng"] }
sqlx-hotswap-pool/README.md

@@ -0,0 +1,53 @@
+# sqlx-hotswap-pool
+
+This crate implements a workaround for the lack of support for password rotation in the `sqlx` crate.
+
+There is an upstream ticket for this: [Support rotating passwords #445](https://github.com/launchbadge/sqlx/issues/445).
+This crate offers a more quick&dirty solution to the problem.
+
+## Problem
+
+Some authentication methods for databases provide short-lived passwords that must be regularly rotated.
+
+Examples are: [AWS IAM database authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html), HashiCorp Vault's dynamic roles, ...
+
+However, in `sqlx`, once you create a pool you need to pass the connection string (which includes the credentials) and you can't change it afterwards.
+The pool will create one or more connections with those credentials.
+
+## Workaround
+
+This crate implements a wrapper struct around a reference-counted `Pool` smart pointer. This wrapper can then be updated using internal mutability (lock protected) whenever the main binary detects a credentials refresh. Every subsequent use of the pool will use the new underlying pool.
+
+This workaround has been designed to solve the problem of updating credentials, but it happens to work if you want to point your pool to an entirely different database as well.
+
+If the credentials refresh happens before the existing credentials are invalidated, references to the previous pool can still be used for some time.
+
+If the credentials refresh immediately invalidates the existing credentials, callers will experience connection errors if they use the pool before it has been updated (or if they cloned the pool before the `replace` method was called).
+
+Already open connections will keep working in both cases.
+
+Usage:
+
+```rust
+use sqlx_hotswap_pool::HotSwapPool;
+use sqlx::{pool::PoolOptions, Pool, Postgres};
+# async fn foo() {
+let pool1: Pool<Postgres> = PoolOptions::new()
+    .test_before_acquire(true)
+    .connect("postgresql://user:pw1@localhost/db")
+    .await
+    .unwrap();
+
+// create a HotSwapPool, a pool that wraps `pool1` and supports replacing it with another
+let pool: HotSwapPool<Postgres> = HotSwapPool::new(pool1);
+
+let pool2 = PoolOptions::new()
+    .test_before_acquire(true)
+    .connect("postgresql://user:pw2@localhost/db")
+    .await
+    .unwrap();
+
+// replace the pool wrapped by the HotSwapPool with `pool2` instead of `pool1`
+pool.replace(pool2);
+# }
+```
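Because clones of a `HotSwapPool` share the same inner lock-protected pointer (see the `Clone` impl in `lib.rs` below), a `replace` through any handle is observed by every clone on its next use. A small sketch of that behaviour (the DSNs are placeholders):

```rust
use sqlx::{pool::PoolOptions, Postgres};
use sqlx_hotswap_pool::HotSwapPool;

async fn demo() -> Result<(), sqlx::Error> {
    let pool = HotSwapPool::new(
        PoolOptions::<Postgres>::new()
            .connect("postgresql://user:pw1@localhost/db") // placeholder DSN
            .await?,
    );

    // Clones share the same inner RwLock'd pointer...
    let handle = pool.clone();

    let pool2 = PoolOptions::<Postgres>::new()
        .connect("postgresql://user:pw2@localhost/db") // placeholder DSN
        .await?;
    pool.replace(pool2);

    // ...so the clone resolves to the new pool on its next use.
    sqlx::query("SELECT 1").execute(&handle).await?;
    Ok(())
}
```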
sqlx-hotswap-pool/src/lib.rs

@@ -0,0 +1,289 @@
+#![doc = include_str!("../README.md")]
+#![deny(
+    rustdoc::broken_intra_doc_links,
+    rust_2018_idioms,
+    missing_debug_implementations,
+    unreachable_pub
+)]
+#![warn(
+    missing_docs,
+    clippy::todo,
+    clippy::dbg_macro,
+    clippy::clone_on_ref_ptr,
+    clippy::future_not_send
+)]
+#![allow(clippy::missing_docs_in_private_items, clippy::type_complexity)]
+
+use std::sync::{Arc, RwLock};
+
+use either::Either;
+use futures::{future::BoxFuture, prelude::stream::BoxStream};
+use sqlx::{
+    database::HasStatement, pool::PoolConnection, Acquire, Database, Describe, Error, Execute,
+    Executor, Pool, Transaction,
+};
+
+/// A `HotSwapPool` is a Pool that wraps another Pool and allows the wrapped
+/// pool to be replaced at runtime.
+#[derive(Debug)]
+pub struct HotSwapPool<DB>
+where
+    DB: Database,
+{
+    pool: Arc<RwLock<Arc<Pool<DB>>>>,
+}
+
+impl<DB> HotSwapPool<DB>
+where
+    DB: Database,
+{
+    /// Creates a new [`HotSwapPool`] from a [`Pool`].
+    pub fn new(pool: Pool<DB>) -> Self {
+        Self {
+            pool: Arc::new(RwLock::new(Arc::new(pool))),
+        }
+    }
+
+    /// Replaces the underlying [`Pool`] with `new_pool`.
+    ///
+    /// Existing connections obtained by performing operations on the pool
+    /// before the call to `replace` are still valid.
+    ///
+    /// This method affects new operations only.
+    pub fn replace(&self, new_pool: Pool<DB>) {
+        let mut pool = self.pool.write().expect("poisoned");
+        *pool = Arc::new(new_pool);
+    }
+}
+
+impl<DB> Clone for HotSwapPool<DB>
+where
+    DB: Database,
+{
+    fn clone(&self) -> Self {
+        Self {
+            pool: Arc::clone(&self.pool),
+        }
+    }
+}
+
+impl<'a, DB> Acquire<'a> for &'_ HotSwapPool<DB>
+where
+    DB: Database,
+{
+    type Database = DB;
+
+    type Connection = PoolConnection<DB>;
+
+    fn acquire(self) -> BoxFuture<'static, Result<Self::Connection, Error>> {
+        let pool = self.pool.read().expect("poisoned");
+        Box::pin(pool.acquire())
+    }
+
+    fn begin(self) -> BoxFuture<'static, Result<Transaction<'a, DB>, Error>> {
+        let pool = self.pool.read().expect("poisoned");
+        let pool = Arc::clone(&pool);
+        Box::pin(async move { pool.begin().await })
+    }
+}
+
+impl<'p, DB> Executor<'p> for &'_ HotSwapPool<DB>
+where
+    DB: Database,
+    for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>,
+{
+    type Database = DB;
+
+    fn fetch_many<'e, 'q: 'e, E: 'q>(
+        self,
+        query: E,
+    ) -> BoxStream<'e, Result<Either<DB::QueryResult, DB::Row>, Error>>
+    where
+        E: Execute<'q, Self::Database>,
+    {
+        let pool = self.pool.read().expect("poisoned");
+        pool.fetch_many(query)
+    }
+
+    fn fetch_optional<'e, 'q: 'e, E: 'q>(
+        self,
+        query: E,
+    ) -> BoxFuture<'e, Result<Option<DB::Row>, Error>>
+    where
+        E: Execute<'q, Self::Database>,
+    {
+        let pool = self.pool.read().expect("poisoned");
+        pool.fetch_optional(query)
+    }
+
+    fn prepare_with<'e, 'q: 'e>(
+        self,
+        sql: &'q str,
+        parameters: &'e [<Self::Database as Database>::TypeInfo],
+    ) -> BoxFuture<'e, Result<<Self::Database as HasStatement<'q>>::Statement, Error>> {
+        let pool = self.pool.read().expect("poisoned");
+        pool.prepare_with(sql, parameters)
+    }
+
+    #[doc(hidden)]
+    fn describe<'e, 'q: 'e>(
+        self,
+        sql: &'q str,
+    ) -> BoxFuture<'e, Result<Describe<Self::Database>, Error>> {
+        let pool = self.pool.read().expect("poisoned");
+        pool.describe(sql)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::env;
+    use std::time::Duration;
+
+    use super::*;
+    use rand::{distributions::Alphanumeric, Rng};
+    use sqlx::{postgres::PgPoolOptions, Postgres};
+
+    // Helper macro to skip tests if TEST_INTEGRATION and DATABASE_URL are not set.
+    macro_rules! maybe_skip_integration {
+        () => {{
+            dotenv::dotenv().ok();
+
+            let required_vars = ["DATABASE_URL"];
+            let unset_vars: Vec<_> = required_vars
+                .iter()
+                .filter_map(|&name| match env::var(name) {
+                    Ok(_) => None,
+                    Err(_) => Some(name),
+                })
+                .collect();
+            let unset_var_names = unset_vars.join(", ");
+
+            let force = env::var("TEST_INTEGRATION");
+
+            if force.is_ok() && !unset_var_names.is_empty() {
+                panic!(
+                    "TEST_INTEGRATION is set, \
+                     but variable(s) {} need to be set",
+                    unset_var_names
+                );
+            } else if force.is_err() {
+                eprintln!(
+                    "skipping Postgres integration test - set {}TEST_INTEGRATION to run",
+                    if unset_var_names.is_empty() {
+                        String::new()
+                    } else {
+                        format!("{} and ", unset_var_names)
+                    }
+                );
+                return;
+            }
+        }};
+    }
+
+    // test helper to create a regular (non-hotswapping) DB connection
+    async fn connect_db() -> Result<Pool<Postgres>, sqlx::Error> {
+        // create a random schema for this particular pool
+        let schema_name = {
+            // use scope to make it clear to clippy / rust that `rng` is
+            // not carried past await points
+            let mut rng = rand::thread_rng();
+            (&mut rng)
+                .sample_iter(Alphanumeric)
+                .filter(|c| c.is_ascii_alphabetic())
+                .take(20)
+                .map(char::from)
+                .collect::<String>()
+        };
+        let dsn = std::env::var("DATABASE_URL").unwrap();
+        let captured_schema_name = schema_name.clone();
+        Ok(PgPoolOptions::new()
+            .min_connections(1)
+            .max_connections(5)
+            .connect_timeout(Duration::from_secs(2))
+            .idle_timeout(Duration::from_secs(500))
+            .test_before_acquire(true)
+            .after_connect(move |c| {
+                let captured_schema_name = captured_schema_name.clone();
+                Box::pin(async move {
+                    // Tag the connection with the provided application name.
+                    c.execute(sqlx::query("SET application_name = 'test';"))
+                        .await?;
+
+                    // Note: we can only bind data values, not schema names
+                    let query = format!("CREATE SCHEMA IF NOT EXISTS {}", &captured_schema_name);
+                    c.execute(sqlx::query(&query))
+                        .await
+                        .expect("failed to create schema");
+
+                    let search_path_query = format!("SET search_path TO {}", captured_schema_name);
+                    c.execute(sqlx::query(&search_path_query)).await?;
+                    Ok(())
+                })
+            })
+            .connect(&dsn)
+            .await?)
+    }
+
+    // The goal of this test is to verify that the hotswap pool can indeed replace
+    // the pool. In the real world one would replace pools in order to use new
+    // credentials. Here in our tests, checking that it actually uses a different
+    // pool handed over by the test utilities is good enough.
+    // The test utilities return an isolated namespace schema for every test. We'll
+    // leverage that in order to verify that we have indeed swapped the pool at
+    // runtime.
+    #[tokio::test]
+    async fn test_replace() {
+        // If running an integration test on your laptop, this requires that you have Postgres
+        // running and that you've done the sqlx migrations. See the README in this crate for
+        // info on setting it up.
+        maybe_skip_integration!();
+        println!("tests are running");
+
+        let db = connect_db().await.unwrap();
+        let db = HotSwapPool::new(db);
+
+        sqlx::query("CREATE TABLE IF NOT EXISTS test (id int)")
+            .execute(&db)
+            .await
+            .expect("executed");
+
+        sqlx::query("INSERT INTO test (id) VALUES (1)")
+            .execute(&db)
+            .await
+            .expect("executed");
+
+        // Acquire a connection before replacing the pool. We'll use this later to prove
+        // that having a connection from the old pool doesn't block our ability to
+        // replace the pool.
+        let mut conn = db.acquire().await.unwrap();
+
+        // Hot swap a new pool. `connect_db` creates a new schema namespace and sets
+        // it as the default namespace for all the connections in that pool; this
+        // effectively means we won't find this table anymore.
+        let new_pool = connect_db().await.unwrap();
+        db.replace(new_pool);
+
+        // create the table so we can test that the row doesn't exist, which is nicer
+        // than testing that we got a "relation not found" error.
+        sqlx::query("CREATE TABLE IF NOT EXISTS test (id int)")
+            .execute(&db)
+            .await
+            .expect("executed");
+
+        // Perform an actual query on the previous pool.
+        conn.fetch_one("SELECT id FROM test")
+            .await
+            .expect("got result");
+
+        // Perform a query on the new pool. This pool uses the schema whose test table
+        // has no rows.
+        let res = sqlx::query("SELECT id FROM test")
+            .fetch_optional(&db)
+            .await
+            .expect("got result");
+
+        assert!(res.is_none());
+    }
+}
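A subtlety worth noting in the `Acquire` impl above: `acquire` and `begin` hold the read lock only long enough to grab the current inner `Arc<Pool>`; the guard is dropped before the returned future is awaited, so a slow connection checkout never blocks a concurrent `replace`. Since `&HotSwapPool<DB>` implements `Acquire`, transactions work as with a plain `Pool`; a minimal sketch (placeholder DSN):

```rust
use sqlx::{pool::PoolOptions, Acquire, Postgres};
use sqlx_hotswap_pool::HotSwapPool;

async fn demo() -> Result<(), sqlx::Error> {
    let inner = PoolOptions::<Postgres>::new()
        .connect("postgresql://user:pw@localhost/db") // placeholder DSN
        .await?;
    let pool = HotSwapPool::new(inner);

    // `begin` clones the inner Arc<Pool>, so this transaction keeps using
    // the pool it started on even if `replace` is called mid-flight.
    let mut tx = pool.begin().await?;
    sqlx::query("SELECT 1").execute(&mut tx).await?;
    tx.commit().await?;
    Ok(())
}
```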
workspace-hack/Cargo.toml

@@ -50,6 +50,8 @@ serde = { version = "1", features = ["alloc", "derive", "rc", "serde_derive", "s
 serde_json = { version = "1", features = ["alloc", "indexmap", "preserve_order", "raw_value", "std"] }
 sha2 = { version = "0.9", features = ["std"] }
 smallvec = { version = "1", default-features = false, features = ["union"] }
+sqlx = { version = "0.5", features = ["_rt-tokio", "json", "macros", "migrate", "postgres", "runtime-tokio-native-tls", "sqlx-macros", "tls", "uuid"] }
+sqlx-core = { version = "0.5", default-features = false, features = ["_rt-tokio", "_tls-native-tls", "base64", "crc", "dirs", "hmac", "json", "md-5", "migrate", "postgres", "rand", "runtime-tokio-native-tls", "serde", "serde_json", "sha-1", "sha2", "tokio-stream", "uuid", "whoami"] }
 tokio = { version = "1", features = ["bytes", "fs", "io-std", "io-util", "libc", "macros", "memchr", "mio", "net", "num_cpus", "once_cell", "parking_lot", "rt", "rt-multi-thread", "signal", "signal-hook-registry", "sync", "time", "tokio-macros", "winapi"] }
 tokio-stream = { version = "0.1", features = ["fs", "net", "time"] }
 tokio-util-3b31131e45eafb45 = { package = "tokio-util", version = "0.6", features = ["codec", "io", "slab", "time"] }

@@ -91,6 +93,8 @@ serde = { version = "1", features = ["alloc", "derive", "rc", "serde_derive", "s
 serde_json = { version = "1", features = ["alloc", "indexmap", "preserve_order", "raw_value", "std"] }
 sha2 = { version = "0.9", features = ["std"] }
 smallvec = { version = "1", default-features = false, features = ["union"] }
+sqlx-core = { version = "0.5", default-features = false, features = ["_rt-tokio", "_tls-native-tls", "base64", "crc", "dirs", "hmac", "json", "md-5", "migrate", "postgres", "rand", "runtime-tokio-native-tls", "serde", "serde_json", "sha-1", "sha2", "tokio-stream", "uuid", "whoami"] }
+sqlx-macros = { version = "0.5", default-features = false, features = ["_rt-tokio", "json", "migrate", "postgres", "runtime-tokio-native-tls", "serde_json", "sha2", "uuid"] }
 syn = { version = "1", features = ["clone-impls", "derive", "extra-traits", "full", "parsing", "printing", "proc-macro", "quote", "visit", "visit-mut"] }
 tokio = { version = "1", features = ["bytes", "fs", "io-std", "io-util", "libc", "macros", "memchr", "mio", "net", "num_cpus", "once_cell", "parking_lot", "rt", "rt-multi-thread", "signal", "signal-hook-registry", "sync", "time", "tokio-macros", "winapi"] }
 tokio-stream = { version = "0.1", features = ["fs", "net", "time"] }