From cf4d53d01fa9c6bb07602a4457ffab96814c1b30 Mon Sep 17 00:00:00 2001
From: Dmitry Savitskiy
Date: Thu, 22 Aug 2024 23:14:03 +0300
Subject: [PATCH] feat(lvs): adding support for pool grow and related tests

* Pool grow API support added to LVS.
* Additional pool stats added.
* Added support for MD reservation hint.
* Related CLI subcommand added. 'pool list' subcommand extended to print
  additional pool info.
* Related tests added.
* For testing purposes, non-safe support for resizing a malloc bdev added.
* To simplify adding new pool and replica parameters, LVS inner
  create-related APIs refactored to accept parameters via PoolArgs and
  ReplicaArgs instead of function arguments.
* Added an example tool (lvs-eval) that creates an LVS with the given
  parameters and prints its internal configuration (allocated clusters,
  extent pages, etc.)

Signed-off-by: Dmitry Savitskiy
---
 Cargo.lock | 101 +++++++
 io-engine-tests/src/bdev.rs | 31 ++-
 io-engine-tests/src/compose/mod.rs | 9 +-
 io-engine-tests/src/lib.rs | 10 +-
 io-engine-tests/src/pool.rs | 21 +-
 io-engine/Cargo.toml | 5 +
 io-engine/examples/lvs-eval/display.rs | 245 +++++++++++++++++
 io-engine/examples/lvs-eval/main.rs | 196 ++++++++++++++
 io-engine/src/bdev/lvs.rs | 1 +
 io-engine/src/bdev/malloc.rs | 45 ++-
 io-engine/src/bdev_api.rs | 3 +
 io-engine/src/bin/io-engine-client/v1/bdev_cli.rs | 6 +
 io-engine/src/bin/io-engine-client/v1/pool_cli.rs | 135 ++++++++-
 io-engine/src/grpc/v1/pool.rs | 60 ++++
 io-engine/src/lvm/error.rs | 7 +
 io-engine/src/lvm/mod.rs | 34 ++-
 io-engine/src/lvm/vg_pool.rs | 7 +
 io-engine/src/lvs/lvs_error.rs | 16 ++
 io-engine/src/lvs/lvs_lvol.rs | 2 +-
 io-engine/src/lvs/lvs_store.rs | 256 ++++++++++++------
 io-engine/src/lvs/mod.rs | 57 ++--
 io-engine/src/pool_backend.rs | 39 ++-
 io-engine/src/subsys/config/pool.rs | 1 +
 io-engine/tests/lvs_grow.rs | 167 ++++++++++++
 io-engine/tests/lvs_import.rs | 1 +
 io-engine/tests/lvs_limits.rs | 1 +
 io-engine/tests/lvs_pool.rs | 54 ++--
 io-engine/tests/nexus_with_local.rs | 1 +
 io-engine/tests/replica_snapshot.rs | 1 +
 io-engine/tests/snapshot_lvol.rs | 1 +
 io-engine/tests/snapshot_nexus.rs | 1 +
 spdk-rs | 2 +-
 utils/dependencies | 2 +-
 33 files changed, 1367 insertions(+), 151 deletions(-)
 create mode 100644 io-engine/examples/lvs-eval/display.rs
 create mode 100644 io-engine/examples/lvs-eval/main.rs
 create mode 100644 io-engine/tests/lvs_grow.rs

diff --git a/Cargo.lock b/Cargo.lock
index ec6c3ab3f..c8fca308e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -825,6 +825,27 @@ dependencies = [
  "typenum",
 ]
 
+[[package]]
+name = "csv"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
+dependencies = [
+ "csv-core",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "curve25519-dalek"
 version = "4.1.0"
@@ -967,6 +988,27 @@ dependencies = [
  "crypto-common",
 ]
 
+[[package]]
+name = "dirs-next"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
+dependencies = [
+ "cfg-if",
+ "dirs-sys-next",
+]
+
+[[package]]
+name = "dirs-sys-next"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "doc-comment" version = "0.3.3" @@ -1006,6 +1048,12 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "enum-primitive-derive" version = "0.2.2" @@ -1642,6 +1690,7 @@ dependencies = [ "once_cell", "parking_lot", "pin-utils", + "prettytable-rs", "prost", "prost-derive", "rand", @@ -1926,6 +1975,16 @@ dependencies = [ "url", ] +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.4.0", + "libc", +] + [[package]] name = "libudev-sys" version = "0.1.4" @@ -2370,6 +2429,20 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "prettytable-rs" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eea25e07510aa6ab6547308ebe3c036016d162b8da920dbb079e3ba8acf3d95a" +dependencies = [ + "csv", + "encode_unicode", + "is-terminal", + "lazy_static", + "term", + "unicode-width", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2537,6 +2610,17 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_users" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +dependencies = [ + "getrandom", + "libredox", + "thiserror", +] + [[package]] name = "regex" version = "1.10.0" @@ -3129,6 +3213,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + [[package]] name = "termcolor" version = "1.3.0" @@ -3504,6 +3599,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" + [[package]] name = "unsafe-libyaml" version = "0.2.10" diff --git a/io-engine-tests/src/bdev.rs b/io-engine-tests/src/bdev.rs index a5c61b95c..68560f444 100644 --- a/io-engine-tests/src/bdev.rs +++ b/io-engine-tests/src/bdev.rs @@ -1,10 +1,26 @@ use super::compose::rpc::v1::{ - bdev::{Bdev, ListBdevOptions}, + bdev::{Bdev, CreateBdevRequest, ListBdevOptions}, SharedRpcHandle, Status, }; -pub async fn list_bdevs(rpc: &SharedRpcHandle) -> Result, Status> { +/// Creates a bdev. +pub async fn create_bdev( + rpc: SharedRpcHandle, + uri: &str, +) -> Result { + rpc.lock() + .await + .bdev + .create(CreateBdevRequest { + uri: uri.to_string(), + }) + .await + .map(|r| r.into_inner().bdev.unwrap()) +} + +/// Lists bdevs. 
+pub async fn list_bdevs(rpc: SharedRpcHandle) -> Result, Status> { rpc.lock() .await .bdev @@ -14,3 +30,14 @@ pub async fn list_bdevs(rpc: &SharedRpcHandle) -> Result, Status> { .await .map(|r| r.into_inner().bdevs) } + +/// Finds a bdev by its name. +pub async fn find_bdev_by_name( + rpc: SharedRpcHandle, + name: &str, +) -> Option { + match list_bdevs(rpc).await { + Err(_) => None, + Ok(nn) => nn.into_iter().find(|p| p.name == name), + } +} diff --git a/io-engine-tests/src/compose/mod.rs b/io-engine-tests/src/compose/mod.rs index 6daa7cc89..3fdb363e1 100644 --- a/io-engine-tests/src/compose/mod.rs +++ b/io-engine-tests/src/compose/mod.rs @@ -78,8 +78,15 @@ impl<'a> MayastorTest<'a> { } pub fn new(args: MayastorCliArgs) -> MayastorTest<'static> { + Self::new_ex(args, None) + } + + pub fn new_ex( + args: MayastorCliArgs, + log_level: Option<&str>, + ) -> MayastorTest<'static> { let (tx, rx) = bounded(1); - mayastor_test_init_ex(args.log_format.unwrap_or_default()); + mayastor_test_init_ex(args.log_format.unwrap_or_default(), log_level); let thdl = std::thread::Builder::new() .name("mayastor_master".into()) .spawn(move || { diff --git a/io-engine-tests/src/lib.rs b/io-engine-tests/src/lib.rs index d4498b3b0..33f515cb8 100644 --- a/io-engine-tests/src/lib.rs +++ b/io-engine-tests/src/lib.rs @@ -130,10 +130,10 @@ macro_rules! test_init { } pub fn mayastor_test_init() { - mayastor_test_init_ex(LogFormat::default()); + mayastor_test_init_ex(LogFormat::default(), None); } -pub fn mayastor_test_init_ex(log_format: LogFormat) { +pub fn mayastor_test_init_ex(log_format: LogFormat, log_level: Option<&str>) { fn binary_present(name: &str) -> Result { std::env::var("PATH").map(|paths| { paths @@ -151,7 +151,11 @@ pub fn mayastor_test_init_ex(log_format: LogFormat) { } }); - logger::init_ex("info,io_engine=DEBUG", log_format, None); + logger::init_ex( + log_level.unwrap_or("info,io_engine=DEBUG"), + log_format, + None, + ); io_engine::CPS_INIT!(); } diff --git a/io-engine-tests/src/pool.rs b/io-engine-tests/src/pool.rs index baff31ae8..486cbee21 100644 --- a/io-engine-tests/src/pool.rs +++ b/io-engine-tests/src/pool.rs @@ -1,7 +1,7 @@ pub use super::compose::rpc::v1::pool::Pool; use super::{ compose::rpc::v1::{ - pool::{CreatePoolRequest, ListPoolOptions}, + pool::{CreatePoolRequest, GrowPoolRequest, ListPoolOptions}, replica::{ListReplicaOptions, Replica}, SharedRpcHandle, Status, @@ -190,6 +190,7 @@ impl PoolBuilderRpc { .with_malloc_blk_size(bdev_name, size_mb, blk_size); self } + pub async fn create(&mut self) -> Result { self.rpc() .lock() @@ -201,11 +202,28 @@ impl PoolBuilderRpc { pooltype: 0, disks: vec![self.bdev.as_ref().unwrap().clone()], cluster_size: None, + md_args: None, }) .await .map(|r| r.into_inner()) } + pub async fn grow(&mut self) -> Result<(Pool, Pool), Status> { + self.rpc() + .lock() + .await + .pool + .grow_pool(GrowPoolRequest { + name: self.name(), + uuid: Some(self.uuid()), + }) + .await + .map(|r| { + let t = r.into_inner(); + (t.previous_pool.unwrap(), t.current_pool.unwrap()) + }) + } + pub async fn get_pool(&self) -> Result { let uuid = self.uuid(); list_pools(self.rpc()) @@ -263,6 +281,7 @@ impl PoolBuilderLocal { uuid: Some(self.uuid()), disks: vec![self.bdev.as_ref().unwrap().clone()], cluster_size: None, + md_args: None, backend: Default::default(), }) .await?; diff --git a/io-engine/Cargo.toml b/io-engine/Cargo.toml index e4e1df05a..0e564388e 100644 --- a/io-engine/Cargo.toml +++ b/io-engine/Cargo.toml @@ -42,6 +42,10 @@ path = "src/bin/jsonrpc.rs" name = 
"casperf" path = "src/bin/casperf.rs" +[[example]] +name = "lvs-eval" +path = "examples/lvs-eval/main.rs" + [dependencies] ansi_term = "0.12.1" async-channel = "1.9.0" @@ -125,4 +129,5 @@ version = "1.4.1" assert_matches = "1.5.0" io-engine-tests = { path = "../io-engine-tests" } libnvme-rs = { path = "../libnvme-rs", version = "0.1.0" } +prettytable-rs = "0.10.0" run_script = "0.10.1" diff --git a/io-engine/examples/lvs-eval/display.rs b/io-engine/examples/lvs-eval/display.rs new file mode 100644 index 000000000..81c53cb3b --- /dev/null +++ b/io-engine/examples/lvs-eval/display.rs @@ -0,0 +1,245 @@ +use io_engine::{ + core::{LogicalVolume, UntypedBdev}, + lvs::{Lvol, Lvs, LvsLvol}, +}; +use prettytable::{row, Table}; +use spdk_rs::libspdk::{ + spdk_bit_array, + spdk_bit_array_capacity, + spdk_bit_array_get, + spdk_bit_pool, + spdk_bit_pool_capacity, + spdk_bit_pool_is_allocated, + spdk_blob_calc_used_clusters, + spdk_blob_is_thin_provisioned, + spdk_blob_mut_data, + spdk_blob_store, +}; + +/// TODO +pub async fn print_lvs(lvs: &Lvs) { + print_separator("LVS", 0); + + print_bdev(lvs.base_bdev()); + print_lvs_data(lvs); + print_replicas(lvs); +} + +/// TODO +pub fn print_bdev(bdev: UntypedBdev) { + print_separator("Bdev", 0); + + let mut tab = Table::new(); + + tab.add_row(row!["name", bdev.name()]); + tab.add_row(row!["size", bdev.size_in_bytes(), "bytes"]); + tab.add_row(row![" |-", bdev.size_in_bytes() / (1024 * 1024), "Mb"]); + tab.add_row(row!["block_len", bdev.block_len(), "bytes"]); + tab.add_row(row!["num_blocks", bdev.num_blocks(), "blocks"]); + + print_table(tab); + + println!(); +} + +/// TODO +pub fn print_lvs_data(lvs: &Lvs) { + print_separator("Blob store", 0); + + let bs = unsafe { &*lvs.blob_store() }; + + let mut tab = Table::new(); + + tab.add_row(row!["md_start", bs.md_start, "pages"]); + tab.add_row(row!["md_len", bs.md_len, "pages"]); + tab.add_row(row!["cluster_sz", bs.cluster_sz, "bytes"]); + tab.add_row(row![" |-------", bs.cluster_sz / (1024 * 1024), "Mb"]); + tab.add_row(row!["total_clusters", bs.total_clusters]); + tab.add_row(row!["total_data_clusters", bs.total_data_clusters]); + tab.add_row(row![ + "md_clusters", + bs.total_clusters - bs.total_data_clusters + ]); + tab.add_row(row!["num_free_clusters", bs.num_free_clusters]); + tab.add_row(row!["pages_per_cluster", bs.pages_per_cluster]); + tab.add_row(row!["io_unit_size", bs.io_unit_size, "bytes"]); + tab.add_row(row![]); + tab.add_row(row!["page_size", lvs.page_size(), "bytes"]); + tab.add_row(row!["md_page_size", lvs.md_page_size(), "bytes"]); + tab.add_row(row!["md_pages", lvs.md_pages()]); + tab.add_row(row!["md_used_pages", lvs.md_used_pages()]); + + print_table(tab); + println!(); + + // Used MD pages. + println!("Used MD pages:"); + print!(" "); + print_used_array_bits(bs.used_md_pages, Some(bs.md_len)); + println!(); + + // Used clusters. + println!("Used clusters:"); + print!(" "); + print_used_pool_bits(bs.used_clusters, Some(bs.total_clusters as u32)); + println!(); + + // Used blob IDs. + println!("Used blob IDs:"); + print!(" "); + print_used_array_bits(bs.used_blobids, None); + println!(); + + // Open blobs. 
+ println!("Open blob IDs:"); + print!(" "); + print_used_array_bits(bs.open_blobids, None); + println!(); +} + +/// TODO +pub fn print_replicas(lvs: &Lvs) { + print_separator("Replicas", 0); + + for (idx, lvol) in lvs.lvols().unwrap().enumerate() { + print_separator(&format!("Replica #{idx}:"), 1); + print_replica(&lvol); + } + + print_separator("End of replicas", 0); +} + +/// TODO +pub fn print_replica(lvol: &Lvol) { + let blob = unsafe { &*lvol.blob_checked() }; + let bs = unsafe { &*blob.bs }; + + let mut tab = Table::new(); + + let num_allocated_clusters = + unsafe { spdk_blob_calc_used_clusters(blob as *const _ as *mut _) }; + + tab.add_row(row!["id", format!("0x{:x}", blob.id)]); + tab.add_row(row!["parent_id", format!("0x{:x}", blob.parent_id)]); + tab.add_row(row!["name", lvol.name()]); + tab.add_row(row!["uuid", lvol.uuid()]); + tab.add_row(row!["is thin", unsafe { + spdk_blob_is_thin_provisioned(blob as *const _ as *mut _) + }]); + tab.add_row(row!["num_clusters", blob.active.num_clusters]); + tab.add_row(row!["alloc clusters", num_allocated_clusters]); + tab.add_row(row![ + "size", + blob.active.num_clusters * bs.cluster_sz as u64, + "bytes" + ]); + + print_table(tab); + + println!(); + print_blob_data("Active data", bs, &blob.active); + + println!(); +} + +/// TODO +pub fn print_blob_data( + name: &str, + bs: &spdk_blob_store, + blob: &spdk_blob_mut_data, +) { + println!("{name}:"); + + // Clusters. + println!( + " Clusters: {} / {}", + blob.num_clusters, blob.num_allocated_clusters + ); + print!(" "); + for i in 0 .. blob.num_allocated_clusters as isize { + let lba = unsafe { *blob.clusters.offset(i) }; + let num = lba_to_cluster(bs, lba); + print!("0x{num:x} "); + } + println!("\n"); + + // LBAs. + println!( + " LBAs: {} / {}", + blob.num_clusters, blob.num_allocated_clusters + ); + print!(" "); + for i in 0 .. blob.num_allocated_clusters as isize { + let c = unsafe { *blob.clusters.offset(i) }; + print!("0x{c:x} "); + } + println!("\n"); + + // EPs. + println!( + " Extent_pages: {} / {}", + blob.num_extent_pages, blob.extent_pages_array_size + ); + print!(" "); + for i in 0 .. blob.extent_pages_array_size as isize { + let c = unsafe { *blob.extent_pages.offset(i) }; + print!("0x{c:x} "); + } + println!(); +} + +/// TODO +fn print_table(mut tab: Table) { + tab.set_titles(row!["Name", "Value", "Units"]); + tab.set_format(*prettytable::format::consts::FORMAT_NO_LINESEP_WITH_TITLE); + tab.printstd(); +} + +/// TODO +fn print_used_array_bits(ba: *const spdk_bit_array, cnt: Option) { + let cnt = cnt.unwrap_or_else(|| unsafe { spdk_bit_array_capacity(ba) }); + let mut total = 0; + + for i in 0 .. cnt { + let v = unsafe { spdk_bit_array_get(ba, i) }; + if v { + print!("0x{i:x} "); + total += 1; + } + } + + println!(); + println!(" Total: {total}"); +} + +/// TODO +fn print_used_pool_bits(bp: *const spdk_bit_pool, cnt: Option) { + let cnt = cnt.unwrap_or_else(|| unsafe { spdk_bit_pool_capacity(bp) }); + let mut total = 0; + + for i in 0 .. 
cnt { + let v = unsafe { spdk_bit_pool_is_allocated(bp, i) }; + if v { + print!("0x{i:x} "); + total += 1; + } + } + + println!(); + println!(" Total: {total}"); +} + +/// TODO +fn print_separator(title: &str, level: u8) { + let title = format!(" {title} "); + if level == 0 { + println!("{:=^1$}\n", title, 70); + } else { + println!("{:-^1$}\n", title, 50); + } +} + +/// TODO +fn lba_to_cluster(bs: &spdk_blob_store, lba: u64) -> u64 { + unsafe { lba / (bs.cluster_sz / (*bs.dev).blocklen) as u64 } +} diff --git a/io-engine/examples/lvs-eval/main.rs b/io-engine/examples/lvs-eval/main.rs new file mode 100644 index 000000000..e4d88013d --- /dev/null +++ b/io-engine/examples/lvs-eval/main.rs @@ -0,0 +1,196 @@ +mod display; + +use clap::Parser; +use version_info::{package_description, version_info_str}; + +use io_engine_tests::MayastorTest; + +use crate::display::print_lvs; +use io_engine::{ + core::{LogicalVolume, MayastorCliArgs}, + lvs::{Lvol, Lvs, LvsError, LvsLvol}, + pool_backend::{IPoolProps, PoolArgs, PoolMetadataArgs, ReplicaArgs}, +}; + +/// TODO +#[derive(Debug, Clone, Parser)] +#[clap( + name = package_description!(), + about = "LVS Evaluation", + version = version_info_str!(), +)] +pub struct CliArgs { + pub disk: String, + #[clap(short = 'r', default_value_t = 1)] + pub replicas: u32, + #[clap(short = 'N', default_value_t = 1)] + pub replica_clusters: u64, + /// Cluster size in Mb. + #[clap(short = 'C')] + pub cluster_size: Option, + /// Thin provisioned. + #[clap(short = 't', default_value_t = false)] + pub thin: bool, + /// MD reservation ratio. + #[clap(short = 'M')] + pub md_resv_ratio: Option, + /// Use extent table. + #[clap(short = 'E')] + pub use_extent_table: Option, + /// Pool name. + #[clap(default_value = "pool0")] + pub pool_name: String, + /// Destroy pool on exit (pool is exported by default). + #[clap(long = "destroy")] + pub destroy_pool: bool, + /// Create a temporary filler replica before each replica. + #[clap(short = 'F', default_value_t = false)] + pub fillers: bool, +} + +static mut G_USE_EXTENT_TABLE: bool = true; + +/// TODO +#[tokio::main(worker_threads = 4)] +async fn main() { + let args = CliArgs::parse(); + + unsafe { + G_USE_EXTENT_TABLE = args.use_extent_table.unwrap_or(true); + } + + let ms_args = MayastorCliArgs { + log_format: Some("nodate,nohost,color".parse().unwrap()), + reactor_mask: "0x3".into(), + ..Default::default() + }; + + // let ms = MayastorTest::new_ex(ms_args, + // Some("debug,io_engine::lvs::lvs_store=info")); + let ms = MayastorTest::new_ex(ms_args, Some("info")); + + ms.spawn(async move { + println!(); + println!("-------------------------"); + println!("{args:#?}"); + println!("-------------------------"); + + println!("Creating LVS ..."); + let lvs = create_lvs(&args).await; + + let mut fillers = Vec::new(); + + // Create replicas. + println!("Creating {n} replicas ...", n = args.replicas); + for idx in 0 .. args.replicas { + if args.fillers { + match create_filler_replica(&lvs, idx, 1).await { + Ok(lvol) => fillers.push(lvol), + Err(_) => break, + } + } + + if create_replica(&lvs, idx, args.replica_clusters, args.thin) + .await + .is_err() + { + break; + } + } + + println!("Created pool."); + print_lvs(&lvs).await; + + if !fillers.is_empty() { + // Destroy fillers. 
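+            // (removing the fillers leaves gaps in the cluster and
+            // extent-page allocation, which the printout below makes
+            // visible)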
+ for lvol in fillers.into_iter() { + println!("Destroying filler '{name}'", name = lvol.name()); + lvol.destroy().await.unwrap(); + } + + println!("Destroyed fillers."); + print_lvs(&lvs).await; + } + + if args.destroy_pool { + println!("Destroying pool ..."); + lvs.destroy().await.unwrap(); + } else { + println!("Exporting pool ..."); + lvs.export().await.unwrap(); + } + }) + .await; +} + +/// TODO +async fn create_lvs(args: &CliArgs) -> Lvs { + const POOL_UUID: &str = "b8589388-295c-4859-a88a-72a60d3902e8"; + + let lvs_args = PoolArgs { + name: args.pool_name.clone(), + disks: vec![args.disk.to_string()], + uuid: Some(POOL_UUID.to_string()), + cluster_size: args.cluster_size.map(|sz| sz * 1024 * 1024), + md_args: Some(PoolMetadataArgs { + md_resv_ratio: args.md_resv_ratio, + }), + backend: Default::default(), + }; + + Lvs::create_or_import(lvs_args.clone()).await.unwrap() +} + +/// TODO +async fn create_replica( + lvs: &Lvs, + serial: u32, + n: u64, + thin: bool, +) -> Result { + let name = format!("replica_{serial}"); + let uuid = format!("45c23e54-dc86-45f6-b55b-e44d05f1{serial:04}"); + + create_lvol(lvs, &name, &uuid, n, thin).await +} + +/// TODO +async fn create_filler_replica( + lvs: &Lvs, + serial: u32, + n: u64, +) -> Result { + let name = format!("filler_{serial}"); + let uuid = format!("56723e54-dc86-45f6-b55b-e44d05f1{serial:04}"); + + create_lvol(lvs, &name, &uuid, n, false).await +} + +/// TODO +async fn create_lvol( + lvs: &Lvs, + name: &str, + uuid: &str, + n: u64, + thin: bool, +) -> Result { + let et = unsafe { G_USE_EXTENT_TABLE }; + + println!( + "Creating lvol '{name}': size = {n} cluster(s), thin: {thin}, et: {et}", + ); + + let opts = ReplicaArgs { + name: name.to_owned(), + size: lvs.cluster_size() as u64 * n, + uuid: uuid.to_string(), + thin, + entity_id: None, + use_extent_table: Some(et), + }; + + lvs.create_lvol_with_opts(opts).await.map_err(|err| { + println!("Failed to create lvol '{name}': {err}"); + err + }) +} diff --git a/io-engine/src/bdev/lvs.rs b/io-engine/src/bdev/lvs.rs index 8bba5f590..075650ae8 100644 --- a/io-engine/src/bdev/lvs.rs +++ b/io-engine/src/bdev/lvs.rs @@ -215,6 +215,7 @@ impl Lvs { disks: vec![self.disk.to_owned()], uuid: None, cluster_size: None, + md_args: None, backend: PoolBackend::Lvs, }; match &self.mode { diff --git a/io-engine/src/bdev/malloc.rs b/io-engine/src/bdev/malloc.rs index 526f3a6dd..cf509281f 100644 --- a/io-engine/src/bdev/malloc.rs +++ b/io-engine/src/bdev/malloc.rs @@ -20,6 +20,7 @@ use spdk_rs::{ create_malloc_disk, delete_malloc_disk, malloc_bdev_opts, + resize_malloc_disk, spdk_bdev, SPDK_DIF_DISABLE, }, @@ -45,6 +46,8 @@ pub struct Malloc { blk_size: u32, /// uuid of the spdk bdev uuid: Option, + /// Enable resizing if the bdev already exists + resizing: bool, } impl Debug for Malloc { @@ -105,6 +108,8 @@ impl TryFrom<&Url> for Malloc { }, )?; + let resizing = parameters.remove("resize").is_some(); + reject_unknown_parameters(uri, parameters)?; // Validate parameters. 
@@ -141,6 +146,7 @@ impl TryFrom<&Url> for Malloc { } as u64, blk_size, uuid, + resizing, }) } } @@ -157,9 +163,13 @@ impl CreateDestroy for Malloc { async fn create(&self) -> Result { if UntypedBdev::lookup_by_name(&self.name).is_some() { - return Err(BdevError::BdevExists { - name: self.name.clone(), - }); + return if self.resizing { + self.try_resize() + } else { + Err(BdevError::BdevExists { + name: self.name.clone(), + }) + }; } debug!("{:?}: creating bdev", self); @@ -245,3 +255,32 @@ impl CreateDestroy for Malloc { } } } + +impl Malloc { + fn try_resize(&self) -> Result::Error> { + debug!("{:?}: resizing existing bdev", self); + + let cname = self.name.clone().into_cstring(); + let new_sz_mb = self.num_blocks * self.blk_size as u64 / (1024 * 1024); + + let errno = unsafe { + resize_malloc_disk( + cname.as_ptr() as *mut std::os::raw::c_char, + new_sz_mb, + ) + }; + + if errno != 0 { + let err = BdevError::ResizeBdevFailed { + source: Errno::from_i32(errno.abs()), + name: self.name.clone(), + }; + + error!("{:?} error: {}", self, err.verbose()); + + return Err(err); + } + + Ok(self.name.clone()) + } +} diff --git a/io-engine/src/bdev_api.rs b/io-engine/src/bdev_api.rs index cfbcc05e8..43fab5254 100644 --- a/io-engine/src/bdev_api.rs +++ b/io-engine/src/bdev_api.rs @@ -88,6 +88,9 @@ pub enum BdevError { // Generic destruction failure. #[snafu(display("Failed to destroy a BDEV '{}'", name))] DestroyBdevFailed { source: Errno, name: String }, + // Generic resize failure. + #[snafu(display("Failed to resize a BDEV '{}'", name))] + ResizeBdevFailed { source: Errno, name: String }, #[snafu(display("Failed to create BDEV '{name}': {error}"))] CreateBdevFailedStr { name: String, error: String }, #[snafu(display("Failed to destroy BDEV '{name}': {error}"))] diff --git a/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs b/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs index a2d2c4841..db4bf67fe 100644 --- a/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs @@ -6,6 +6,7 @@ use crate::{ ClientError, GrpcStatus, }; +use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; use colored_json::prelude::*; use io_engine_api::v1 as v1rpc; @@ -103,6 +104,7 @@ async fn list(mut ctx: Context, _args: &ArgMatches) -> crate::Result<()> { "UUID", "NUM_BLOCKS", "BLK_SIZE", + "CAPACITY", "CLAIMED_BY", "NAME", "SHARE_URI", @@ -110,10 +112,14 @@ async fn list(mut ctx: Context, _args: &ArgMatches) -> crate::Result<()> { let table = bdevs .iter() .map(|bdev| { + let cap = Byte::from_bytes( + (bdev.num_blocks * bdev.blk_size as u64).into(), + ); vec![ bdev.uuid.to_string(), bdev.num_blocks.to_string(), bdev.blk_size.to_string(), + ctx.units(cap), bdev.claimed_by.to_string(), bdev.name.to_string(), bdev.share_uri.to_string(), diff --git a/io-engine/src/bin/io-engine-client/v1/pool_cli.rs b/io-engine/src/bin/io-engine-client/v1/pool_cli.rs index 41f03661d..7328e85ca 100644 --- a/io-engine/src/bin/io-engine-client/v1/pool_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/pool_cli.rs @@ -35,6 +35,12 @@ pub fn subcommands() -> Command { .required(false) .help("SPDK cluster size"), ) + .arg( + Arg::new("md-resv-ratio") + .long("md-resv-ratio") + .required(false) + .help("Metadata reservation ratio"), + ) .arg( Arg::new("disk") .required(true) @@ -134,6 +140,31 @@ pub fn subcommands() -> Command { .default_value(PoolType::Lvs.as_ref()), ); + let grow = Command::new("grow") + .about("Grow a storage pool to fill the entire underlying device") + .arg( + 
Arg::new("name") + .required(true) + .index(1) + .help("Storage pool name"), + ) + .arg( + Arg::new("uuid") + .short('u') + .long("uuid") + .required(false) + .help("Storage pool uuid"), + ) + .arg( + Arg::new("type") + .short('t') + .long("type") + .help("The type of the pool") + .required(false) + .value_parser(PoolType::types().to_vec()) + .default_value(PoolType::Lvs.as_ref()), + ); + let list = Command::new("list") .about("List storage pools") .arg(Arg::new("name").required(false).help("Storage pool name")) @@ -155,6 +186,7 @@ pub fn subcommands() -> Command { .subcommand(import) .subcommand(destroy) .subcommand(export) + .subcommand(grow) .subcommand(list) } @@ -164,6 +196,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("import", args) => import(ctx, args).await, ("destroy", args) => destroy(ctx, args).await, ("export", args) => export(ctx, args).await, + ("grow", args) => grow(ctx, args).await, ("list", args) => list(ctx, args).await, (cmd, _) => { Err(Status::not_found(format!("command {cmd} does not exist"))) @@ -210,6 +243,19 @@ async fn create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { None => None, }; + let md_resv_ratio = match matches.get_one::("md-resv-ratio") { + Some(s) => match s.parse::() { + Ok(v) => Some(v), + Err(err) => { + return Err(Status::invalid_argument(format!( + "Bad metadata reservation hint '{err}'" + ))) + .context(GrpcStatus); + } + }, + None => None, + }; + let response = ctx .v1 .pool @@ -219,6 +265,9 @@ async fn create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { disks: disks_list, pooltype: v1rpc::pool::PoolType::from(pooltype) as i32, cluster_size, + md_args: Some(v1rpc::pool::PoolMetadataArgs { + md_resv_ratio, + }), }) .await .context(GrpcStatus)?; @@ -371,6 +420,37 @@ async fn export(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { Ok(()) } +async fn grow(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { + let name = matches + .get_one::("name") + .ok_or_else(|| ClientError::MissingValue { + field: "name".to_string(), + })? 
+ .to_owned(); + let uuid = matches.get_one::("uuid").cloned(); + + let response = ctx + .v1 + .pool + .grow_pool(v1rpc::pool::GrowPoolRequest { + name: name.clone(), + uuid, + }) + .await + .context(GrpcStatus)?; + + let old_cap = response.get_ref().previous_pool.as_ref().unwrap().capacity; + let new_cap = response.get_ref().current_pool.as_ref().unwrap().capacity; + + if old_cap == new_cap { + println!("Pool capacity did not change: {new_cap} bytes"); + } else { + println!("Pool capacity was {old_cap}, now {new_cap} bytes"); + } + + Ok(()) +} + async fn list(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ctx.v2("Requesting a list of pools"); @@ -413,24 +493,77 @@ async fn list(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { return Ok(()); } + fn percentage_str(a: u64, b: u64) -> String { + if b > 0 { + let v = 100.0 * a as f64 / b as f64; + format!("{v:.2}%") + } else { + "-".to_string() + } + } + let table = pools .iter() .map(|p| { let cap = Byte::from_bytes(p.capacity.into()); let used = Byte::from_bytes(p.used.into()); let state = pool_state_to_str(p.state); + let cluster = Byte::from_bytes(p.cluster_size.into()); + let page_size = Byte::from_bytes(p.page_size.into()); + let disk_cap = Byte::from_bytes(p.disk_capacity.into()); + + let (md_page_size, md_pages, md_used_pages, md_usage) = + if let Some(t) = p.md_info.as_ref() { + ( + ctx.units(t.md_page_size.into()), + t.md_pages.to_string(), + t.md_used_pages.to_string(), + percentage_str(t.md_used_pages, t.md_pages), + ) + } else { + ( + "-".to_string(), + "-".to_string(), + "-".to_string(), + "-".to_string(), + ) + }; + vec![ p.name.clone(), p.uuid.clone(), state.to_string(), ctx.units(cap), ctx.units(used), + percentage_str(p.used, p.capacity), + ctx.units(cluster), + ctx.units(page_size), + md_page_size, + md_pages, + md_used_pages, + md_usage, p.disks.join(" "), + ctx.units(disk_cap), ] }) .collect(); ctx.print_list( - vec!["NAME", "UUID", "STATE", ">CAPACITY", ">USED", "DISKS"], + vec![ + "NAME", + "UUID", + "STATE", + "CAPACITY", + "USED", + "USED%", + "CLUSTER_SIZE", + "PAGE_SIZE", + "MD_PAGE_SIZE", + "MD_PAGES", + "MD_USED_PAGES", + "MD_USED%", + "DISKS", + "DISK_CAPACITY", + ], table, ); } diff --git a/io-engine/src/grpc/v1/pool.rs b/io-engine/src/grpc/v1/pool.rs index accf35dce..f0616768f 100644 --- a/io-engine/src/grpc/v1/pool.rs +++ b/io-engine/src/grpc/v1/pool.rs @@ -16,6 +16,7 @@ use crate::{ }, lvs::{BsError, LvsError}, pool_backend::{ + self, FindPoolArgs, IPoolFactory, ListPoolArgs, @@ -71,6 +72,11 @@ impl From for FindPoolArgs { Self::name_uuid(value.name, &value.uuid) } } +impl From for FindPoolArgs { + fn from(value: GrowPoolRequest) -> Self { + Self::name_uuid(value.name, &value.uuid) + } +} /// RPC service for mayastor pool operations #[derive(Debug, Clone)] @@ -183,10 +189,18 @@ impl TryFrom for PoolArgs { disks: args.disks, uuid: args.uuid, cluster_size: args.cluster_size, + md_args: args.md_args.map(|md| md.into()), backend: backend.into(), }) } } +impl From for pool_backend::PoolMetadataArgs { + fn from(params: PoolMetadataArgs) -> Self { + Self { + md_resv_ratio: params.md_resv_ratio, + } + } +} impl From for PoolBackend { fn from(value: PoolType) -> Self { match value { @@ -256,6 +270,7 @@ impl TryFrom for PoolArgs { disks: args.disks, uuid: args.uuid, cluster_size: None, + md_args: None, backend: backend.into(), }) } @@ -299,6 +314,7 @@ impl PoolGrpc { uuid: args.uuid, thin: args.thin, entity_id: args.entity_id, + use_extent_table: None, }) .await { @@ -337,6 +353,10 @@ impl 
PoolGrpc { self.pool.export().await?; Ok(()) } + async fn grow(&self) -> Result<(), tonic::Status> { + self.pool.grow().await?; + Ok(()) + } /// Access the `PoolOps` from this wrapper. pub(crate) fn as_ops(&self) -> &dyn PoolOps { self.pool.deref() @@ -361,6 +381,18 @@ impl From<&dyn PoolOps> for Pool { committed: value.committed(), pooltype: PoolType::from(value.pool_type()) as i32, cluster_size: value.cluster_size(), + page_size: value.page_size(), + disk_capacity: value.disk_capacity(), + md_info: value.md_props().map(|md| md.into()), + } + } +} +impl From for PoolMetadataInfo { + fn from(value: pool_backend::PoolMetadataInfo) -> Self { + Self { + md_page_size: value.md_page_size, + md_pages: value.md_pages, + md_used_pages: value.md_used_pages, } } } @@ -623,4 +655,32 @@ impl PoolRpc for PoolService { ) .await } + + #[named] + async fn grow_pool( + &self, + request: Request, + ) -> GrpcResult { + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + crate::spdk_submit!(async move { + info!("{:?}", request.get_ref()); + + let pool = + GrpcPoolFactory::finder(request.into_inner()).await?; + + let previous_pool = Pool::from(pool.as_ops()); + pool.grow().await.map_err(Into::::into)?; + let current_pool = Pool::from(pool.as_ops()); + + Ok(GrowPoolResponse { + previous_pool: Some(previous_pool), + current_pool: Some(current_pool), + }) + }) + }, + ) + .await + } } diff --git a/io-engine/src/lvm/error.rs b/io-engine/src/lvm/error.rs index 1a503a202..3a4b27066 100644 --- a/io-engine/src/lvm/error.rs +++ b/io-engine/src/lvm/error.rs @@ -58,6 +58,10 @@ pub enum Error { NoSpace { error: String }, #[snafu(display("Snapshots are not currently supported for LVM volumes"))] SnapshotNotSup {}, + #[snafu(display( + "Pool expansion is not currently supported for LVM volumes" + ))] + GrowNotSup {}, } impl ToErrno for Error { @@ -129,6 +133,9 @@ impl ToErrno for Error { Error::SnapshotNotSup { .. } => Errno::ENOTSUP, + Error::GrowNotSup { + .. 
+ } => Errno::ENOTSUP, } } } diff --git a/io-engine/src/lvm/mod.rs b/io-engine/src/lvm/mod.rs index 646434832..19d9059b2 100644 --- a/io-engine/src/lvm/mod.rs +++ b/io-engine/src/lvm/mod.rs @@ -68,6 +68,7 @@ use crate::{ ListPoolArgs, PoolArgs, PoolBackend, + PoolMetadataInfo, PoolOps, ReplicaArgs, }, @@ -150,6 +151,7 @@ impl PoolOps for VolumeGroup { .await?; Ok(Box::new(replica)) } + async fn destroy( self: Box, ) -> Result<(), crate::pool_backend::Error> { @@ -163,6 +165,10 @@ impl PoolOps for VolumeGroup { VolumeGroup::export(&mut self).await?; Ok(()) } + + async fn grow(&self) -> Result<(), crate::pool_backend::Error> { + Err(Error::GrowNotSup {}.into()) + } } #[async_trait::async_trait(?Send)] @@ -312,6 +318,10 @@ impl SnapshotOps for LogicalVolume { } impl IPoolProps for VolumeGroup { + fn pool_type(&self) -> PoolBackend { + PoolBackend::Lvm + } + fn name(&self) -> &str { self.name() } @@ -324,24 +334,32 @@ impl IPoolProps for VolumeGroup { self.disks().clone() } - fn used(&self) -> u64 { - self.used() + fn disk_capacity(&self) -> u64 { + self.capacity() } - fn committed(&self) -> u64 { - self.committed() + fn cluster_size(&self) -> u32 { + self.cluster_size() as u32 + } + + fn page_size(&self) -> u32 { + self.page_size() as u32 } fn capacity(&self) -> u64 { self.capacity() } - fn pool_type(&self) -> PoolBackend { - PoolBackend::Lvm + fn used(&self) -> u64 { + self.used() } - fn cluster_size(&self) -> u32 { - self.cluster_size() as u32 + fn committed(&self) -> u64 { + self.committed() + } + + fn md_props(&self) -> Option { + None } } diff --git a/io-engine/src/lvm/vg_pool.rs b/io-engine/src/lvm/vg_pool.rs index 02344357e..c1eb7d250 100644 --- a/io-engine/src/lvm/vg_pool.rs +++ b/io-engine/src/lvm/vg_pool.rs @@ -358,11 +358,18 @@ impl VolumeGroup { pub(crate) fn committed(&self) -> u64 { self.size } + /// Get the volume group cluster size. pub(crate) fn cluster_size(&self) -> u64 { 4 * 1024 * 1024 } + /// Get the volume group page size. + pub(crate) fn page_size(&self) -> u64 { + // TODO: return correct value here + 4 * 1024 + } + /// Get the volume group available capacity. pub(crate) fn available(&self) -> u64 { self.free diff --git a/io-engine/src/lvs/lvs_error.rs b/io-engine/src/lvs/lvs_error.rs index 982aeaf22..dda40e1ad 100644 --- a/io-engine/src/lvs/lvs_error.rs +++ b/io-engine/src/lvs/lvs_error.rs @@ -136,6 +136,11 @@ pub enum LvsError { source: BdevError, name: String, }, + #[snafu(display("{source}, failed to grow pool {name}"))] + Grow { + source: BsError, + name: String, + }, #[snafu(display("{}", msg))] PoolNotFound { source: BsError, @@ -161,6 +166,11 @@ pub enum LvsError { name: String, msg: String, }, + #[snafu(display("pool {name}: invalid metadata parameter: {msg}"))] + InvalidMetadataParam { + name: String, + msg: String, + }, #[snafu(display("lvol exists {}", name))] RepExists { source: BsError, @@ -291,6 +301,9 @@ impl ToErrno for LvsError { Self::Destroy { .. } => Errno::ENXIO, + Self::Grow { + .. + } => Errno::ENXIO, Self::PoolNotFound { source, .. } => source.to_errno(), @@ -303,6 +316,9 @@ impl ToErrno for LvsError { Self::InvalidClusterSize { source, .. } => source.to_errno(), + Self::InvalidMetadataParam { + .. + } => Errno::EINVAL, Self::RepExists { source, .. 
            } => source.to_errno(),
diff --git a/io-engine/src/lvs/lvs_lvol.rs b/io-engine/src/lvs/lvs_lvol.rs
index 5d4a4b634..8777c2359 100644
--- a/io-engine/src/lvs/lvs_lvol.rs
+++ b/io-engine/src/lvs/lvs_lvol.rs
@@ -286,7 +286,7 @@ impl Lvol {
     /// TODO
     #[inline(always)]
-    fn as_inner_ref(&self) -> &spdk_lvol {
+    pub fn as_inner_ref(&self) -> &spdk_lvol {
         unsafe { self.inner.as_ref() }
     }
 
diff --git a/io-engine/src/lvs/lvs_store.rs b/io-engine/src/lvs/lvs_store.rs
index 4c4009550..fa5de2653 100644
--- a/io-engine/src/lvs/lvs_store.rs
+++ b/io-engine/src/lvs/lvs_store.rs
@@ -11,18 +11,25 @@ use events_api::event::EventAction;
 use futures::channel::oneshot;
 use nix::errno::Errno;
 use pin_utils::core_reexport::fmt::Formatter;
+
 use spdk_rs::libspdk::{
+    spdk_bdev_update_bs_blockcnt,
     spdk_blob_store,
     spdk_bs_free_cluster_count,
     spdk_bs_get_cluster_size,
+    spdk_bs_get_md_len,
+    spdk_bs_get_page_size,
+    spdk_bs_get_used_md,
     spdk_bs_total_data_cluster_count,
     spdk_lvol,
+    spdk_lvol_opts,
+    spdk_lvol_opts_init,
     spdk_lvol_store,
+    spdk_lvs_grow_live,
     vbdev_get_lvol_store_by_name,
     vbdev_get_lvol_store_by_uuid,
     vbdev_get_lvs_bdev_by_lvs,
-    vbdev_lvol_create,
-    vbdev_lvol_create_with_uuid,
+    vbdev_lvol_create_with_opts,
     vbdev_lvs_create,
     vbdev_lvs_create_with_uuid,
     vbdev_lvs_destruct,
@@ -62,7 +69,7 @@ use crate::{
         lvs_lvol::{LvsLvol, WIPE_SUPER_LEN},
         LvolSnapshotDescriptor,
     },
-    pool_backend::PoolArgs,
+    pool_backend::{PoolArgs, ReplicaArgs},
 };
 
 static ROUND_TO_MB: u32 = 1024 * 1024;
@@ -100,7 +107,7 @@ impl Lvs {
     /// TODO
     #[inline(always)]
-    unsafe fn as_inner_ptr(&self) -> *mut spdk_lvol_store {
+    pub fn as_inner_ptr(&self) -> *mut spdk_lvol_store {
         self.inner.as_ptr()
     }
 
@@ -112,7 +119,7 @@ impl Lvs {
     /// TODO
     #[inline(always)]
-    pub(super) fn blob_store(&self) -> *mut spdk_blob_store {
+    pub fn blob_store(&self) -> *mut spdk_blob_store {
         self.as_inner_ref().blobstore
     }
 
@@ -226,6 +233,26 @@ impl Lvs {
         unsafe { spdk_bs_get_cluster_size(blobs) }
     }
 
+    /// Returns blobstore page size.
+    pub fn page_size(&self) -> u64 {
+        unsafe { spdk_bs_get_page_size(self.blob_store()) }
+    }
+
+    /// Returns blobstore metadata page size.
+    pub fn md_page_size(&self) -> u64 {
+        unsafe { spdk_bs_get_page_size(self.blob_store()) }
+    }
+
+    /// TODO
+    pub fn md_pages(&self) -> u64 {
+        unsafe { spdk_bs_get_md_len(self.blob_store()) }
+    }
+
+    /// TODO
+    pub fn md_used_pages(&self) -> u64 {
+        unsafe { spdk_bs_get_used_md(self.blob_store()) }
+    }
+
     /// returns the UUID of the lvs
     pub fn uuid(&self) -> String {
         let t = unsafe { self.as_inner_ref().uuid.u.raw };
@@ -409,41 +436,64 @@ impl Lvs {
         }
     }
 
-    /// Create a pool on base bdev
-    pub async fn create(
-        name: &str,
-        bdev: &str,
-        uuid: Option<String>,
-        cluster_size: Option<u32>,
+    /// Converts floating point metadata reservation ratio into SPDK's format.
+    fn mdp_ratio(args: &PoolArgs) -> Result<u32, LvsError> {
+        if let Some(h) = args.md_args.as_ref().and_then(|p| p.md_resv_ratio) {
+            if h > 0.0 {
+                Ok((h * 100.0) as u32)
+            } else {
+                Err(LvsError::InvalidMetadataParam {
+                    name: args.name.clone(),
+                    msg: format!("bad metadata reservation ratio: {h}"),
+                })
+            }
+        } else {
+            Ok(0)
+        }
+    }
+
+    /// Creates a pool on base bdev.
+    /// The caller must ensure the base bdev exists.
+    /// This function is made public for test purposes.
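+    ///
+    /// A usage sketch, mirroring the tests (the `aio` disk URI here is
+    /// illustrative only):
+    ///
+    /// ```ignore
+    /// let lvs = Lvs::create_from_args_inner(PoolArgs {
+    ///     name: "tpool".to_string(),
+    ///     disks: vec!["aio:///tmp/disk1.img".to_string()],
+    ///     uuid: None,
+    ///     cluster_size: None,
+    ///     md_args: None,
+    ///     backend: PoolBackend::Lvs,
+    /// })
+    /// .await?;
+    /// ```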
+ pub async fn create_from_args_inner( + args: PoolArgs, ) -> Result { - let pool_name = name.into_cstring(); + assert_eq!(args.disks.len(), 1); + let bdev = args.disks[0].clone(); + + let pool_name = args.name.clone().into_cstring(); let bdev_name = bdev.into_cstring(); - let cluster_size = if let Some(cluster_size) = cluster_size { + + let cluster_size = if let Some(cluster_size) = args.cluster_size { if cluster_size % ROUND_TO_MB == 0 { cluster_size } else { return Err(LvsError::InvalidClusterSize { source: BsError::InvalidArgument {}, - name: name.to_string(), + name: args.name, msg: format!("{cluster_size}, not multiple of 1MiB"), }); } } else { DEFAULT_CLUSTER_SIZE }; + if cluster_size > MAX_CLUSTER_SIZE { return Err(LvsError::InvalidClusterSize { source: BsError::InvalidArgument {}, - name: name.to_string(), + name: args.name, msg: format!( "{cluster_size}, larger than max limit {MAX_CLUSTER_SIZE}" ), }); } + + let mdp_ratio = Self::mdp_ratio(&args)?; + let (sender, receiver) = pair::>(); unsafe { - if let Some(uuid) = uuid { - let cuuid = uuid.into_cstring(); + if let Some(uuid) = &args.uuid { + let cuuid = uuid.clone().into_cstring(); vbdev_lvs_create_with_uuid( bdev_name.as_ptr(), pool_name.as_ptr(), @@ -456,7 +506,7 @@ impl Lvs { // lvols tend to be small so there the overhead is // acceptable. LVS_CLEAR_WITH_NONE, - 0, // num_md_pages_per_cluster_ratio + mdp_ratio, Some(Self::lvs_cb), cb_arg(sender), ) @@ -472,7 +522,7 @@ impl Lvs { // lvols tend to be small so there the overhead is // acceptable. LVS_CLEAR_WITH_NONE, - 0, // num_md_pages_per_cluster_ratio + mdp_ratio, Some(Self::lvs_cb), cb_arg(sender), ) @@ -480,7 +530,7 @@ impl Lvs { } .to_result(|e| LvsError::PoolCreate { source: BsError::from_i32(e), - name: name.to_string(), + name: args.name.clone(), })?; receiver @@ -488,22 +538,23 @@ impl Lvs { .expect("Cancellation is not supported") .map_err(|err| LvsError::PoolCreate { source: BsError::from_errno(err), - name: name.to_string(), + name: args.name.clone(), })?; - match Self::lookup(name) { + match Self::lookup(&args.name) { Some(pool) => { info!("{:?}: new lvs created successfully", pool); Ok(pool) } None => Err(LvsError::PoolCreate { source: BsError::LvolNotFound {}, - name: name.to_string(), + name: args.name.clone(), }), } } - /// imports the pool if it exists, otherwise try to create it + /// Imports the pool if it exists, otherwise tries to create a new pool. + /// This function creates the underlying bdev if it does not exist. #[tracing::instrument(level = "debug", err)] pub async fn create_or_import(args: PoolArgs) -> Result { let disk = Self::parse_disk(args.disks.clone())?; @@ -513,13 +564,14 @@ impl Lvs { args.name, disk ); - let parsed = uri::parse(&disk).map_err(|e| LvsError::InvalidBdev { - source: e, - name: args.name.clone(), - })?; + let bdev_ops = + uri::parse(&disk).map_err(|e| LvsError::InvalidBdev { + source: e, + name: args.name.clone(), + })?; if let Some(pool) = Self::lookup(&args.name) { - return if pool.base_bdev().name() == parsed.get_name() { + return if pool.base_bdev().name() == bdev_ops.get_name() { Err(LvsError::PoolCreate { source: BsError::VolAlreadyExists {}, name: args.name.clone(), @@ -532,14 +584,15 @@ impl Lvs { }; } - let bdev = match parsed.create().await { + // Create the underlying ndev. + let bdev_name = match bdev_ops.create().await { Err(e) => match e { BdevError::BdevExists { .. - } => Ok(parsed.get_name()), + } => Ok(bdev_ops.get_name()), BdevError::CreateBdevInvalidParams { source, .. 
- } if source == Errno::EEXIST => Ok(parsed.get_name()), + } if source == Errno::EEXIST => Ok(bdev_ops.get_name()), _ => { tracing::error!("Failed to create pool bdev: {e:?}"); Err(LvsError::InvalidBdev { @@ -557,20 +610,18 @@ impl Lvs { Err(LvsError::Import { source, .. }) if matches!(source, BsError::CannotImportLvs {}) => { - match Self::create( - &args.name, - &bdev, - args.uuid, - args.cluster_size, - ) + match Self::create_from_args_inner(PoolArgs { + disks: vec![bdev_name.clone()], + ..args + }) .await { Err(create) => { - let _ = parsed.destroy().await.map_err(|_e| { + let _ = bdev_ops.destroy().await.map_err(|_e| { // we failed to delete the base_bdev be loud about it // there is not much we can do about it here, likely // some desc is still holding on to it or something. - error!("failed to delete base_bdev {} after failed pool creation", bdev); + error!("failed to delete base_bdev {bdev_name} after failed pool creation"); }); Err(create) } @@ -731,6 +782,35 @@ impl Lvs { Ok(()) } + /// Grows the online (live) pool. + #[tracing::instrument(level = "debug", err)] + pub async fn grow(&self) -> Result<(), LvsError> { + info!("{self:?}: growing lvs..."); + + let (s, r) = pair::(); + + unsafe { + let lvs = self.as_inner_ptr(); + + // Update block count on spdk_bs_bdev. + spdk_bdev_update_bs_blockcnt((*lvs).bs_dev); + + // Grow the LVS. + spdk_lvs_grow_live(lvs, Some(Self::lvs_op_cb), cb_arg(s)); + } + + r.await + .expect("callback gone while growing lvs") + .to_result(|e| LvsError::Grow { + source: BsError::from_i32(e), + name: self.name().to_string(), + })?; + + info!("{self:?}: lvs has been grown successfully"); + + Ok(()) + } + /// return an iterator for enumerating all snapshots that reside on the pool pub fn snapshots( &self, @@ -779,6 +859,7 @@ impl Lvs { None } } + /// create a new lvol on this pool pub async fn create_lvol( &self, @@ -787,6 +868,22 @@ impl Lvs { uuid: Option<&str>, thin: bool, entity_id: Option, + ) -> Result { + self.create_lvol_with_opts(ReplicaArgs { + name: name.to_owned(), + size, + uuid: uuid.unwrap_or("").to_string(), + thin, + entity_id, + use_extent_table: None, + }) + .await + } + + /// create a new lvol on this pool + pub async fn create_lvol_with_opts( + &self, + opts: ReplicaArgs, ) -> Result { let clear_method = if self.base_bdev().io_type_supported(IoType::Unmap) { @@ -794,19 +891,20 @@ impl Lvs { } else { LVOL_CLEAR_WITH_NONE }; - if let Some(uuid) = uuid { - if UntypedBdev::lookup_by_uuid_str(uuid).is_some() { - return Err(LvsError::RepExists { - source: BsError::VolAlreadyExists {}, - name: uuid.to_string(), - }); - } + + if !opts.uuid.is_empty() + && UntypedBdev::lookup_by_uuid_str(&opts.uuid).is_some() + { + return Err(LvsError::RepExists { + source: BsError::VolAlreadyExists {}, + name: opts.uuid, + }); } - if UntypedBdev::lookup_by_name(name).is_some() { + if UntypedBdev::lookup_by_name(&opts.name).is_some() { return Err(LvsError::RepExists { source: BsError::VolAlreadyExists {}, - name: name.to_string(), + name: opts.name, }); }; @@ -815,51 +913,49 @@ impl Lvs { { return Err(LvsError::RepCreate { source: BsError::NoSpace {}, - name: name.to_string(), + name: opts.name, }); } // As it stands lvs pools can't grow, so limit the max replica size to // the pool capacity. 
- if size > self.capacity() { + if opts.size > self.capacity() { return Err(LvsError::RepCreate { source: BsError::CapacityOverflow {}, - name: name.to_string(), + name: opts.name, }); } let (s, r) = pair::>(); - let cname = name.into_cstring(); + + let cname = opts.name.clone().into_cstring(); + let cuuid = opts.uuid.clone().into_cstring(); + unsafe { - match uuid { - Some(u) => { - let cuuid = u.into_cstring(); - - vbdev_lvol_create_with_uuid( - self.as_inner_ptr(), - cname.as_ptr(), - size, - thin, - clear_method, - cuuid.as_ptr(), - Some(Lvol::lvol_cb), - cb_arg(s), - ) - } - None => vbdev_lvol_create( - self.as_inner_ptr(), - cname.as_ptr(), - size, - thin, - clear_method, - Some(Lvol::lvol_cb), - cb_arg(s), - ), + let mut lvol_opts: spdk_lvol_opts = std::mem::zeroed(); + spdk_lvol_opts_init(&mut lvol_opts as *mut _); + lvol_opts.name = cname.as_ptr(); + lvol_opts.size = opts.size; + lvol_opts.thin_provision = opts.thin; + if let Some(v) = opts.use_extent_table { + lvol_opts.use_extent_table = v; + } + lvol_opts.clear_method = clear_method; + + if !cuuid.is_empty() { + lvol_opts.uuid = cuuid.as_ptr(); } + + vbdev_lvol_create_with_opts( + self.as_inner_ptr(), + &lvol_opts as *const _, + Some(Lvol::lvol_cb), + cb_arg(s), + ) } .to_result(|e| LvsError::RepCreate { source: BsError::from_i32(e), - name: name.to_string(), + name: opts.name.clone(), })?; let mut lvol = r @@ -867,11 +963,11 @@ impl Lvs { .expect("lvol creation callback dropped") .map_err(|e| LvsError::RepCreate { source: BsError::from_errno(e), - name: name.to_string(), + name: opts.name.clone(), }) .map(Lvol::from_inner_ptr)?; - if let Some(id) = entity_id { + if let Some(id) = opts.entity_id { if let Err(error) = Pin::new(&mut lvol).set(PropValue::EntityId(id)).await { diff --git a/io-engine/src/lvs/mod.rs b/io-engine/src/lvs/mod.rs index 480480ca5..53f325cbf 100644 --- a/io-engine/src/lvs/mod.rs +++ b/io-engine/src/lvs/mod.rs @@ -18,6 +18,7 @@ use crate::{ ListPoolArgs, PoolArgs, PoolBackend, + PoolMetadataInfo, PoolOps, ReplicaArgs, }, @@ -170,17 +171,10 @@ impl PoolOps for Lvs { &self, args: ReplicaArgs, ) -> Result, crate::pool_backend::Error> { - let lvol = self - .create_lvol( - &args.name, - args.size, - Some(&args.uuid), - args.thin, - args.entity_id, - ) - .await?; + let lvol = self.create_lvol_with_opts(args).await?; Ok(Box::new(lvol)) } + async fn destroy( self: Box, ) -> Result<(), crate::pool_backend::Error> { @@ -192,6 +186,11 @@ impl PoolOps for Lvs { (*self).export().await?; Ok(()) } + + async fn grow(&self) -> Result<(), crate::pool_backend::Error> { + (*self).grow().await?; + Ok(()) + } } #[async_trait::async_trait(?Send)] @@ -209,6 +208,10 @@ impl BdevStater for Lvs { } impl IPoolProps for Lvs { + fn pool_type(&self) -> PoolBackend { + PoolBackend::Lvs + } + fn name(&self) -> &str { self.name() } @@ -221,30 +224,43 @@ impl IPoolProps for Lvs { vec![self.base_bdev().bdev_uri_str().unwrap_or_else(|| "".into())] } - fn used(&self) -> u64 { - self.used() + fn disk_capacity(&self) -> u64 { + self.base_bdev().size_in_bytes() } - fn committed(&self) -> u64 { - self.committed() + fn cluster_size(&self) -> u32 { + self.blob_cluster_size() as u32 + } + + fn page_size(&self) -> u32 { + self.page_size() as u32 } fn capacity(&self) -> u64 { self.capacity() } - fn pool_type(&self) -> PoolBackend { - PoolBackend::Lvs + fn used(&self) -> u64 { + self.used() } - fn cluster_size(&self) -> u32 { - self.blob_cluster_size() as u32 + fn committed(&self) -> u64 { + self.committed() + } + + fn md_props(&self) -> Option { + 
Some(PoolMetadataInfo { + md_page_size: self.md_page_size() as u32, + md_pages: self.md_pages(), + md_used_pages: self.md_used_pages(), + }) } } /// A factory instance which implements LVS specific `PoolFactory`. #[derive(Default)] pub struct PoolLvsFactory {} + #[async_trait::async_trait(?Send)] impl IPoolFactory for PoolLvsFactory { async fn create( @@ -254,6 +270,7 @@ impl IPoolFactory for PoolLvsFactory { let lvs = Lvs::create_or_import(args).await?; Ok(Box::new(lvs)) } + async fn import( &self, args: PoolArgs, @@ -261,6 +278,7 @@ impl IPoolFactory for PoolLvsFactory { let lvs = Lvs::import_from_args(args).await?; Ok(Box::new(lvs)) } + async fn find( &self, args: &FindPoolArgs, @@ -284,6 +302,7 @@ impl IPoolFactory for PoolLvsFactory { }; Ok(lvs.map(|lvs| Box::new(lvs) as _)) } + async fn list( &self, args: &ListPoolArgs, @@ -318,6 +337,7 @@ impl IPoolFactory for PoolLvsFactory { /// A factory instance which implements LVS specific `ReplicaFactory`. #[derive(Default)] pub struct ReplLvsFactory {} + #[async_trait::async_trait(?Send)] impl IReplicaFactory for ReplLvsFactory { fn bdev_as_replica( @@ -332,6 +352,7 @@ impl IReplicaFactory for ReplLvsFactory { } Some(Box::new(lvol)) } + async fn find( &self, args: &FindReplicaArgs, @@ -341,6 +362,7 @@ impl IReplicaFactory for ReplLvsFactory { .transpose()?; Ok(lvol.map(|l| Box::new(l) as _)) } + async fn find_snap( &self, args: &FindSnapshotArgs, @@ -356,6 +378,7 @@ impl IReplicaFactory for ReplLvsFactory { } Ok(lvol.map(|l| Box::new(l) as _)) } + async fn list( &self, args: &ListReplicaArgs, diff --git a/io-engine/src/pool_backend.rs b/io-engine/src/pool_backend.rs index 738b247da..3994ff89b 100644 --- a/io-engine/src/pool_backend.rs +++ b/io-engine/src/pool_backend.rs @@ -14,9 +14,16 @@ pub struct PoolArgs { pub disks: Vec, pub uuid: Option, pub cluster_size: Option, + pub md_args: Option, pub backend: PoolBackend, } +/// Pool metadata args. +#[derive(Clone, Debug, Default)] +pub struct PoolMetadataArgs { + pub md_resv_ratio: Option, +} + /// PoolBackend is the type of pool underneath Lvs, Lvm, etc #[derive(Copy, Clone, Debug, Default, PartialEq, Serialize, Deserialize)] pub enum PoolBackend { @@ -27,11 +34,12 @@ pub enum PoolBackend { /// Arguments for replica creation. pub struct ReplicaArgs { - pub(crate) name: String, - pub(crate) size: u64, - pub(crate) uuid: String, - pub(crate) thin: bool, - pub(crate) entity_id: Option, + pub name: String, + pub size: u64, + pub uuid: String, + pub thin: bool, + pub entity_id: Option, + pub use_extent_table: Option, } /// Generic Errors shared by all backends. @@ -137,11 +145,16 @@ pub trait PoolOps: &self, args: ReplicaArgs, ) -> Result, Error>; + /// Destroy the pool itself along with all its replicas. async fn destroy(self: Box) -> Result<(), Error>; + /// Exports the volume group by unloading all logical volumes. /// The pool will no longer be listable until it is imported again. async fn export(self: Box) -> Result<(), Error>; + + /// Grows the given pool by filling the entire underlying device(s). + async fn grow(&self) -> Result<(), Error>; } /// Interface for a pool factory which can be used for various @@ -219,16 +232,26 @@ impl FindPoolArgs { } } +/// Pool metadata properties/statistics. +pub struct PoolMetadataInfo { + pub md_page_size: u32, + pub md_pages: u64, + pub md_used_pages: u64, +} + /// Various properties from a pool. 
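+///
+/// A consumer sketch (computing a usage percentage the way the CLI's
+/// `pool list` does; illustrative only):
+///
+/// ```ignore
+/// fn used_pct(pool: &dyn IPoolProps) -> f64 {
+///     if pool.capacity() > 0 {
+///         100.0 * pool.used() as f64 / pool.capacity() as f64
+///     } else {
+///         0.0
+///     }
+/// }
+/// ```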
pub trait IPoolProps { + fn pool_type(&self) -> PoolBackend; fn name(&self) -> &str; fn uuid(&self) -> String; fn disks(&self) -> Vec; - fn used(&self) -> u64; + fn disk_capacity(&self) -> u64; + fn cluster_size(&self) -> u32; + fn page_size(&self) -> u32; fn capacity(&self) -> u64; + fn used(&self) -> u64; fn committed(&self) -> u64; - fn pool_type(&self) -> PoolBackend; - fn cluster_size(&self) -> u32; + fn md_props(&self) -> Option; } /// A pool factory helper. diff --git a/io-engine/src/subsys/config/pool.rs b/io-engine/src/subsys/config/pool.rs index 85ea2dc2a..0269dfeeb 100644 --- a/io-engine/src/subsys/config/pool.rs +++ b/io-engine/src/subsys/config/pool.rs @@ -175,6 +175,7 @@ impl From<&Pool> for PoolArgs { disks: pool.disks.clone(), uuid: None, cluster_size: None, + md_args: None, backend: pool.backend, } } diff --git a/io-engine/tests/lvs_grow.rs b/io-engine/tests/lvs_grow.rs new file mode 100644 index 000000000..f01667596 --- /dev/null +++ b/io-engine/tests/lvs_grow.rs @@ -0,0 +1,167 @@ +pub mod common; + +use once_cell::sync::OnceCell; + +use spdk_rs::{ffihelper::IntoCString, libspdk::resize_malloc_disk}; + +use io_engine::{ + core::MayastorCliArgs, + lvs::Lvs, + pool_backend::{IPoolProps, PoolArgs}, +}; + +use io_engine_tests::{ + bdev::{create_bdev, find_bdev_by_name}, + compose::{rpc::v1::GrpcConnect, Binary, Builder}, + pool::PoolBuilder, + MayastorTest, +}; + +/// Tests if 'a' is approximately equal to 'b' up to the given tolerance (in +/// percents). +fn approx_eq(a: f64, b: f64, t: f64) -> bool { + assert!(a > 0.0 && b > 0.0 && (0.0 .. 100.0).contains(&t)); + let d = 100.0 * (a - b).abs() / f64::max(a, b); + d <= t +} + +static MAYASTOR: OnceCell = OnceCell::new(); + +fn get_ms() -> &'static MayastorTest<'static> { + MAYASTOR.get_or_init(|| { + MayastorTest::new(MayastorCliArgs { + log_format: Some("nodate,nohost,compact".parse().unwrap()), + reactor_mask: "0x3".into(), + enable_io_all_thrd_nexus_channels: true, + ..Default::default() + }) + }) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn lvs_grow_ms() { + const BDEV_NAME: &str = "mem0"; + const SIZE_BEFORE_MB: u64 = 64; + const SIZE_AFTER_MB: u64 = 128; + const BDEV_URI: &str = "malloc:///mem0?size_mb=64"; + const POOL_NAME: &str = "pool_0"; + const POOL_UUID: &str = "40baf8b5-6256-4f29-b073-61ebf67d9b91"; + + common::composer_init(); + + let ms = get_ms(); + + ms.spawn(async { + let lvs_args = PoolArgs { + name: POOL_NAME.to_string(), + disks: vec![BDEV_URI.to_string()], + uuid: Some(POOL_UUID.to_string()), + cluster_size: None, + md_args: None, + backend: Default::default(), + }; + + // Create LVS. + let lvs = Lvs::create_or_import(lvs_args.clone()).await.unwrap(); + let cap_before = lvs.capacity(); + + assert_eq!(lvs.disk_capacity(), SIZE_BEFORE_MB * 1024 * 1024); + assert!(lvs.capacity() <= lvs.disk_capacity()); + assert!(approx_eq( + lvs.disk_capacity() as f64, + lvs.capacity() as f64, + 10.0 + )); + + // Resize the malloc bdev. + unsafe { + let name = BDEV_NAME.to_owned(); + resize_malloc_disk(name.into_cstring().as_ptr(), SIZE_AFTER_MB); + } + + // Pool capacity must not change, disk capacity must reflect disk size + // change. + assert_eq!(lvs.disk_capacity(), SIZE_AFTER_MB * 1024 * 1024); + assert_eq!(cap_before, lvs.capacity()); + + // Grow the LVS. + lvs.grow().await.unwrap(); + + // Pool must have grown. + assert!(cap_before < lvs.capacity()); + + // New pool capacity must be close to disk capacity. 
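+        // (capacity stays below disk_capacity because the blobstore
+        // reserves clusters for metadata; the 10% tolerance covers that)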
+        assert!(lvs.capacity() <= lvs.disk_capacity());
+        assert!(approx_eq(
+            lvs.disk_capacity() as f64,
+            lvs.capacity() as f64,
+            10.0
+        ));
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn lvs_grow_api() {
+    const SIZE_AFTER_MB: u64 = 128;
+    const BDEV_URI: &str = "malloc:///mem0?size_mb=64";
+    const BDEV_URI_RESIZE: &str = "malloc:///mem0?size_mb=128&resize";
+    const POOL_UUID: &str = "40baf8b5-6256-4f29-b073-61ebf67d9b91";
+
+    common::composer_init();
+
+    let test = Builder::new()
+        .name("cargo-test")
+        .network("10.1.0.0/16")
+        .unwrap()
+        .add_container_bin(
+            "ms_0",
+            Binary::from_dbg("io-engine").with_args(vec!["-l", "1,2"]),
+        )
+        .with_clean(true)
+        .build()
+        .await
+        .unwrap();
+
+    let conn = GrpcConnect::new(&test);
+
+    let ms_0 = conn.grpc_handle_shared("ms_0").await.unwrap();
+
+    let mut pool_0 = PoolBuilder::new(ms_0.clone())
+        .with_name("pool0")
+        .with_uuid(POOL_UUID)
+        .with_bdev(BDEV_URI);
+
+    let pool_before = pool_0.create().await.unwrap();
+
+    let bdev = find_bdev_by_name(ms_0.clone(), "mem0").await.unwrap();
+    assert_eq!(
+        pool_before.disk_capacity,
+        bdev.num_blocks * bdev.blk_size as u64
+    );
+
+    // Grow the bdev.
+    let bdev = create_bdev(ms_0.clone(), BDEV_URI_RESIZE).await.unwrap();
+    let new_bdev_cap = bdev.num_blocks * bdev.blk_size as u64;
+
+    // Pool capacity must not change, disk capacity must reflect disk size
+    // change.
+    let pool = pool_0.get_pool().await.unwrap();
+    assert_eq!(pool.capacity, pool_before.capacity);
+    assert_eq!(pool.disk_capacity, new_bdev_cap);
+    assert_eq!(new_bdev_cap / (1024 * 1024), SIZE_AFTER_MB);
+
+    // Grow the pool.
+    let (pool_before, pool_after) = pool_0.grow().await.unwrap();
+
+    // Pool must have grown.
+    assert!(pool_before.capacity < pool_after.capacity);
+
+    // New pool capacity must be close to disk capacity.
+    assert!(pool_after.capacity <= pool_after.disk_capacity);
+    assert!(approx_eq(
+        pool_after.disk_capacity as f64,
+        pool_after.capacity as f64,
+        10.0
+    ));
+}
diff --git a/io-engine/tests/lvs_import.rs b/io-engine/tests/lvs_import.rs
index 9d9d8b4a2..09ca2cd16 100644
--- a/io-engine/tests/lvs_import.rs
+++ b/io-engine/tests/lvs_import.rs
@@ -54,6 +54,7 @@ async fn lvs_import_many_volume() {
         disks: vec![BDEV_NAME.to_string()],
         uuid: Some(POOL_UUID.to_string()),
         cluster_size: None,
+        md_args: None,
         backend: Default::default(),
     };
 
diff --git a/io-engine/tests/lvs_limits.rs b/io-engine/tests/lvs_limits.rs
index 08069bd5d..454ac3ff5 100644
--- a/io-engine/tests/lvs_limits.rs
+++ b/io-engine/tests/lvs_limits.rs
@@ -49,6 +49,7 @@ async fn lvs_metadata_limit() {
         disks: vec![BDEV_NAME.to_string()],
         uuid: Some(POOL_UUID.to_string()),
         cluster_size: None,
+        md_args: None,
         backend: Default::default(),
     };
 
diff --git a/io-engine/tests/lvs_pool.rs b/io-engine/tests/lvs_pool.rs
index 8335499c3..4241c32df 100644
--- a/io-engine/tests/lvs_pool.rs
+++ b/io-engine/tests/lvs_pool.rs
@@ -57,17 +57,21 @@ async fn lvs_pool_test() {
     })
     .await;
 
+    let pool_args = PoolArgs {
+        name: "tpool".into(),
+        disks: vec![format!("aio://{DISKNAME1}")],
+        uuid: None,
+        cluster_size: None,
+        md_args: None,
+        backend: PoolBackend::Lvs,
+    };
+
     // should succeed to create a pool we can not import
-    ms.spawn(async {
-        Lvs::create_or_import(PoolArgs {
-            name: "tpool".into(),
-            disks: vec![format!("aio://{DISKNAME1}")],
-            uuid: None,
-            cluster_size: None,
-            backend: PoolBackend::Lvs,
-        })
-        .await
-        .unwrap();
+    ms.spawn({
+        let pool_args = pool_args.clone();
+        async {
+            Lvs::create_or_import(pool_args).await.unwrap();
+        }
     })
     .await;
 
@@ -76,14 +80,7 @@ async fn lvs_pool_test() {
     // have an idempotent snafu, we dont crash and
     // burn
     ms.spawn(async {
-        assert!(Lvs::create(
-            "tpool",
-            format!("aio://{DISKNAME1}").as_str(),
-            None,
-            None
-        )
-        .await
-        .is_err())
+        assert!(Lvs::create_from_args_inner(pool_args).await.is_err())
     })
     .await;
 
@@ -147,12 +144,14 @@ async fn lvs_pool_test() {
         .is_err());
 
     assert_eq!(Lvs::iter().count(), 0);
-    assert!(Lvs::create(
-        "tpool",
-        format!("aio://{DISKNAME1}").as_str(),
-        None,
-        None
-    )
+    assert!(Lvs::create_from_args_inner(PoolArgs {
+        name: "tpool".to_string(),
+        disks: vec![format!("aio://{DISKNAME1}")],
+        uuid: None,
+        cluster_size: None,
+        md_args: None,
+        backend: PoolBackend::Lvs,
+    })
     .await
     .is_ok());
 
@@ -188,6 +187,7 @@ async fn lvs_pool_test() {
         disks: vec!["malloc:///malloc0?size_mb=64".to_string()],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
@@ -225,6 +225,7 @@ async fn lvs_pool_test() {
         disks: vec![format!("aio://{DISKNAME1}")],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
@@ -369,6 +370,7 @@ async fn lvs_pool_test() {
         disks: vec![format!("aio://{DISKNAME1}")],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
@@ -390,6 +392,7 @@ async fn lvs_pool_test() {
         disks: vec![format!("aio://{pool_dev_aio}")],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
@@ -417,6 +420,7 @@ async fn lvs_pool_test() {
         disks: vec![format!("uring://{pool_dev_uring}")],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
@@ -454,6 +458,7 @@ async fn lvs_pool_test() {
         disks: vec![format!("aio://{DISKNAME1}")],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
@@ -471,6 +476,7 @@ async fn lvs_pool_test() {
         disks: vec![format!("aio://{DISKNAME2}")],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
diff --git a/io-engine/tests/nexus_with_local.rs b/io-engine/tests/nexus_with_local.rs
index 40a30fb76..c35f79a29 100644
--- a/io-engine/tests/nexus_with_local.rs
+++ b/io-engine/tests/nexus_with_local.rs
@@ -51,6 +51,7 @@ async fn create_replicas(h: &mut RpcHandle) {
             pooltype: 0,
             disks: vec!["malloc:///disk0?size_mb=64".into()],
             cluster_size: None,
+            md_args: None,
         })
         .await
         .unwrap();
diff --git a/io-engine/tests/replica_snapshot.rs b/io-engine/tests/replica_snapshot.rs
index e73556524..be1c95bb3 100644
--- a/io-engine/tests/replica_snapshot.rs
+++ b/io-engine/tests/replica_snapshot.rs
@@ -98,6 +98,7 @@ async fn replica_snapshot() {
         disks: vec![format!("aio://{DISKNAME1}")],
         uuid: None,
         cluster_size: None,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
diff --git a/io-engine/tests/snapshot_lvol.rs b/io-engine/tests/snapshot_lvol.rs
index 528a8cf26..2735b8b66 100755
--- a/io-engine/tests/snapshot_lvol.rs
+++ b/io-engine/tests/snapshot_lvol.rs
@@ -68,6 +68,7 @@ async fn create_test_pool(
         disks: vec![disk],
         uuid: None,
         cluster_size,
+        md_args: None,
         backend: PoolBackend::Lvs,
     })
     .await
diff --git a/io-engine/tests/snapshot_nexus.rs b/io-engine/tests/snapshot_nexus.rs
index ab18b2fdf..f71a6886c 100755
--- a/io-engine/tests/snapshot_nexus.rs
+++ b/io-engine/tests/snapshot_nexus.rs
@@ -144,6 +144,7 @@ async fn launch_instance(create_replicas: bool) -> (ComposeTest, Vec<String>) {
             pooltype: 0,
             disks: vec!["malloc:///disk0?size_mb=128".into()],
             cluster_size: None,
+            md_args: None,
         })
         .await
         .unwrap();
diff --git a/spdk-rs b/spdk-rs
index 10c2d8c7e..4bafbb7e9 160000
--- a/spdk-rs
+++ b/spdk-rs
@@ -1 +1 @@
-Subproject commit 10c2d8c7ea059d32b62853df74587d1b1617361e
+Subproject commit 4bafbb7e92cf136ee95da8a7a381123a418f09c1
diff --git a/utils/dependencies b/utils/dependencies
index 5a511173e..7231e5eb9 160000
--- a/utils/dependencies
+++ b/utils/dependencies
@@ -1 +1 @@
-Subproject commit 5a511173ea3dedd89534f205c6add5f15b81244f
+Subproject commit 7231e5eb9754f761d77afd096a50778c05563343
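
Usage note (illustrative only, not part of the patch): a minimal sketch of the grow flow this patch introduces, mirroring the in-process lvs_grow.rs test above. `Lvs::create_or_import`, `grow`, and the `IPoolProps` accessors `capacity`/`disk_capacity` are the APIs used by the patch; the pool name and malloc URI are placeholder values.

    use io_engine::{
        lvs::Lvs,
        pool_backend::{IPoolProps, PoolArgs},
    };

    /// Grows a pool after its backing device has been expanded out of band.
    async fn grow_after_device_expansion() {
        let args = PoolArgs {
            name: "pool_0".to_string(),
            disks: vec!["malloc:///mem0?size_mb=64".to_string()],
            uuid: None,
            cluster_size: None,
            // `md_args` is the new optional metadata reservation hint
            // (e.g. `PoolMetadataArgs { md_resv_ratio: .. }`); None keeps
            // the default behaviour.
            md_args: None,
            backend: Default::default(),
        };

        let lvs = Lvs::create_or_import(args).await.unwrap();

        // ... the underlying device is resized here, outside the pool ...

        // The pool does not pick up new space automatically; an explicit
        // grow extends it to the current device size.
        lvs.grow().await.unwrap();
        assert!(lvs.capacity() <= lvs.disk_capacity());
    }

The lvs_grow_api test drives the same sequence over gRPC: the malloc bdev is re-created with the `&resize` parameter to expand it, then the pool is grown and its `capacity` is compared against `disk_capacity`.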