diff --git a/Cargo.toml b/Cargo.toml index 5d53e6cd9..dc3fcb0a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,3 +26,8 @@ url = "2.3" uuid = "1.4" nix = "0.27" procfs = "0.15" +rand = "0.8" +sha2 = { version = "0.10.2", default-features = false } +hex = "0.4" +sp-core = "22.0.0" +libp2p = { version = "0.52" } \ No newline at end of file diff --git a/crates/configuration/src/lib.rs b/crates/configuration/src/lib.rs index 3908b3691..fa434f988 100644 --- a/crates/configuration/src/lib.rs +++ b/crates/configuration/src/lib.rs @@ -10,7 +10,7 @@ mod utils; pub use global_settings::{GlobalSettings, GlobalSettingsBuilder}; pub use hrmp_channel::{HrmpChannelConfig, HrmpChannelConfigBuilder}; pub use network::{NetworkConfig, NetworkConfigBuilder}; -pub use parachain::{ParachainConfig, ParachainConfigBuilder}; +pub use parachain::{ParachainConfig, ParachainConfigBuilder, RegistrationStrategy}; pub use relaychain::{RelaychainConfig, RelaychainConfigBuilder}; // re-export shared pub use shared::{node::NodeConfig, types}; diff --git a/crates/configuration/src/network.rs b/crates/configuration/src/network.rs index f5a6160c3..9569f6fdf 100644 --- a/crates/configuration/src/network.rs +++ b/crates/configuration/src/network.rs @@ -753,15 +753,11 @@ mod tests { .with_default_command("polkadot") .with_default_image("docker.io/parity/polkadot:latest") .with_default_args(vec![("-lparachain", "debug").into()]) - .with_node(|node| { - node.with_name("alice") - .validator(true) - .invulnerable(true) - .validator(true) - }) + .with_node(|node| node.with_name("alice").validator(true)) .with_node(|node| { node.with_name("bob") .validator(true) + .invulnerable(false) .bootnode(true) .with_args(vec![("--database", "paritydb-experimental").into()]) }) @@ -822,6 +818,7 @@ mod tests { .with_collator(|collator| { collator .with_name("charles") + .validator(false) .bootnode(true) .invulnerable(true) .with_initial_balance(0) @@ -830,6 +827,7 @@ mod tests { collator .with_name("frank") .validator(true) + .invulnerable(false) .bootnode(true) .with_initial_balance(1_000_000_000) }) @@ -850,6 +848,7 @@ mod tests { .with_collator(|collator| { collator .with_name("georges") + .validator(false) .bootnode(true) .invulnerable(true) .with_initial_balance(0) @@ -858,6 +857,7 @@ mod tests { collator .with_name("victor") .validator(true) + .invulnerable(false) .bootnode(true) .with_initial_balance(1_000_000_000) }) @@ -947,6 +947,7 @@ mod tests { .with_collator(|collator| { collator .with_name("charles") + .validator(false) .bootnode(true) .invulnerable(true) .with_initial_balance(0) @@ -1088,6 +1089,7 @@ mod tests { collator .with_name("charles") .bootnode(true) + .validator(false) .invulnerable(true) .with_initial_balance(0) }) @@ -1095,6 +1097,7 @@ mod tests { collator .with_name("frank") .validator(true) + .invulnerable(false) .bootnode(true) .with_initial_balance(1_000_000_000) }) @@ -1116,6 +1119,7 @@ mod tests { collator .with_name("georges") .bootnode(true) + .validator(false) .invulnerable(true) .with_initial_balance(0) }) @@ -1123,6 +1127,7 @@ mod tests { collator .with_name("victor") .validator(true) + .invulnerable(false) .bootnode(true) .with_initial_balance(1_000_000_000) }) @@ -1323,6 +1328,7 @@ mod tests { collator .with_name("charles") .bootnode(true) + .validator(false) .invulnerable(true) .with_initial_balance(0) }) diff --git a/crates/configuration/src/shared/node.rs b/crates/configuration/src/shared/node.rs index d1e0bb747..9b11d2709 100644 --- a/crates/configuration/src/shared/node.rs +++ 
b/crates/configuration/src/shared/node.rs
@@ -10,9 +10,12 @@ use super::{
     resources::ResourcesBuilder,
     types::{AssetLocation, ChainDefaultContext, Command, Image, ValidationContext, U128},
 };
-use crate::shared::{
-    resources::Resources,
-    types::{Arg, Port},
+use crate::{
+    shared::{
+        resources::Resources,
+        types::{Arg, Port},
+    },
+    utils::default_as_true,
 };
 
 /// An environment variable with a name and a value.
@@ -59,9 +62,9 @@ pub struct NodeConfig {
     pub(crate) command: Option<Command>,
     #[serde(default)]
     args: Vec<Arg>,
-    #[serde(alias = "validator")]
+    #[serde(alias = "validator", default = "default_as_true")]
     pub(crate) is_validator: bool,
-    #[serde(alias = "invulnerable")]
+    #[serde(alias = "invulnerable", default = "default_as_true")]
     pub(crate) is_invulnerable: bool,
     #[serde(alias = "bootnode")]
     pub(crate) is_bootnode: bool,
@@ -263,8 +266,8 @@ impl Default for NodeConfigBuilder {
             image: None,
             command: None,
             args: vec![],
-            is_validator: false,
-            is_invulnerable: false,
+            is_validator: true,
+            is_invulnerable: true,
             is_bootnode: false,
             initial_balance: 2_000_000_000_000.into(),
             env: vec![],
diff --git a/crates/configuration/src/shared/types.rs b/crates/configuration/src/shared/types.rs
index 590fa6070..2cdf29c74 100644
--- a/crates/configuration/src/shared/types.rs
+++ b/crates/configuration/src/shared/types.rs
@@ -217,7 +217,7 @@ impl Command {
 /// let url_location2: AssetLocation = "https://mycloudstorage.com/path/to/my/file.tgz".into();
 /// let path_location: AssetLocation = PathBuf::from_str("/tmp/path/to/my/file").unwrap().into();
 /// let path_location2: AssetLocation = "/tmp/path/to/my/file".into();
-/// 
+///
 /// assert!(matches!(url_location, AssetLocation::Url(value) if value.as_str() == "https://mycloudstorage.com/path/to/my/file.tgz"));
 /// assert!(matches!(url_location2, AssetLocation::Url(value) if value.as_str() == "https://mycloudstorage.com/path/to/my/file.tgz"));
 /// assert!(matches!(path_location, AssetLocation::FilePath(value) if value.to_str().unwrap() == "/tmp/path/to/my/file"));
diff --git a/crates/examples/Cargo.toml b/crates/examples/Cargo.toml
index 303a0f313..dce89cfbc 100644
--- a/crates/examples/Cargo.toml
+++ b/crates/examples/Cargo.toml
@@ -7,3 +7,9 @@ edition = "2021"
 
 [dependencies]
 configuration = { path = "../configuration" }
+orchestrator = { path = "../orchestrator" }
+provider = { path = "../provider" }
+# TODO: we shouldn't need to pull from support, we need
+# to review the exports for needed types
+support = { path = "../support" }
+tokio = { workspace = true }
diff --git a/crates/examples/examples/small_network_with_default.rs b/crates/examples/examples/small_network_with_default.rs
index 5a31d2064..d003dde02 100644
--- a/crates/examples/examples/small_network_with_default.rs
+++ b/crates/examples/examples/small_network_with_default.rs
@@ -1,14 +1,65 @@
+use std::time::Duration;
+
 use configuration::NetworkConfigBuilder;
+use orchestrator::{AddNodeOpts, Orchestrator};
+use provider::NativeProvider;
+use support::fs::local::LocalFileSystem;
 
-fn main() {
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let config = NetworkConfigBuilder::new()
         .with_relaychain(|r| {
             r.with_chain("rococo-local")
                 .with_default_command("polkadot")
                 .with_node(|node| node.with_name("alice"))
-                .with_node(|node| node.with_name("name"))
+                .with_node(|node| node.with_name("bob"))
+        })
+        .with_parachain(|p| {
+            p.with_id(100)
+                .cumulus_based(true)
+                .with_collator(|n| n.with_name("collator").with_command("polkadot-parachain"))
         })
-        .build();
+        .build()
+        .unwrap();
+
+    let fs = LocalFileSystem;
+    let provider = NativeProvider::new(fs.clone());
+    let orchestrator = Orchestrator::new(fs, provider);
+    let mut network = orchestrator.spawn(config).await?;
+    println!("🚀🚀🚀🚀 network deployed");
+    // add a new node
+    let opts = AddNodeOpts {
+        rpc_port: Some(9444),
+        is_validator: true,
+        ..Default::default()
+    };
+
+    // TODO: add a check to ensure the node name is unique
+    network.add_node("new1", opts, None).await?;
+
+    tokio::time::sleep(Duration::from_secs(5)).await;
+
+    // Example of some operations that you can do
+    // with `nodes` (e.g. pause, resume, restart)
+    // pause the node
+    // network.pause_node("new1").await?;
+    // println!("node new1 paused!");
+
+    // tokio::time::sleep(Duration::from_secs(5)).await;
+
+    // network.resume_node("new1").await?;
+    // println!("node new1 resumed!");
+
+    let col_opts = AddNodeOpts {
+        command: Some("polkadot-parachain".try_into()?),
+        ..Default::default()
+    };
+    network.add_node("new-col-1", col_opts, Some(100)).await?;
+    println!("new collator deployed!");
+
+    // For now, let's just loop...
+    #[allow(clippy::empty_loop)]
+    loop {}
 
-    println!("{:?}", config.unwrap());
+    // Ok(())
 }
diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml
index 98e8302af..626fe4959 100644
--- a/crates/orchestrator/Cargo.toml
+++ b/crates/orchestrator/Cargo.toml
@@ -6,3 +6,19 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+configuration = { path = "../configuration" }
+support = { path = "../support" }
+provider = { path = "../provider" }
+tokio = { workspace = true, features = ["time"] }
+thiserror = { workspace = true }
+# TODO: add logger in a new pr.
+#log = { workspace = true }
+multiaddr = { workspace = true }
+serde_json = { workspace = true }
+futures = { workspace = true }
+anyhow = { workspace = true }
+rand = { workspace = true }
+sha2 = { workspace = true, default-features = false }
+hex = { workspace = true }
+sp-core = { workspace = true }
+libp2p = { workspace = true }
\ No newline at end of file
diff --git a/crates/orchestrator/src/errors.rs b/crates/orchestrator/src/errors.rs
new file mode 100644
index 000000000..d8cd05824
--- /dev/null
+++ b/crates/orchestrator/src/errors.rs
@@ -0,0 +1,27 @@
+//! Zombienet Orchestrator error definitions.
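The example above (`small_network_with_default.rs`) ends in a busy `loop {}` to keep the spawned network alive. A minimal sketch of an alternative ending, assuming the `examples` crate enabled tokio's `signal` feature (which this diff does not add):

```rust
// Hypothetical tail for small_network_with_default.rs: block until Ctrl-C instead
// of spinning in an empty loop. Requires tokio's "signal" feature to be enabled.
network.add_node("new-col-1", col_opts, Some(100)).await?;
println!("new collator deployed!");

tokio::signal::ctrl_c().await?;
println!("ctrl-c received, shutting down");
Ok(())
```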
+
+use provider::ProviderError;
+use support::fs::FileSystemError;
+
+use crate::generators;
+
+#[derive(Debug, thiserror::Error)]
+pub enum OrchestratorError {
+    // TODO: improve invalid config reporting
+    #[error("Invalid network configuration: {0}")]
+    InvalidConfig(String),
+    #[error("Invalid configuration for node: {0}, field: {1}")]
+    InvalidNodeConfig(String, String),
+    #[error("Invariant not fulfilled {0}")]
+    InvariantError(&'static str),
+    #[error("Global network spawn timeout: {0} secs")]
+    GlobalTimeOut(u32),
+    #[error("Generator error")]
+    GeneratorError(#[from] generators::errors::GeneratorError),
+    #[error("Provider error")]
+    ProviderError(#[from] ProviderError),
+    #[error("FileSystem error")]
+    FileSystemError(#[from] FileSystemError),
+    #[error(transparent)]
+    SpawnerError(#[from] anyhow::Error),
+}
diff --git a/crates/orchestrator/src/generators.rs b/crates/orchestrator/src/generators.rs
new file mode 100644
index 000000000..f14731088
--- /dev/null
+++ b/crates/orchestrator/src/generators.rs
@@ -0,0 +1,20 @@
+pub mod chain_spec;
+pub mod errors;
+pub mod para_artifact;
+
+mod bootnode_addr;
+mod command;
+mod identity;
+mod key;
+mod keystore;
+mod port;
+
+pub use bootnode_addr::generate as generate_node_bootnode_addr;
+pub use command::{
+    generate_for_cumulus_node as generate_node_command_cumulus,
+    generate_for_node as generate_node_command, GenCmdOptions,
+};
+pub use identity::generate as generate_node_identity;
+pub use key::generate as generate_node_keys;
+pub use keystore::generate as generate_node_keystore;
+pub use port::generate as generate_node_port;
diff --git a/crates/orchestrator/src/generators/bootnode_addr.rs b/crates/orchestrator/src/generators/bootnode_addr.rs
new file mode 100644
index 000000000..5463b7923
--- /dev/null
+++ b/crates/orchestrator/src/generators/bootnode_addr.rs
@@ -0,0 +1,112 @@
+use std::{fmt::Display, net::IpAddr};
+
+use super::errors::GeneratorError;
+
+pub fn generate<T: AsRef<str> + Display>(
+    peer_id: &str,
+    ip: &IpAddr,
+    port: u16,
+    args: &[T],
+    // args: &[&String],
+    p2p_cert: &Option<String>,
+) -> Result<String, GeneratorError> {
+    let addr = if let Some(index) = args.iter().position(|arg| arg.as_ref().eq("--listen-addr")) {
+        let listen_value = args
+            .as_ref()
+            .get(index + 1)
+            .ok_or(GeneratorError::BootnodeAddrGeneration(
+                "can not generate bootnode address from args".into(),
+            ))?
+ .to_string(); + + let ip_str = ip.to_string(); + let port_str = port.to_string(); + let mut parts = listen_value.split('/').collect::>(); + parts[2] = ip_str.as_str(); + parts[4] = port_str.as_str(); + parts.join("/") + } else { + format!("/ip4/{ip}/tcp/{port}/ws") + }; + + let mut addr_with_peer = format!("{addr}/p2p/{peer_id}"); + if let Some(p2p_cert) = p2p_cert { + addr_with_peer.push_str("/certhash/"); + addr_with_peer.push_str(p2p_cert) + } + Ok(addr_with_peer) +} + +#[cfg(test)] +mod tests { + + use provider::constants::LOCALHOST; + + use super::*; + #[test] + fn generate_for_alice_without_args() { + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec<&str> = vec![]; + let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, &args, &None).unwrap(); + assert_eq!( + &bootnode_addr, + "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" + ); + } + + #[test] + fn generate_for_alice_with_listen_addr() { + // Should override the ip/port + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec = [ + "--some", + "other", + "--listen-addr", + "/ip4/192.168.100.1/tcp/30333/ws", + ] + .iter() + .map(|x| x.to_string()) + .collect(); + let bootnode_addr = + generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None).unwrap(); + assert_eq!( + &bootnode_addr, + "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" + ); + } + + #[test] + fn generate_for_alice_with_listen_addr_without_value_must_fail() { + // Should override the ip/port + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec = ["--some", "other", "--listen-addr"] + .iter() + .map(|x| x.to_string()) + .collect(); + let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None); + + assert!(bootnode_addr.is_err()); + assert!(matches!( + bootnode_addr, + Err(GeneratorError::BootnodeAddrGeneration(_)) + )); + } + + #[test] + fn generate_for_alice_withcert() { + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec<&str> = vec![]; + let bootnode_addr = generate( + peer_id, + &LOCALHOST, + 5678, + &args, + &Some(String::from("data")), + ) + .unwrap(); + assert_eq!( + &bootnode_addr, + "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm/certhash/data" + ); + } +} diff --git a/crates/orchestrator/src/generators/chain_spec.rs b/crates/orchestrator/src/generators/chain_spec.rs new file mode 100644 index 000000000..f302d6590 --- /dev/null +++ b/crates/orchestrator/src/generators/chain_spec.rs @@ -0,0 +1,807 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +use anyhow::anyhow; +use configuration::{types::AssetLocation, HrmpChannelConfig}; +use provider::{ + types::{GenerateFileCommand, GenerateFilesOptions, TransferedFile}, + DynNamespace, ProviderError, +}; +use serde_json::json; +use support::fs::FileSystem; + +use super::errors::GeneratorError; +use crate::{ + network_spec::{node::NodeSpec, parachain::ParachainSpec, relaychain::RelaychainSpec}, + ScopedFilesystem, +}; + +// TODO: (javier) move to state +#[derive(Debug, Clone)] +pub enum Context { + Relay, + Para, +} + +enum ChainSpecFormat { + Plain, + Raw, +} + +enum KeyType { + Session, + Aura, + Grandpa, +} +#[derive(Debug)] +pub struct ParaGenesisConfig> { + pub(crate) state_path: T, + pub(crate) wasm_path: T, + pub(crate) id: 
u32, + pub(crate) as_parachain: bool, +} + +#[derive(Debug, Clone)] +pub struct ChainSpec { + // Name of the spec file, most of the times could be the same as the chain_name. (e.g rococo-local) + chain_spec_name: String, + asset_location: Option, + maybe_plain_path: Option, + chain_name: Option, + raw_path: Option, + // The binary to build the chain-spec + command: Option, + // full command to build the spec, we will use as provided + build_command: Option, + context: Context, +} + +impl ChainSpec { + pub(crate) fn new(chain_spec_name: impl Into, context: Context) -> Self { + Self { + chain_spec_name: chain_spec_name.into(), + build_command: None, + chain_name: None, + maybe_plain_path: None, + asset_location: None, + raw_path: None, + command: None, + context, + } + } + + pub(crate) fn chain_name(&self) -> Option<&str> { + self.chain_name.as_deref() + } + + pub(crate) fn set_chain_name(mut self, chain_name: impl Into) -> Self { + self.chain_name = Some(chain_name.into()); + self + } + + pub(crate) fn asset_location(mut self, location: AssetLocation) -> Self { + self.asset_location = Some(location); + self + } + + pub(crate) fn command(mut self, command: impl Into) -> Self { + self.command = Some(command.into()); + self + } + + /// Build the chain-spec + pub async fn build<'a, T>( + &mut self, + ns: &DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + // TODO: Move this to state builder. + if self.asset_location.is_none() && self.command.is_none() { + return Err(GeneratorError::ChainSpecGeneration( + "Can not build the chain spec without set the command or asset_location" + .to_string(), + )); + } + + let maybe_plain_spec_path = PathBuf::from(format!("{}-plain.json", self.chain_spec_name)); + // if we have a path, copy to the base_dir of the ns with the name `-plain.json` + if let Some(location) = self.asset_location.as_ref() { + match location { + AssetLocation::FilePath(path) => { + let file_to_transfer = TransferedFile { + local_path: path.clone(), + remote_path: maybe_plain_spec_path.clone(), + }; + + scoped_fs + .copy_files(vec![&file_to_transfer]) + .await + .map_err(|_| { + GeneratorError::ChainSpecGeneration(format!( + "Error copying file: {file_to_transfer}" + )) + })?; + }, + AssetLocation::Url(_url) => todo!(), + } + } else { + // we should create the chain-spec using command. + // SAFETY: we ensure that command is some with the first check of the fn + let cmd = self.command.as_ref().unwrap(); + let mut args: Vec = vec!["build-spec".into()]; + if let Some(chain_name) = self.chain_name.as_ref() { + args.push("--chain".into()); + args.push(chain_name.clone()); + } + args.push("--disable-default-bootnode".into()); + + let generate_command = + GenerateFileCommand::new(cmd.as_str(), maybe_plain_spec_path.clone()).args(args); + let options = GenerateFilesOptions::new(vec![generate_command]); + ns.generate_files(options).await?; + } + + if is_raw(maybe_plain_spec_path.clone(), scoped_fs).await? 
{ + self.raw_path = Some(maybe_plain_spec_path); + } else { + self.maybe_plain_path = Some(maybe_plain_spec_path); + } + Ok(()) + } + + pub async fn build_raw(&mut self, ns: &DynNamespace) -> Result<(), GeneratorError> { + let None = self.raw_path else { + return Ok(()); + }; + // build raw + let raw_spec_path = PathBuf::from(format!("{}.json", self.chain_spec_name)); + let cmd = self + .command + .as_ref() + .ok_or(GeneratorError::ChainSpecGeneration( + "Invalid command".into(), + ))?; + let maybe_plain_path = + self.maybe_plain_path + .as_ref() + .ok_or(GeneratorError::ChainSpecGeneration( + "Invalid plain path".into(), + ))?; + let args: Vec = vec![ + "build-spec".into(), + "--chain".into(), + // TODO: we should get the full path from the scoped filesystem + format!( + "{}/{}", + ns.base_dir().to_string_lossy(), + maybe_plain_path.display().to_string() + ), + "--raw".into(), + "--disable-default-bootnode".into(), + ]; + + let generate_command = GenerateFileCommand::new(cmd, raw_spec_path.clone()).args(args); + let options = GenerateFilesOptions::new(vec![generate_command]); + ns.generate_files(options).await?; + + self.raw_path = Some(raw_spec_path); + + Ok(()) + } + + pub fn raw_path(&self) -> Option<&Path> { + self.raw_path.as_deref() + } + + pub async fn read_chain_id<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result + where + T: FileSystem, + { + let (content, _) = self.read_spec(scoped_fs).await?; + let chain_spec_json: serde_json::Value = serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + if let Some(chain_id) = chain_spec_json.get("id") { + if let Some(chain_id) = chain_id.as_str() { + Ok(chain_id.to_string()) + } else { + Err(GeneratorError::ChainSpecGeneration( + "id should be an string in the chain-spec, this is a bug".into(), + )) + } + } else { + Err(GeneratorError::ChainSpecGeneration( + "'id' should be a fields in the chain-spec of the relaychain".into(), + )) + } + } + + async fn read_spec<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(String, ChainSpecFormat), GeneratorError> + where + T: FileSystem, + { + let (path, format) = match (self.maybe_plain_path.as_ref(), self.raw_path.as_ref()) { + (Some(path), None) => (path, ChainSpecFormat::Plain), + (None, Some(path)) => (path, ChainSpecFormat::Raw), + (Some(_), Some(path)) => { + // if we have both paths return the raw + (path, ChainSpecFormat::Raw) + }, + (None, None) => unreachable!(), + }; + + let content = scoped_fs.read_to_string(path.clone()).await.map_err(|_| { + GeneratorError::ChainSpecGeneration(format!( + "Can not read chain-spec from {}", + path.to_string_lossy() + )) + })?; + + Ok((content, format)) + } + + async fn write_spec<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + content: impl Into, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + let (path, _format) = match (self.maybe_plain_path.as_ref(), self.raw_path.as_ref()) { + (Some(path), None) => (path, ChainSpecFormat::Plain), + (None, Some(path)) => (path, ChainSpecFormat::Raw), + (Some(_), Some(path)) => { + // if we have both paths return the raw + (path, ChainSpecFormat::Raw) + }, + (None, None) => unreachable!(), + }; + + scoped_fs.write(path, content.into()).await.map_err(|_| { + GeneratorError::ChainSpecGeneration(format!( + "Can not write chain-spec from {}", + path.to_string_lossy() + )) + })?; + + Ok(()) + } + + // TODO: (javier) move this fns to state aware + pub async fn customize_para<'a, 
T>( + &self, + para: &ParachainSpec, + relay_chain_id: &str, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + let (content, format) = self.read_spec(scoped_fs).await?; + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + if let Some(para_id) = chain_spec_json.get_mut("para_id") { + *para_id = json!(para.id); + }; + if let Some(para_id) = chain_spec_json.get_mut("paraId") { + *para_id = json!(para.id); + }; + + if let Some(relay_chain_id_field) = chain_spec_json.get_mut("relay_chain") { + *relay_chain_id_field = json!(relay_chain_id); + }; + + if let ChainSpecFormat::Plain = format { + let pointer = get_runtime_config_pointer(&chain_spec_json) + .map_err(GeneratorError::ChainSpecGeneration)?; + + // make genesis overrides first. + // override_genesis() + clear_authorities(&pointer, &mut chain_spec_json); + + // Get validators to add as authorities + let validators: Vec<&NodeSpec> = para + .collators + .iter() + .filter(|node| node.is_validator) + .collect(); + + // check chain key types + if chain_spec_json + .pointer(&format!("{}/session", pointer)) + .is_some() + { + add_authorities( + &pointer, + &mut chain_spec_json, + &validators, + KeyType::Session, + ); + } else { + add_aura_authorities(&pointer, &mut chain_spec_json, &validators, KeyType::Aura); + let invulnerables: Vec<&NodeSpec> = para + .collators + .iter() + .filter(|node| node.is_invulnerable) + .collect(); + add_collator_selection(&pointer, &mut chain_spec_json, &invulnerables); + // await addParaCustom(chainSpecFullPathPlain, node); + }; + + // override `parachainInfo/parachainId` + override_parachain_info(&pointer, &mut chain_spec_json, para.id); + + // write spec + let content = serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + self.write_spec(scoped_fs, content).await?; + } else { + // TODO: add a warning here + todo!(); + } + Ok(()) + } + + pub async fn customize_relay<'a, T, U>( + &self, + relaychain: &RelaychainSpec, + _hrmp_channels: &[HrmpChannelConfig], + para_artifacts: Vec>, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + U: AsRef, + { + let (content, format) = self.read_spec(scoped_fs).await?; + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + if let ChainSpecFormat::Plain = format { + // get the config pointer + let pointer = get_runtime_config_pointer(&chain_spec_json) + .map_err(GeneratorError::ChainSpecGeneration)?; + + // make genesis overrides first. 
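For readers less familiar with `serde_json` JSON pointers, here is a small standalone sketch of the pattern used by `customize_para` above and `customize_relay` here, together with the `get_runtime_config_pointer` and `clear_authorities` helpers defined further down. The chain-spec shape is illustrative only, not a real spec:

```rust
use serde_json::{json, Value};

fn main() {
    // Illustrative plain chain-spec fragment (placeholder content).
    let mut spec: Value = json!({
        "genesis": { "runtime": { "session": { "keys": [["addr", "addr", {}]] } } }
    });

    // Same probing order as `get_runtime_config_pointer`: the first pointer that
    // resolves becomes the base for every later mutation.
    let candidates = [
        "/genesis/runtimeGenesisConfigPatch",
        "/genesis/runtime/runtime_genesis_config",
        "/genesis/runtime",
    ];
    let base = candidates
        .into_iter()
        .find(|p| spec.pointer(p).is_some())
        .expect("one of the pointers should resolve");

    // What `clear_authorities` does for session keys before new ones are injected.
    if let Some(runtime) = spec.pointer_mut(base) {
        runtime["session"]["keys"] = json!([]);
    }
    assert_eq!(spec.pointer("/genesis/runtime/session/keys"), Some(&json!([])));
}
```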
+ // override_genesis() + + println!( + "{:#?}", + chain_spec_json.pointer(format!("{}/session/keys", pointer).as_str()) + ); + // Clear authorities + clear_authorities(&pointer, &mut chain_spec_json); + + println!( + "{:#?}", + chain_spec_json.pointer(format!("{}/session/keys", pointer).as_str()) + ); + + // TODO: add to logger + // println!("BALANCES"); + // println!("{:#?}", chain_spec_json.pointer(format!("{}/balances",pointer).as_str())); + // add balances + add_balances(&pointer, &mut chain_spec_json, &relaychain.nodes, 0); + + println!( + "{:#?}", + chain_spec_json.pointer(format!("{}/balances", pointer).as_str()) + ); + + // Get validators to add as authorities + let validators: Vec<&NodeSpec> = relaychain + .nodes + .iter() + .filter(|node| node.is_validator) + .collect(); + + // check chain key types + if chain_spec_json + .pointer(&format!("{}/session", pointer)) + .is_some() + { + add_authorities( + &pointer, + &mut chain_spec_json, + &validators, + KeyType::Session, + ); + } + + // staking && nominators + + // TODO: add to logger + // println!("KEYS"); + // println!("{:#?}", chain_spec_json.pointer(format!("{}/session/keys",pointer).as_str())); + + // add_hrmp_channels + + // paras + for para_genesis_config in para_artifacts.iter() { + add_parachain_to_genesis( + &pointer, + &mut chain_spec_json, + para_genesis_config, + scoped_fs, + ) + .await + .map_err(|e| GeneratorError::ChainSpecGeneration(e.to_string()))?; + } + + // TODO: + // - manage session/aura for keys ( Javier think is done!) + // - staking + // - nominators + // - hrmp_channels + + // write spec + let content = serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + self.write_spec(scoped_fs, content).await?; + } else { + // TODO: add a warning here + } + Ok(()) + } + + pub async fn add_bootnodes<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + bootnodes: &[String], + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + let (content, _) = self.read_spec(scoped_fs).await?; + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + if let Some(bootnodes_on_file) = chain_spec_json.get_mut("bootNodes") { + if let Some(bootnodes_on_file) = bootnodes_on_file.as_array_mut() { + let mut bootnodes_to_add = + bootnodes.iter().map(|bootnode| json!(bootnode)).collect(); + bootnodes_on_file.append(&mut bootnodes_to_add); + } else { + return Err(GeneratorError::ChainSpecGeneration( + "id should be an string in the chain-spec, this is a bug".into(), + )); + }; + } else { + return Err(GeneratorError::ChainSpecGeneration( + "'bootNodes' should be a fields in the chain-spec of the relaychain".into(), + )); + }; + + // write spec + let content = serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + self.write_spec(scoped_fs, content).await?; + + Ok(()) + } +} + +type GenesisNodeKey = (String, String, HashMap); + +async fn is_raw<'a, T>( + file: PathBuf, + scoped_fs: &ScopedFilesystem<'a, T>, +) -> Result +where + T: FileSystem, +{ + let content = scoped_fs.read_to_string(file).await?; + let chain_spec_json: serde_json::Value = serde_json::from_str(&content).unwrap(); + + Ok(chain_spec_json.pointer("/genesis/raw/top").is_some()) +} + +// Internal Chain-spec customizations + +async fn 
add_parachain_to_genesis<'a, T, U>( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + para_genesis_config: &ParaGenesisConfig, + scoped_fs: &ScopedFilesystem<'a, T>, +) -> Result<(), anyhow::Error> +where + T: FileSystem, + U: AsRef, +{ + println!("es:"); + println!("{:?}", para_genesis_config.state_path.as_ref()); + println!("{:?}", para_genesis_config.wasm_path.as_ref()); + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let paras_pointer = if val.get("paras").is_some() { + "/paras/paras" + } else if val.get("parachainsParas").is_some() { + // For retro-compatibility with substrate pre Polkadot 0.9.5 + "/parachainsParas/paras" + } else { + // The config may not contain paras. Since chainspec allows to contain the RuntimeGenesisConfig patch we can inject it. + val["paras"] = json!({ "paras": [] }); + "/paras/paras" + }; + + println!("{:#?}", paras_pointer); + + let paras = val.pointer_mut(paras_pointer).ok_or(anyhow!( + "paras pointer should be valid {:?} ", + paras_pointer + ))?; + let paras_vec = paras + .as_array_mut() + .ok_or(anyhow!("paras should be an array"))?; + + let head = scoped_fs + .read_to_string(para_genesis_config.state_path.as_ref()) + .await?; + let wasm = scoped_fs + .read_to_string(para_genesis_config.wasm_path.as_ref()) + .await?; + // const new_para = [ + // parseInt(para_id), + // [readDataFile(head), readDataFile(wasm), parachain], + // ]; + + paras_vec.push(json!([ + para_genesis_config.id, + [head.trim(), wasm.trim(), para_genesis_config.as_parachain] + ])); + + Ok(()) + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn get_runtime_config_pointer(chain_spec_json: &serde_json::Value) -> Result { + // runtime_genesis_config is no longer in ChainSpec after rococo runtime rework (refer to: https://github.com/paritytech/polkadot-sdk/pull/1256) + // ChainSpec may contain a RuntimeGenesisConfigPatch + let pointers = [ + "/genesis/runtimeGenesisConfigPatch", + "/genesis/runtime/runtime_genesis_config", + "/genesis/runtime", + ]; + + for pointer in pointers { + if chain_spec_json.pointer(pointer).is_some() { + return Ok(pointer.to_string()); + } + } + + Err("Can not find the runtime pointer".into()) +} + +// Override `genesis` key if present +fn override_genesis() { + todo!() +} + +fn clear_authorities(runtime_config_ptr: &str, chain_spec_json: &mut serde_json::Value) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + // clear keys (session, aura, grandpa) + if val.get("session").is_some() { + val["session"]["keys"] = json!([]); + } + + if val.get("aura").is_some() { + val["aura"]["authorities"] = json!([]); + } + + if val.get("grandpa").is_some() { + val["grandpa"]["authorities"] = json!([]); + } + + // clear collatorSelector + if val.get("collatorSelection").is_some() { + val["collatorSelection"]["invulnerables"] = json!([]); + } + + // clear staking + if val.get("staking").is_some() { + val["staking"]["stakers"] = json!([]); + val["staking"]["invulnerables"] = json!([]); + val["staking"]["validatorCount"] = json!(0); + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} +fn add_balances( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &Vec, + staking_min: u128, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let Some(balances) = val.pointer("/balances/balances") else { + // should be a info log + println!("NO 'balances' key in runtime config, skipping..."); + return; + 
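Each entry pushed by `add_parachain_to_genesis` above has a fixed shape, `[para_id, [head, wasm, as_parachain]]`. A tiny sketch with placeholder blobs (the real values come from the exported genesis-state and genesis-wasm artifacts):

```rust
use serde_json::json;

fn main() {
    // Placeholder head/wasm hex blobs; the orchestrator reads them from the
    // per-parachain `genesis-state` / `genesis-wasm` files.
    let (para_id, head, wasm, as_parachain) = (100u32, "0x00", "0x00", true);
    let entry = json!([para_id, [head, wasm, as_parachain]]);
    assert_eq!(entry.to_string(), r#"[100,["0x00","0x00",true]]"#);
}
```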
}; + + // create a balance map + // SAFETY: balances is always an array in chain-spec with items [k,v] + let mut balances_map: HashMap = + serde_json::from_value::>(balances.clone()) + .unwrap() + .iter() + .fold(HashMap::new(), |mut memo, balance| { + memo.insert(balance.0.clone(), balance.1); + memo + }); + + for node in nodes { + if node.initial_balance.eq(&0) { + continue; + }; + + // TODO: handle error here and check the `accounts.accounts` design + let account = node.accounts.accounts.get("sr").unwrap(); + balances_map.insert( + account.address.clone(), + std::cmp::max(node.initial_balance, staking_min), + ); + } + + // convert the map and store again + let new_balances: Vec<(&String, &u128)> = + balances_map.iter().collect::>(); + + val["balances"]["balances"] = json!(new_balances); + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn get_node_keys(node: &NodeSpec) -> GenesisNodeKey { + let sr_account = node.accounts.accounts.get("sr").unwrap(); + let ed_account = node.accounts.accounts.get("ed").unwrap(); + let ec_account = node.accounts.accounts.get("ec").unwrap(); + let mut keys = HashMap::new(); + for k in [ + "babe", + "im_online", + "parachain_validator", + "authority_discovery", + "para_validator", + "para_assignment", + "aura", + "nimbus", + "vrf", + ] { + keys.insert(k.to_string(), sr_account.address.clone()); + } + + keys.insert("grandpa".to_string(), ed_account.address.clone()); + keys.insert("beefy".to_string(), ec_account.address.clone()); + + (sr_account.address.clone(), sr_account.address.clone(), keys) +} +fn add_authorities( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &[&NodeSpec], + _key_type: KeyType, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let keys: Vec = nodes.iter().map(|node| get_node_keys(node)).collect(); + println!("{:#?}", keys); + val["session"]["keys"] = json!(keys); + } else { + unreachable!("pointer to runtime config should be valid!") + } +} +fn add_hrmp_channels( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + _hrmp_channels: &[HrmpChannelConfig], +) { + if let Some(_val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + todo!() + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn add_aura_authorities( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &[&NodeSpec], + _key_type: KeyType, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let keys: Vec = nodes + .iter() + .map(|node| { + node.accounts + .accounts + .get("sr") + .expect("'sr' account should be set at spec computation, this is a bug") + .address + .clone() + }) + .collect(); + println!("{:#?}", keys); + val["aura"]["authorities"] = json!(keys); + } else { + unreachable!("pointer to runtime config should be valid!") + } +} +// TODO: (team) + +// fn add_staking() {} +// fn add_nominators() {} + +// // TODO: (team) we should think a better way to use the decorators from +// // current version (ts). +// fn para_custom() { todo!() } +fn override_parachain_info( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + para_id: u32, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + if let Some(parachain_id) = val.pointer_mut("/parachainInfo/parachainId") { + *parachain_id = json!(para_id) + } else { + // Add warning here! 
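Because `GenesisNodeKey` is a tuple, the `json!(keys)` call in `add_authorities` above serializes each entry as a JSON array. A standalone sketch of the resulting `session.keys` shape, with placeholder addresses standing in for the values produced by `get_node_keys`:

```rust
use std::collections::HashMap;

use serde_json::json;

// Mirrors the `GenesisNodeKey` alias above: (account, account, key-type -> address).
type GenesisNodeKey = (String, String, HashMap<String, String>);

fn main() {
    let mut session_keys = HashMap::new();
    session_keys.insert("babe".to_string(), "<sr25519 address>".to_string());
    session_keys.insert("grandpa".to_string(), "<ed25519 address>".to_string());
    let entry: GenesisNodeKey = (
        "<sr25519 address>".into(),
        "<sr25519 address>".into(),
        session_keys,
    );

    // Tuples serialize as arrays, so one `session.keys` entry becomes:
    // ["<sr25519 address>", "<sr25519 address>", {"babe": "...", "grandpa": "..."}]
    println!("{}", json!([entry]));
}
```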
+ } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} +fn add_collator_selection( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &[&NodeSpec], +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let keys: Vec = nodes + .iter() + .map(|node| { + node.accounts + .accounts + .get("sr") + .expect("'sr' account should be set at spec computation, this is a bug") + .address + .clone() + }) + .collect(); + println!("{:#?}", keys); + // collatorSelection.invulnerables + if let Some(invulnerables) = val.pointer_mut("collatorSelection/invulnerables") { + *invulnerables = json!(keys); + } else { + // TODO: add a nice warning here. + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +#[cfg(test)] +mod tests {} diff --git a/crates/orchestrator/src/generators/command.rs b/crates/orchestrator/src/generators/command.rs new file mode 100644 index 000000000..8848ee1d1 --- /dev/null +++ b/crates/orchestrator/src/generators/command.rs @@ -0,0 +1,337 @@ +use configuration::types::Arg; + +use crate::network_spec::node::NodeSpec; + +pub struct GenCmdOptions<'a> { + pub relay_chain_name: &'a str, + pub cfg_path: &'a str, + pub data_path: &'a str, + pub relay_data_path: &'a str, + pub use_wrapper: bool, + pub bootnode_addr: Vec, +} + +impl<'a> Default for GenCmdOptions<'a> { + fn default() -> Self { + Self { + relay_chain_name: "rococo-local", + cfg_path: "/cfg", + data_path: "/data", + relay_data_path: "/relay-data", + use_wrapper: true, + bootnode_addr: vec![], + } + } +} + +const FLAGS_ADDED_BY_US: [&str; 5] = [ + "--unsafe-rpc-external", + "--no-telemetry", + "--no-mdns", + "--collator", + "--", +]; +const OPS_ADDED_BY_US: [&str; 6] = [ + "--chain", + "--name", + "--rpc-cors", + "--rpc-methods", + "--parachain-id", + "--node-key", +]; + +// TODO: can we abstract this and use only one fn (or at least split and reuse in small fns) +pub fn generate_for_cumulus_node( + node: &NodeSpec, + options: GenCmdOptions, + para_id: u32, + full_p2p_port: u16, +) -> (String, Vec) { + let NodeSpec { + key, + args, + is_validator, + bootnodes_addresses, + .. + } = node; + + let mut tmp_args: Vec = vec!["--node-key".into(), key.clone()]; + + if !args.contains(&Arg::Flag("--prometheus-external".into())) { + tmp_args.push("--prometheus-external".into()) + } + + if *is_validator && !args.contains(&Arg::Flag("--validator".into())) { + tmp_args.push("--collator".into()) + } + + if !bootnodes_addresses.is_empty() { + tmp_args.push("--bootnodes".into()); + let bootnodes = bootnodes_addresses + .iter() + .map(|m| m.to_string()) + .collect::>() + .join(" "); + tmp_args.push(bootnodes) + } + + // ports + tmp_args.push("--prometheus-port".into()); + tmp_args.push(node.prometheus_port.0.to_string()); + + tmp_args.push("--rpc-port".into()); + tmp_args.push(node.rpc_port.0.to_string()); + + tmp_args.push("--listen-addr".into()); + tmp_args.push(format!("/ip4/0.0.0.0/tcp/{}/ws", node.p2p_port.0)); + + let mut collator_args: &[Arg] = &[]; + let mut full_node_args: &[Arg] = &[]; + if !args.is_empty() { + if let Some(index) = args.iter().position(|arg| match arg { + Arg::Flag(flag) => flag.eq("--"), + Arg::Option(..) 
=> false, + }) { + (collator_args, full_node_args) = args.split_at(index); + }; + } + + // set our base path + tmp_args.push("--base-path".into()); + tmp_args.push(options.data_path.into()); + + let node_specific_bootnodes: Vec = node + .bootnodes_addresses + .iter() + .map(|b| b.to_string()) + .collect(); + let full_bootnodes = [node_specific_bootnodes, options.bootnode_addr].concat(); + if !full_bootnodes.is_empty() { + tmp_args.push("--bootnodes".into()); + tmp_args.push(full_bootnodes.join(" ")); + } + + let mut full_node_p2p_needs_to_be_injected = false; + let mut full_node_args_filtered = full_node_args + .iter() + .filter_map(|arg| match arg { + Arg::Flag(flag) => { + if FLAGS_ADDED_BY_US.contains(&flag.as_str()) { + None + } else { + Some(flag.to_owned()) + } + }, + Arg::Option(k, v) => { + if OPS_ADDED_BY_US.contains(&k.as_str()) { + None + } else if k.eq(&"port") && v.eq(&"30333") { + full_node_p2p_needs_to_be_injected = true; + None + } else { + let kv_str = format!("{} {}", k, v); + Some(kv_str) + } + }, + }) + .collect::>(); + + // change p2p port if is the default + full_node_args_filtered.push("--port".into()); + full_node_args_filtered.push(full_p2p_port.to_string()); + + let mut args_filtered = collator_args + .iter() + .filter_map(|arg| match arg { + Arg::Flag(flag) => { + if FLAGS_ADDED_BY_US.contains(&flag.as_str()) { + None + } else { + Some(flag.to_owned()) + } + }, + Arg::Option(k, v) => { + if OPS_ADDED_BY_US.contains(&k.as_str()) { + None + } else { + let kv_str = format!("{} {}", k, v); + Some(kv_str) + } + }, + }) + .collect::>(); + + tmp_args.append(&mut args_filtered); + + let parachain_spec_path = format!("{}/{}.json", options.cfg_path, para_id); + let mut final_args = vec![ + node.command.as_str().to_string(), + "--chain".into(), + parachain_spec_path, + "--name".into(), + node.name.clone(), + "--rpc-cors".into(), + "all".into(), + "--unsafe-rpc-external".into(), + "--rpc-methods".into(), + "unsafe".into(), + ]; + + final_args.append(&mut tmp_args); + + let relaychain_spec_path = format!("{}/{}.json", options.cfg_path, options.relay_chain_name); + let mut full_node_injected: Vec = vec![ + "--".into(), + "--base-path".into(), + options.relay_data_path.into(), + "--chain".into(), + relaychain_spec_path, + "--execution".into(), + "wasm".into(), + ]; + + final_args.append(&mut full_node_injected); + final_args.append(&mut full_node_args_filtered); + + if options.use_wrapper { + ("/cfg/zombie-wrapper.sh".to_string(), final_args) + } else { + (final_args.remove(0), final_args) + } +} + +pub fn generate_for_node( + node: &NodeSpec, + options: GenCmdOptions, + para_id: Option, +) -> (String, Vec) { + let NodeSpec { + key, + args, + is_validator, + bootnodes_addresses, + .. 
+ } = node; + let mut tmp_args: Vec = vec![ + "--node-key".into(), + key.clone(), + // TODO:(team) we should allow to set the telemetry url from config + "--no-telemetry".into(), + ]; + + if !args.contains(&Arg::Flag("--prometheus-external".into())) { + tmp_args.push("--prometheus-external".into()) + } + + if let Some(para_id) = para_id { + tmp_args.push("--parachain-id".into()); + tmp_args.push(para_id.to_string()); + } + + if *is_validator && !args.contains(&Arg::Flag("--validator".into())) { + tmp_args.push("--validator".into()) + } + + if !bootnodes_addresses.is_empty() { + tmp_args.push("--bootnodes".into()); + let bootnodes = bootnodes_addresses + .iter() + .map(|m| m.to_string()) + .collect::>() + .join(" "); + tmp_args.push(bootnodes) + } + + // ports + tmp_args.push("--prometheus-port".into()); + tmp_args.push(node.prometheus_port.0.to_string()); + + tmp_args.push("--rpc-port".into()); + tmp_args.push(node.rpc_port.0.to_string()); + + let listen_value = if let Some(listen_val) = args.iter().find_map(|arg| match arg { + Arg::Flag(_) => None, + Arg::Option(k, v) => { + if k.eq("--listen-addr") { + Some(v) + } else { + None + } + }, + }) { + let mut parts = listen_val.split('/').collect::>(); + // TODO: move this to error + let port_part = parts + .get_mut(4) + .expect("should have at least 5 parts, this is a bug"); + let port_to_use = node.p2p_port.0.to_string(); + *port_part = port_to_use.as_str(); + parts.join("/") + } else { + format!("/ip4/0.0.0.0/tcp/{}/ws", node.p2p_port.0) + }; + + tmp_args.push("--listen-addr".into()); + tmp_args.push(listen_value); + + // set our base path + tmp_args.push("--base-path".into()); + tmp_args.push(options.data_path.into()); + + let node_specific_bootnodes: Vec = node + .bootnodes_addresses + .iter() + .map(|b| b.to_string()) + .collect(); + let full_bootnodes = [node_specific_bootnodes, options.bootnode_addr].concat(); + if !full_bootnodes.is_empty() { + tmp_args.push("--bootnodes".into()); + tmp_args.push(full_bootnodes.join(" ")); + } + + // add the rest of the args + let mut args_filtered = args + .iter() + .filter_map(|arg| match arg { + Arg::Flag(flag) => { + if FLAGS_ADDED_BY_US.contains(&flag.as_str()) { + None + } else { + Some(flag.to_owned()) + } + }, + Arg::Option(k, v) => { + if OPS_ADDED_BY_US.contains(&k.as_str()) { + None + } else { + let kv_str = format!("{} {}", k, v); + Some(kv_str) + } + }, + }) + .collect::>(); + + tmp_args.append(&mut args_filtered); + + let chain_spec_path = format!("{}/{}.json", options.cfg_path, options.relay_chain_name); + let mut final_args = vec![ + node.command.as_str().to_string(), + "--chain".into(), + chain_spec_path, + "--name".into(), + node.name.clone(), + "--rpc-cors".into(), + "all".into(), + "--unsafe-rpc-external".into(), + "--rpc-methods".into(), + "unsafe".into(), + ]; + + final_args.append(&mut tmp_args); + + if options.use_wrapper { + ("/cfg/zombie-wrapper.sh".to_string(), final_args) + } else { + (final_args.remove(0), final_args) + } +} diff --git a/crates/orchestrator/src/generators/errors.rs b/crates/orchestrator/src/generators/errors.rs new file mode 100644 index 000000000..5da0ec75d --- /dev/null +++ b/crates/orchestrator/src/generators/errors.rs @@ -0,0 +1,20 @@ +use provider::ProviderError; +use support::fs::FileSystemError; + +#[derive(Debug, thiserror::Error)] +pub enum GeneratorError { + #[error("Generating key {0} with input {1}")] + KeyGeneration(String, String), + #[error("Generating port {0}, err {1}")] + PortGeneration(u16, String), + #[error("Chain-spec build error: 
{0}")] + ChainSpecGeneration(String), + #[error("Provider error")] + ProviderError(#[from] ProviderError), + #[error("FileSystem error")] + FileSystemError(#[from] FileSystemError), + #[error("Generating identity, err {0}")] + IdentityGeneration(String), + #[error("Generating bootnode address, err {0}")] + BootnodeAddrGeneration(String), +} diff --git a/crates/orchestrator/src/generators/identity.rs b/crates/orchestrator/src/generators/identity.rs new file mode 100644 index 000000000..62ac5f36b --- /dev/null +++ b/crates/orchestrator/src/generators/identity.rs @@ -0,0 +1,41 @@ +use hex::FromHex; +use libp2p::identity::{ed25519, Keypair}; +use sha2::digest::Digest; + +use super::errors::GeneratorError; + +// Generate p2p identity for node +// return `node-key` and `peerId` +pub fn generate(node_name: &str) -> Result<(String, String), GeneratorError> { + let key = hex::encode(sha2::Sha256::digest(node_name)); + + let bytes = <[u8; 32]>::from_hex(key.clone()).map_err(|_| { + GeneratorError::IdentityGeneration("can not transform hex to [u8;32]".into()) + })?; + let sk = ed25519::SecretKey::try_from_bytes(bytes) + .map_err(|_| GeneratorError::IdentityGeneration("can not create sk from bytes".into()))?; + let local_identity: Keypair = ed25519::Keypair::from(sk).into(); + let local_public = local_identity.public(); + let local_peer_id = local_public.to_peer_id(); + + Ok((key, local_peer_id.to_base58())) +} + +#[cfg(test)] +mod tests { + + use super::*; + #[test] + fn generate_for_alice() { + let s = "alice"; + let (key, peer_id) = generate(s).unwrap(); + assert_eq!( + &key, + "2bd806c97f0e00af1a1fc3328fa763a9269723c8db8fac4f93af71db186d6e90" + ); + assert_eq!( + &peer_id, + "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" + ); + } +} diff --git a/crates/orchestrator/src/generators/key.rs b/crates/orchestrator/src/generators/key.rs new file mode 100644 index 000000000..47f81a891 --- /dev/null +++ b/crates/orchestrator/src/generators/key.rs @@ -0,0 +1,108 @@ +use sp_core::{crypto::SecretStringError, ecdsa, ed25519, sr25519, Pair}; + +use super::errors::GeneratorError; +use crate::shared::types::{Accounts, NodeAccount}; +const KEY_SCHEME: [&str; 3] = ["sr", "ed", "ec"]; + +pub fn generate_pair(seed: &str) -> Result { + let pair = T::Pair::from_string(seed, None)?; + Ok(pair) +} + +pub fn generate(seed: &str) -> Result { + let mut accounts: Accounts = Default::default(); + for scheme in KEY_SCHEME { + let (address, public_key) = match scheme { + "sr" => { + let pair = generate_pair::(seed) + .map_err(|_| GeneratorError::KeyGeneration(scheme.into(), seed.into()))?; + (pair.public().to_string(), hex::encode(pair.public())) + }, + "ed" => { + let pair = generate_pair::(seed) + .map_err(|_| GeneratorError::KeyGeneration(scheme.into(), seed.into()))?; + (pair.public().to_string(), hex::encode(pair.public())) + }, + "ec" => { + let pair = generate_pair::(seed) + .map_err(|_| GeneratorError::KeyGeneration(scheme.into(), seed.into()))?; + (pair.public().to_string(), hex::encode(pair.public())) + }, + _ => unreachable!(), + }; + accounts.insert(scheme.into(), NodeAccount::new(address, public_key)); + } + Ok(accounts) +} + +#[cfg(test)] +mod tests { + + use super::*; + #[test] + fn generate_for_alice() { + use sp_core::crypto::Ss58Codec; + let s = "Alice"; + let seed = format!("//{}", s); + + let pair = generate_pair::(&seed).unwrap(); + assert_eq!( + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + pair.public().to_ss58check() + ); + + let pair = generate_pair::(&seed).unwrap(); + assert_eq!( + 
"0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1", + format!("0x{}", hex::encode(pair.public())) + ); + + let pair = generate_pair::(&seed).unwrap(); + assert_eq!( + "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", + pair.public().to_ss58check() + ); + } + + #[test] + fn generate_pair_invalid_should_fail() { + let s = "Alice"; + let seed = s.to_string(); + + let pair = generate_pair::(&seed); + assert!(pair.is_err()); + } + + #[test] + fn generate_invalid_should_fail() { + let s = "Alice"; + let seed = s.to_string(); + + let pair = generate(&seed); + assert!(pair.is_err()); + assert!(matches!(pair, Err(GeneratorError::KeyGeneration(_, _)))); + } + + #[test] + fn generate_work() { + let s = "Alice"; + let seed = format!("//{}", s); + + let pair = generate(&seed).unwrap(); + let sr = pair.get("sr").unwrap(); + let ed = pair.get("ed").unwrap(); + let ec = pair.get("ec").unwrap(); + assert_eq!( + sr.address, + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" + ); + assert_eq!( + ed.address, + "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu" + ); + assert_eq!( + format!("0x{}", ec.public_key), + "0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1" + ); + } +} diff --git a/crates/orchestrator/src/generators/keystore.rs b/crates/orchestrator/src/generators/keystore.rs new file mode 100644 index 000000000..91ac4b80c --- /dev/null +++ b/crates/orchestrator/src/generators/keystore.rs @@ -0,0 +1,86 @@ +use std::{ + path::{Path, PathBuf}, + vec, +}; + +use hex::encode; +use support::fs::FileSystem; + +use super::errors::GeneratorError; +use crate::{shared::types::NodeAccounts, ScopedFilesystem}; + +const PREFIXES: [&str; 11] = [ + "aura", "babe", "imon", "gran", "audi", "asgn", "para", "beef", // Beffy + "nmbs", // Nimbus + "rand", // Randomness (Moonbeam) + "rate", // Equilibrium rate module +]; + +pub async fn generate<'a, T>( + acc: &NodeAccounts, + node_files_path: impl AsRef, + scoped_fs: &ScopedFilesystem<'a, T>, +) -> Result, GeneratorError> +where + T: FileSystem, +{ + // Create local keystore + scoped_fs.create_dir_all(node_files_path.as_ref()).await?; + let mut filenames = vec![]; + + let f = PREFIXES.map(|k| { + // let filename = encode(k); + + let filename = match k { + // TODO: add logic for isAssetHubPolkadot + // "aura" => { + // "" + // }, + "babe" | "imon" | "audi" | "asgn" | "para" | "nmbs" | "rand" | "aura" => { + let pk = acc + .accounts + .get("sr") + .expect("Key 'sr' should be set for node, this is a bug.") + .public_key + .as_str(); + format!("{}{}", encode(k), pk) + }, + "gran" | "rate" => { + let pk = acc + .accounts + .get("ed") + .expect("Key 'ed' should be set for node, this is a bug.") + .public_key + .as_str(); + format!("{}{}", encode(k), pk) + }, + "beef" => { + let pk = acc + .accounts + .get("ec") + .expect("Key 'ec' should be set for node, this is a bug.") + .public_key + .as_str(); + format!("{}{}", encode(k), pk) + }, + _ => unreachable!(), + }; + let file_path = PathBuf::from(format!( + "{}/{}", + node_files_path.as_ref().to_string_lossy(), + filename + )); + filenames.push(PathBuf::from(filename)); + let content = format!("\"{}\"", acc.seed); + scoped_fs.write(file_path, content) + }); + + // TODO: implement logic for filter keys + // node.keystoreKeyTypes?.forEach((key_type: string) => { + // if (DEFAULT_KEYSTORE_KEY_TYPES.includes(key_type)) + // keystore_key_types[key_type] = default_keystore_key_types[key_type]; + // }); + + futures::future::try_join_all(f).await?; + Ok(filenames) +} diff --git 
a/crates/orchestrator/src/generators/para_artifact.rs b/crates/orchestrator/src/generators/para_artifact.rs new file mode 100644 index 000000000..ea867e54d --- /dev/null +++ b/crates/orchestrator/src/generators/para_artifact.rs @@ -0,0 +1,94 @@ +use std::path::{Path, PathBuf}; + +use provider::{ + types::{GenerateFileCommand, GenerateFilesOptions, TransferedFile}, + DynNamespace, +}; +use support::fs::FileSystem; + +use super::errors::GeneratorError; +use crate::ScopedFilesystem; + +#[derive(Debug, Clone)] +pub(crate) enum ParaArtifactType { + Wasm, + State, +} + +#[derive(Debug, Clone)] +pub(crate) enum ParaArtifactBuildOption { + Path(String), + Command(String), +} + +/// Parachain artifact (could be either the genesis state or genesis wasm) +#[derive(Debug, Clone)] +pub struct ParaArtifact { + artifact_type: ParaArtifactType, + build_option: ParaArtifactBuildOption, + artifact_path: Option, +} + +impl ParaArtifact { + pub(crate) fn new( + artifact_type: ParaArtifactType, + build_option: ParaArtifactBuildOption, + ) -> Self { + Self { + artifact_type, + build_option, + artifact_path: None, + } + } + + pub(crate) fn artifact_path(&self) -> Option<&PathBuf> { + self.artifact_path.as_ref() + } + + pub(crate) async fn build<'a, T>( + &mut self, + chain_spec_path: Option>, + artifact_path: impl AsRef, + ns: &DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + match &self.build_option { + ParaArtifactBuildOption::Path(path) => { + let t = TransferedFile { + local_path: PathBuf::from(path), + remote_path: artifact_path.as_ref().into(), + }; + scoped_fs.copy_files(vec![&t]).await?; + }, + ParaArtifactBuildOption::Command(cmd) => { + let generate_subcmd = match self.artifact_type { + ParaArtifactType::Wasm => "export-genesis-wasm", + ParaArtifactType::State => "export-genesis-state", + }; + + let mut args: Vec = vec![generate_subcmd.into()]; + if let Some(chain_spec_path) = chain_spec_path { + let full_chain_path = format!( + "{}/{}", + ns.base_dir().to_string_lossy(), + chain_spec_path.as_ref().to_string_lossy() + ); + args.push("--chain".into()); + args.push(full_chain_path) + } + + let artifact_path_ref = artifact_path.as_ref(); + let generate_command = + GenerateFileCommand::new(cmd.as_str(), artifact_path_ref).args(args); + let options = GenerateFilesOptions::new(vec![generate_command]); + ns.generate_files(options).await?; + self.artifact_path = Some(artifact_path_ref.into()); + }, + } + + Ok(()) + } +} diff --git a/crates/orchestrator/src/generators/port.rs b/crates/orchestrator/src/generators/port.rs new file mode 100644 index 000000000..f6bd519fb --- /dev/null +++ b/crates/orchestrator/src/generators/port.rs @@ -0,0 +1,45 @@ +use std::net::TcpListener; + +use configuration::shared::types::Port; + +use super::errors::GeneratorError; +use crate::shared::types::ParkedPort; + +// TODO: (team), we want to continue support ws_port? 
No +enum PortTypes { + Rpc, + P2P, + Prometheus, +} + +pub fn generate(port: Option) -> Result { + let port = port.unwrap_or(0); + let listener = TcpListener::bind(format!("0.0.0.0:{port}")) + .map_err(|_e| GeneratorError::PortGeneration(port, "Can't bind".into()))?; + let port = listener + .local_addr() + .expect("We should always get the local_addr from the listener, please report as bug") + .port(); + Ok(ParkedPort::new(port, listener)) +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn generate_random() { + let port = generate(None).unwrap(); + let listener = port.1.write().unwrap(); + + assert!(listener.is_some()); + } + + #[test] + fn generate_fixed_port() { + let port = generate(Some(33056)).unwrap(); + let listener = port.1.write().unwrap(); + + assert!(listener.is_some()); + assert_eq!(port.0, 33056); + } +} diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs index 7d12d9af8..93189d349 100644 --- a/crates/orchestrator/src/lib.rs +++ b/crates/orchestrator/src/lib.rs @@ -1,14 +1,424 @@ -pub fn add(left: usize, right: usize) -> usize { - left + right +// TODO(Javier): Remove when we implement the logic in the orchestrator to spawn with the provider. +#![allow(dead_code)] + +mod errors; +mod generators; +mod network; +mod network_spec; +mod shared; +mod spawner; + +use std::{ + path::{Path, PathBuf}, + time::Duration, +}; + +use configuration::{NetworkConfig, RegistrationStrategy}; +use errors::OrchestratorError; +use network::{parachain::Parachain, relaychain::Relaychain, Network}; +use network_spec::{parachain::ParachainSpec, NetworkSpec}; +use provider::{constants::LOCALHOST, types::TransferedFile, Provider}; +use support::fs::{FileSystem, FileSystemError}; +use tokio::time::timeout; + +use crate::{generators::chain_spec::ParaGenesisConfig, spawner::SpawnNodeCtx}; + +pub struct Orchestrator +where + T: FileSystem + Sync + Send, + P: Provider, +{ + filesystem: T, + provider: P, +} + +impl Orchestrator +where + T: FileSystem + Sync + Send + Clone, + P: Provider, +{ + pub fn new(filesystem: T, provider: P) -> Self { + Self { + filesystem, + provider, + } + } + + pub async fn spawn( + &self, + network_config: NetworkConfig, + ) -> Result, OrchestratorError> { + let global_timeout = network_config.global_settings().network_spawn_timeout(); + let network_spec = NetworkSpec::from_config(&network_config).await?; + + timeout( + Duration::from_secs(global_timeout.into()), + self.spawn_inner(network_spec), + ) + .await + .map_err(|_| OrchestratorError::GlobalTimeOut(global_timeout))? 
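The port generator added above in `generators/port.rs` relies on a standard trick: binding to port 0 makes the OS pick a free port, and keeping the `TcpListener` open "parks" the port so nothing else grabs it before the node starts. A minimal standalone sketch of the same idiom:

```rust
use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Port 0 asks the OS for any free port; keeping `listener` alive reserves it.
    let listener = TcpListener::bind("0.0.0.0:0")?;
    let port = listener.local_addr()?.port();
    println!("parked port {port}");
    Ok(())
}
```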
+ } + + async fn spawn_inner( + &self, + mut network_spec: NetworkSpec, + ) -> Result, OrchestratorError> { + // main driver for spawn the network + println!("{:#?}", network_spec); + + // create namespace + let ns = self.provider.create_namespace().await?; + + println!("ns: {:#?}", ns.id()); + println!("base_dir: {:#?}", ns.base_dir()); + + // TODO: noop for native + // Static setup + // ns.static_setup().await?; + + let base_dir = ns.base_dir().to_string_lossy(); + let scoped_fs = ScopedFilesystem::new(&self.filesystem, &base_dir); + // Create chain-spec for relaychain + network_spec + .relaychain + .chain_spec + .build(&ns, &scoped_fs) + .await?; + + // TODO: move to logger + // println!("{:#?}", network_spec.relaychain.chain_spec); + + // Create parachain artifacts (chain-spec, wasm, state) + let relay_chain_id = network_spec + .relaychain + .chain_spec + .read_chain_id(&scoped_fs) + .await?; + let relay_chain_name = network_spec.relaychain.chain.as_str(); + // TODO: if we don't need to register this para we can skip it + for para in network_spec.parachains.iter_mut() { + let para_cloned = para.clone(); + let chain_spec_raw_path = if let Some(chain_spec) = para.chain_spec.as_mut() { + chain_spec.build(&ns, &scoped_fs).await?; + // TODO: move to logger + // println!("{:#?}", chain_spec); + + chain_spec + .customize_para(¶_cloned, &relay_chain_id, &scoped_fs) + .await?; + chain_spec.build_raw(&ns).await?; + + let chain_spec_raw_path = + chain_spec + .raw_path() + .ok_or(OrchestratorError::InvariantError( + "chain-spec raw path should be set now", + ))?; + Some(chain_spec_raw_path) + } else { + None + }; + + // TODO: this need to be abstracted in a single call to generate_files. + scoped_fs.create_dir(para.id.to_string()).await?; + // create wasm/state + para.genesis_state + .build( + chain_spec_raw_path, + format!("{}/genesis-state", para.id), + &ns, + &scoped_fs, + ) + .await?; + para.genesis_wasm + .build( + chain_spec_raw_path, + format!("{}/genesis-wasm", para.id), + &ns, + &scoped_fs, + ) + .await?; + } + + let para_to_register_in_genesis: Vec<&ParachainSpec> = network_spec + .parachains + .iter() + .filter(|para| match ¶.registration_strategy { + RegistrationStrategy::InGenesis => true, + RegistrationStrategy::UsingExtrinsic => false, + }) + .collect(); + + let mut para_artifacts = vec![]; + for para in para_to_register_in_genesis { + let genesis_config = ParaGenesisConfig { + state_path: para.genesis_state.artifact_path().ok_or( + OrchestratorError::InvariantError( + "artifact path for state must be set at this point", + ), + )?, + wasm_path: para.genesis_wasm.artifact_path().ok_or( + OrchestratorError::InvariantError( + "artifact path for wasm must be set at this point", + ), + )?, + id: para.id, + as_parachain: para.onboard_as_parachain, + }; + para_artifacts.push(genesis_config) + } + + // Customize relaychain + network_spec + .relaychain + .chain_spec + .customize_relay( + &network_spec.relaychain, + &network_spec.hrmp_channels, + para_artifacts, + &scoped_fs, + ) + .await?; + + // Build raw version + network_spec.relaychain.chain_spec.build_raw(&ns).await?; + println!("{:#?}", network_spec.relaychain.chain_spec); + + // get the bootnodes to spawn first and calculate the bootnode string for use later + let mut bootnodes = vec![]; + let mut relaynodes = vec![]; + network_spec.relaychain.nodes.iter().for_each(|node| { + if node.is_bootnode { + bootnodes.push(node) + } else { + relaynodes.push(node) + } + }); + + if bootnodes.is_empty() { + bootnodes.push(relaynodes.remove(0)) + 
} + + // TODO: we want to still supporting spawn a dedicated bootnode?? + let mut ctx = SpawnNodeCtx { + chain_id: &relay_chain_id, + parachain_id: None, + chain: relay_chain_name, + role: ZombieRole::Node, + ns: &ns, + scoped_fs: &scoped_fs, + parachain: None, + bootnodes_addr: &vec![], + }; + + let global_files_to_inject = vec![TransferedFile { + local_path: PathBuf::from(format!( + "{}/{relay_chain_name}.json", + ns.base_dir().to_string_lossy() + )), + remote_path: PathBuf::from(format!("/cfg/{relay_chain_name}.json")), + }]; + + let r = Relaychain::new( + relay_chain_name.to_string(), + relay_chain_id.clone(), + PathBuf::from(network_spec.relaychain.chain_spec.raw_path().ok_or( + OrchestratorError::InvariantError("chain-spec raw path should be set now"), + )?), + ); + let mut network = + Network::new_with_relay(r, ns.clone(), self.filesystem.clone(), network_spec.clone()); + + let spawning_tasks = bootnodes + .iter_mut() + .map(|node| spawner::spawn_node(node, global_files_to_inject.clone(), &ctx)); + + // Calculate the bootnodes addr from the running nodes + let mut bootnodes_addr: Vec = vec![]; + for node in futures::future::try_join_all(spawning_tasks).await? { + bootnodes_addr.push( + // TODO: we just use localhost for now + generators::generate_node_bootnode_addr( + &node.spec.peer_id, + &LOCALHOST, + node.spec.p2p_port.0, + node.inner.args().as_ref(), + &node.spec.p2p_cert_hash, + )?, + ); + // Add the node to the `Network` instance + network.add_running_node(node, None); + } + + ctx.bootnodes_addr = &bootnodes_addr; + + // spawn the rest of the nodes (TODO: in batches) + let spawning_tasks = relaynodes + .iter() + .map(|node| spawner::spawn_node(node, global_files_to_inject.clone(), &ctx)); + for node in futures::future::try_join_all(spawning_tasks).await? { + // Add the node to the `Network` instance + network.add_running_node(node, None); + } + + // Add the bootnodes to the relaychain spec file + network_spec + .relaychain + .chain_spec + .add_bootnodes(&scoped_fs, &bootnodes_addr) + .await?; + + // spawn paras + for para in network_spec.parachains.iter() { + // parachain id is used for the keystore + let parachain_id = if let Some(chain_spec) = para.chain_spec.as_ref() { + let id = chain_spec.read_chain_id(&scoped_fs).await?; + let raw_path = chain_spec + .raw_path() + .ok_or(OrchestratorError::InvariantError( + "chain-spec path should be set by now.", + ))?; + let mut running_para = Parachain::with_chain_spec(para.id, &id, raw_path); + if let Some(chain_name) = chain_spec.chain_name() { + running_para.chain = Some(chain_name.to_string()); + } + network.add_para(running_para); + + Some(id) + } else { + network.add_para(Parachain::new(para.id)); + + None + }; + + let ctx_para = SpawnNodeCtx { + parachain: Some(para), + parachain_id: parachain_id.as_deref(), + role: if para.is_cumulus_based { + ZombieRole::CumulusCollator + } else { + ZombieRole::Collator + }, + bootnodes_addr: &vec![], + ..ctx.clone() + }; + let mut para_files_to_inject = global_files_to_inject.clone(); + if para.is_cumulus_based { + para_files_to_inject.push(TransferedFile { + local_path: PathBuf::from(format!( + "{}/{}.json", + ns.base_dir().to_string_lossy(), + para.id + )), + remote_path: PathBuf::from(format!("/cfg/{}.json", para.id)), + }); + } + + let spawning_tasks = para + .collators + .iter() + .map(|node| spawner::spawn_node(node, para_files_to_inject.clone(), &ctx_para)); + // TODO: Add para to Network instance + for node in futures::future::try_join_all(spawning_tasks).await? 
{ + network.add_running_node(node, Some(para.id)); + } + } + + // TODO (future): + + // - add-ons (introspector/tracing/etc) + + // - verify nodes (clean metrics cache?) + + // - write zombie.json state file (we should defined in a way we can load later) + + Ok(network) + } +} + +// TODO: get the fs from `DynNamespace` will make this not needed +// but the FileSystem trait isn't object-safe so we can't pass around +// as `dyn FileSystem`. We can refactor or using some `erase` techniques +// to resolve this and remove this struct +// TODO (Loris): Probably we could have a .scoped(base_dir) method on the +// filesystem itself (the trait), so it will return this and we can move this +// directly to the support crate, it can be useful in the future +#[derive(Clone, Debug)] +pub struct ScopedFilesystem<'a, FS: FileSystem> { + fs: &'a FS, + base_dir: &'a str, } -#[cfg(test)] -mod tests { - use super::*; +impl<'a, FS: FileSystem> ScopedFilesystem<'a, FS> { + fn new(fs: &'a FS, base_dir: &'a str) -> Self { + Self { fs, base_dir } + } + + async fn copy_files(&self, files: Vec<&TransferedFile>) -> Result<(), FileSystemError> { + for file in files { + let full_remote_path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + file.remote_path.to_string_lossy() + )); + self.fs + .copy(file.local_path.as_path(), full_remote_path) + .await?; + } + Ok(()) + } + + async fn read_to_string(&self, file: impl AsRef) -> Result { + let full_path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + file.as_ref().to_string_lossy() + )); + let content = self.fs.read_to_string(full_path).await?; + Ok(content) + } - #[test] - fn it_works() { - let result = add(2, 2); - assert_eq!(result, 4); + async fn create_dir(&self, path: impl AsRef) -> Result<(), FileSystemError> { + let path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + path.as_ref().to_string_lossy() + )); + self.fs.create_dir(path).await.map_err(Into::into) + } + + async fn create_dir_all(&self, path: impl AsRef) -> Result<(), FileSystemError> { + let path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + path.as_ref().to_string_lossy() + )); + self.fs.create_dir_all(path).await.map_err(Into::into) + } + + async fn write( + &self, + path: impl AsRef, + contents: impl AsRef<[u8]> + Send, + ) -> Result<(), FileSystemError> { + let path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + path.as_ref().to_string_lossy() + )); + self.fs.write(path, contents).await.map_err(Into::into) } } + +#[derive(Clone, Debug)] +pub enum ZombieRole { + Temp, + Node, + Bootnode, + Collator, + CumulusCollator, + Companion, +} + +// re-export +pub use network::AddNodeOpts; diff --git a/crates/orchestrator/src/network.rs b/crates/orchestrator/src/network.rs new file mode 100644 index 000000000..6c72370d4 --- /dev/null +++ b/crates/orchestrator/src/network.rs @@ -0,0 +1,254 @@ +pub mod node; +pub mod parachain; +pub mod relaychain; + +use std::{collections::HashMap, path::PathBuf, time::Duration}; + +use configuration::{ + shared::node::EnvVar, + types::{Arg, Command, Image, Port}, +}; +use provider::{types::TransferedFile, DynNamespace}; +use support::fs::FileSystem; + +use self::{node::NetworkNode, parachain::Parachain, relaychain::Relaychain}; +use crate::{ + network_spec::{self, NetworkSpec}, + shared::types::ChainDefaultContext, + spawner::{self, SpawnNodeCtx}, + ScopedFilesystem, ZombieRole, +}; + +pub struct Network { + ns: DynNamespace, + filesystem: T, + relay: Relaychain, + initial_spec: NetworkSpec, + parachains: HashMap, + nodes_by_name: HashMap, +} + 
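ScopedFilesystem above simply prefixes every relative path with the namespace base dir, which for the native provider is a per-run "zombie_<uuid>" directory under the system temp dir. A minimal usage sketch from inside the orchestrator crate (the helpers are crate-private; the base dir and contents are illustrative, and fs stands for any support::fs::FileSystem implementation such as LocalFileSystem):

    let scoped_fs = ScopedFilesystem::new(&fs, "/tmp/zombie_1234");
    scoped_fs.create_dir("100").await?;                   // -> /tmp/zombie_1234/100
    scoped_fs.write("100/genesis-wasm", b"0x00").await?;  // -> /tmp/zombie_1234/100/genesis-wasm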
+impl std::fmt::Debug for Network { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Network") + .field("ns", &"ns_skipped") + .field("relay", &self.relay) + .field("initial_spec", &self.initial_spec) + .field("parachains", &self.parachains) + .field("nodes_by_name", &self.nodes_by_name) + .finish() + } +} + +#[derive(Default, Debug, Clone)] +pub struct AddNodeOpts { + pub image: Option, + pub command: Option, + pub args: Vec, + pub env: Vec, + pub is_validator: bool, + pub rpc_port: Option, + pub prometheus_port: Option, + pub p2p_port: Option, +} + +impl Network { + pub(crate) fn new_with_relay( + relay: Relaychain, + ns: DynNamespace, + fs: T, + initial_spec: NetworkSpec, + ) -> Self { + Self { + ns, + filesystem: fs, + relay, + initial_spec, + parachains: Default::default(), + nodes_by_name: Default::default(), + } + } + + // Pub API + + // Teardown the network + // destroy() + + pub async fn add_node( + &mut self, + name: impl Into, + options: AddNodeOpts, + para_id: Option, + ) -> Result<(), anyhow::Error> { + // build context + // let (maybe_para_chain_id, chain_context, para_spec, role) = + let (chain_context, role, maybe_para_chain_id, para_spec, maybe_para_chain_spec_path) = + if let Some(para_id) = para_id { + let spec = self + .initial_spec + .parachains + .iter() + .find(|para| para.id == para_id) + .ok_or(anyhow::anyhow!(format!("parachain: {para_id} not found!")))?; + let role = if spec.is_cumulus_based { + ZombieRole::CumulusCollator + } else { + ZombieRole::Collator + }; + let chain_context = ChainDefaultContext { + default_command: spec.default_command.as_ref(), + default_image: spec.default_image.as_ref(), + default_resources: spec.default_resources.as_ref(), + default_db_snapshot: spec.default_db_snapshot.as_ref(), + default_args: spec.default_args.iter().collect(), + }; + let parachain = self + .parachains + .get(¶_id) + .ok_or(anyhow::anyhow!(format!("parachain: {para_id} not found!")))?; + + // (parachain.chain_id.clone(), chain_context, Some(spec), role) + ( + chain_context, + role, + parachain.chain_id.clone(), + Some(spec), + parachain.chain_spec_path.clone(), + ) + } else { + let spec = &self.initial_spec.relaychain; + let chain_context = ChainDefaultContext { + default_command: spec.default_command.as_ref(), + default_image: spec.default_image.as_ref(), + default_resources: spec.default_resources.as_ref(), + default_db_snapshot: spec.default_db_snapshot.as_ref(), + default_args: spec.default_args.iter().collect(), + }; + (chain_context, ZombieRole::Node, None, None, None) + }; + + let node_spec = + network_spec::node::NodeSpec::from_ad_hoc(name.into(), options, &chain_context)?; + let base_dir = self.ns.base_dir().to_string_lossy(); + let scoped_fs = ScopedFilesystem::new(&self.filesystem, &base_dir); + + // TODO: we want to still supporting spawn a dedicated bootnode?? 
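Since AddNodeOpts derives Default, callers only need to override the fields they care about when adding an ad-hoc node to a running network. A hedged sketch (node name and command are illustrative; the Command conversion is assumed to be the configuration crate's fallible TryFrom<&str>):

    let opts = AddNodeOpts {
        command: Some("polkadot".try_into()?),
        is_validator: true,
        ..Default::default()
    };
    // None targets the relaychain; Some(para_id) would add the node as a collator of that para.
    network.add_node("dave", opts, None).await?;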
+ let ctx = SpawnNodeCtx { + chain_id: &self.relay.chain_id, + parachain_id: maybe_para_chain_id.as_deref(), + chain: &self.relay.chain, + role, + ns: &self.ns, + scoped_fs: &scoped_fs, + parachain: para_spec, + bootnodes_addr: &vec![], + }; + + let mut global_files_to_inject = vec![TransferedFile { + local_path: PathBuf::from(format!( + "{}/{}.json", + self.ns.base_dir().to_string_lossy(), + self.relay.chain + )), + remote_path: PathBuf::from(format!("/cfg/{}.json", self.relay.chain)), + }]; + + if let Some(para_spec_path) = maybe_para_chain_spec_path { + global_files_to_inject.push(TransferedFile { + local_path: PathBuf::from(format!( + "{}/{}", + self.ns.base_dir().to_string_lossy(), + para_spec_path.to_string_lossy() + )), + remote_path: PathBuf::from(format!( + "/cfg/{}.json", + para_id.ok_or(anyhow::anyhow!( + "para_id should be valid here, this is a bug!" + ))? + )), + }); + } + + let node = spawner::spawn_node(&node_spec, global_files_to_inject, &ctx).await?; + self.add_running_node(node, None); + + Ok(()) + } + + // This should include at least of collator? + // add_parachain() + + // deregister and stop the collator? + // remove_parachain() + + // Node actions + pub async fn pause_node(&self, node_name: impl Into) -> Result<(), anyhow::Error> { + let node_name = node_name.into(); + if let Some(node) = self.nodes_by_name.get(&node_name) { + node.inner.pause().await?; + Ok(()) + } else { + Err(anyhow::anyhow!("can't find the node!")) + } + } + + pub async fn resume_node(&self, node_name: impl Into) -> Result<(), anyhow::Error> { + let node_name = node_name.into(); + if let Some(node) = self.nodes_by_name.get(&node_name) { + node.inner.resume().await?; + Ok(()) + } else { + Err(anyhow::anyhow!("can't find the node!")) + } + } + + pub async fn restart_node( + &self, + node_name: impl Into, + after: Option, + ) -> Result<(), anyhow::Error> { + let node_name = node_name.into(); + if let Some(node) = self.nodes_by_name.get(&node_name) { + node.inner.restart(after).await?; + Ok(()) + } else { + Err(anyhow::anyhow!("can't find the node!")) + } + } + + // Internal API + pub(crate) fn add_running_node(&mut self, node: NetworkNode, para_id: Option) { + if let Some(para_id) = para_id { + if let Some(para) = self.parachains.get_mut(¶_id) { + para.collators.push(node.clone()); + } else { + unreachable!() + } + } else { + self.relay.nodes.push(node.clone()); + } + // TODO: we should hold a ref to the node in the vec in the future. + let node_name = node.name.clone(); + self.nodes_by_name.insert(node_name, node); + } + + pub(crate) fn add_para(&mut self, para: Parachain) { + self.parachains.insert(para.para_id, para); + } + + pub(crate) fn id(&self) -> &str { + self.ns.id() + } + + pub(crate) fn relaychain(&self) -> &Relaychain { + &self.relay + } + + pub(crate) fn parachain(&self, para_id: u32) -> Option<&Parachain> { + self.parachains.get(¶_id) + } + + pub(crate) fn parachains(&self) -> Vec<&Parachain> { + self.parachains.values().collect() + } +} diff --git a/crates/orchestrator/src/network/node.rs b/crates/orchestrator/src/network/node.rs new file mode 100644 index 000000000..66956d6cb --- /dev/null +++ b/crates/orchestrator/src/network/node.rs @@ -0,0 +1,42 @@ +use provider::DynNode; + +use crate::network_spec::node::NodeSpec; + +#[derive(Clone)] +pub struct NetworkNode { + pub(crate) inner: DynNode, + // TODO: do we need the full spec here? + // Maybe a reduce set of values. 
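The pause/resume/restart helpers defined above are thin wrappers over the provider's node handle, looked up by name. A short usage sketch (node names are illustrative; `after` is read here as a delay before the restart):

    use std::time::Duration;

    network.pause_node("alice").await?;
    network.resume_node("alice").await?;
    network.restart_node("bob", Some(Duration::from_secs(5))).await?;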
+ pub(crate) spec: NodeSpec, + pub(crate) name: String, + pub(crate) ws_uri: String, + pub(crate) prometheus_uri: String, +} + +impl NetworkNode { + fn new(inner: DynNode, spec: NodeSpec, _ip: String) -> Self { + let name = spec.name.clone(); + let ws_uri = "".into(); + let prometheus_uri = "".into(); + + Self { + inner, + spec, + name, + ws_uri, + prometheus_uri, + } + } +} + +impl std::fmt::Debug for NetworkNode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NetworkNode") + .field("inner", &"inner_skipped") + .field("spec", &self.spec) + .field("name", &self.name) + .field("ws_uri", &self.ws_uri) + .field("prometheus_uri", &self.prometheus_uri) + .finish() + } +} diff --git a/crates/orchestrator/src/network/parachain.rs b/crates/orchestrator/src/network/parachain.rs new file mode 100644 index 000000000..235bc59a2 --- /dev/null +++ b/crates/orchestrator/src/network/parachain.rs @@ -0,0 +1,38 @@ +use std::path::{Path, PathBuf}; + +use super::node::NetworkNode; + +#[derive(Debug)] +pub struct Parachain { + pub(crate) chain: Option, + pub(crate) para_id: u32, + pub(crate) chain_id: Option, + pub(crate) chain_spec_path: Option, + pub(crate) collators: Vec, +} + +impl Parachain { + pub(crate) fn new(para_id: u32) -> Self { + Self { + chain: None, + para_id, + chain_id: None, + chain_spec_path: None, + collators: Default::default(), + } + } + + pub(crate) fn with_chain_spec( + para_id: u32, + chain_id: impl Into, + chain_spec_path: impl AsRef, + ) -> Self { + Self { + para_id, + chain: None, + chain_id: Some(chain_id.into()), + chain_spec_path: Some(chain_spec_path.as_ref().into()), + collators: Default::default(), + } + } +} diff --git a/crates/orchestrator/src/network/relaychain.rs b/crates/orchestrator/src/network/relaychain.rs new file mode 100644 index 000000000..748861652 --- /dev/null +++ b/crates/orchestrator/src/network/relaychain.rs @@ -0,0 +1,22 @@ +use std::path::PathBuf; + +use super::node::NetworkNode; + +#[derive(Debug)] +pub struct Relaychain { + pub(crate) chain: String, + pub(crate) chain_id: String, + pub(crate) chain_spec_path: PathBuf, + pub(crate) nodes: Vec, +} + +impl Relaychain { + pub(crate) fn new(chain: String, chain_id: String, chain_spec_path: PathBuf) -> Self { + Self { + chain, + chain_id, + chain_spec_path, + nodes: Default::default(), + } + } +} diff --git a/crates/orchestrator/src/network_spec.rs b/crates/orchestrator/src/network_spec.rs new file mode 100644 index 000000000..e82650880 --- /dev/null +++ b/crates/orchestrator/src/network_spec.rs @@ -0,0 +1,98 @@ +use configuration::{GlobalSettings, HrmpChannelConfig, NetworkConfig}; + +use crate::errors::OrchestratorError; + +pub mod node; +pub mod parachain; +pub mod relaychain; + +use self::{parachain::ParachainSpec, relaychain::RelaychainSpec}; + +#[derive(Debug, Clone)] +pub struct NetworkSpec { + /// Relaychain configuration. + pub(crate) relaychain: RelaychainSpec, + + /// Parachains configurations. + pub(crate) parachains: Vec, + + /// HRMP channels configurations. 
+ pub(crate) hrmp_channels: Vec, + + /// Global settings + pub(crate) global_settings: GlobalSettings, +} + +impl NetworkSpec { + pub async fn from_config( + network_config: &NetworkConfig, + ) -> Result { + let mut errs = vec![]; + let relaychain = RelaychainSpec::from_config(network_config.relaychain())?; + let mut parachains = vec![]; + + // TODO: move to `fold` or map+fold + for para_config in network_config.parachains() { + match ParachainSpec::from_config(para_config) { + Ok(para) => parachains.push(para), + Err(err) => errs.push(err), + } + } + + Ok(NetworkSpec { + relaychain, + parachains, + hrmp_channels: network_config + .hrmp_channels() + .into_iter() + .cloned() + .collect(), + global_settings: network_config.global_settings().clone(), + }) + } +} + +#[cfg(test)] +mod tests { + + #[tokio::test] + async fn small_network_config_get_spec() { + use configuration::NetworkConfigBuilder; + + use super::*; + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("rococo-local") + .with_default_command("polkadot") + .with_node(|node| node.with_name("alice")) + .with_node(|node| { + node.with_name("bob") + .with_command("polkadot1") + .validator(false) + }) + }) + .with_parachain(|p| { + p.with_id(100) + .with_default_command("adder-collator") + .with_collator(|c| c.with_name("collator1")) + }) + .build() + .unwrap(); + + let network_spec = NetworkSpec::from_config(&config).await.unwrap(); + let alice = network_spec.relaychain.nodes.get(0).unwrap(); + let bob = network_spec.relaychain.nodes.get(1).unwrap(); + assert_eq!(alice.command.as_str(), "polkadot"); + assert_eq!(bob.command.as_str(), "polkadot1"); + assert!(alice.is_validator); + assert!(!bob.is_validator); + + // paras + assert_eq!(network_spec.parachains.len(), 1); + let para_100 = network_spec.parachains.get(0).unwrap(); + assert_eq!(para_100.id, 100); + + println!("{:#?}", network_spec); + } +} diff --git a/crates/orchestrator/src/network_spec/node.rs b/crates/orchestrator/src/network_spec/node.rs new file mode 100644 index 000000000..15d9580b2 --- /dev/null +++ b/crates/orchestrator/src/network_spec/node.rs @@ -0,0 +1,218 @@ +use configuration::shared::{ + node::{EnvVar, NodeConfig}, + resources::Resources, + types::{Arg, AssetLocation, Command, Image}, +}; +use multiaddr::Multiaddr; + +use crate::{ + errors::OrchestratorError, + generators, + network::AddNodeOpts, + shared::types::{ChainDefaultContext, NodeAccounts, ParkedPort}, +}; + +/// A node configuration, with fine-grained configuration options. +#[derive(Debug, Clone)] +pub struct NodeSpec { + // Node name (should be unique or an index will be appended). + pub(crate) name: String, + + /// Node key, used for compute the p2p identity. + pub(crate) key: String, + + // libp2p local identity + pub(crate) peer_id: String, + + /// Accounts to be injected in the keystore. + pub(crate) accounts: NodeAccounts, + + /// Image to run (only podman/k8s). Override the default. + pub(crate) image: Option, + + /// Command to run the node. Override the default. + pub(crate) command: Command, + + /// Arguments to use for node. Appended to default. + pub(crate) args: Vec, + + /// Wether the node is a validator. + pub(crate) is_validator: bool, + + /// Whether the node keys must be added to invulnerables. + pub(crate) is_invulnerable: bool, + + /// Whether the node is a bootnode. + pub(crate) is_bootnode: bool, + + /// Node initial balance present in genesis. 
+ pub(crate) initial_balance: u128, + + /// Environment variables to set (inside pod for podman/k8s, inside shell for native). + pub(crate) env: Vec, + + /// List of node's bootnodes addresses to use. Appended to default. + pub(crate) bootnodes_addresses: Vec, + + /// Default resources. Override the default. + pub(crate) resources: Option, + + /// Websocket port to use. + pub(crate) ws_port: ParkedPort, + + /// RPC port to use. + pub(crate) rpc_port: ParkedPort, + + /// Prometheus port to use. + pub(crate) prometheus_port: ParkedPort, + + /// P2P port to use. + pub(crate) p2p_port: ParkedPort, + + /// libp2p cert hash to use with `webrtc` transport. + pub(crate) p2p_cert_hash: Option, + + /// Database snapshot. Override the default. + pub(crate) db_snapshot: Option, +} + +impl NodeSpec { + pub fn from_config( + node_config: &NodeConfig, + chain_context: &ChainDefaultContext, + ) -> Result { + // Check first if the image is set at node level, then try with the default + let image = node_config.image().or(chain_context.default_image).cloned(); + + // Check first if the command is set at node level, then try with the default + let command = if let Some(cmd) = node_config.command() { + cmd.clone() + } else if let Some(cmd) = chain_context.default_command { + cmd.clone() + } else { + return Err(OrchestratorError::InvalidNodeConfig( + node_config.name().into(), + "command".to_string(), + )); + }; + + // If `args` is set at `node` level use them + // otherwise use the default_args (can be empty). + let args: Vec = if node_config.args().is_empty() { + chain_context + .default_args + .iter() + .map(|x| x.to_owned().clone()) + .collect() + } else { + node_config.args().into_iter().cloned().collect() + }; + + let (key, peer_id) = generators::generate_node_identity(node_config.name())?; + + let mut name = node_config.name().to_string(); + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = generators::generate_node_keys(&seed)?; + let accounts = NodeAccounts { seed, accounts }; + + Ok(Self { + name: node_config.name().to_string(), + key, + peer_id, + image, + command, + args, + is_validator: node_config.is_validator(), + is_invulnerable: node_config.is_invulnerable(), + is_bootnode: node_config.is_bootnode(), + initial_balance: node_config.initial_balance(), + env: node_config.env().into_iter().cloned().collect(), + bootnodes_addresses: node_config + .bootnodes_addresses() + .into_iter() + .cloned() + .collect(), + resources: node_config.resources().cloned(), + p2p_cert_hash: node_config.p2p_cert_hash().map(str::to_string), + db_snapshot: node_config.db_snapshot().cloned(), + accounts, + ws_port: generators::generate_node_port(node_config.ws_port())?, + rpc_port: generators::generate_node_port(node_config.rpc_port())?, + prometheus_port: generators::generate_node_port(node_config.prometheus_port())?, + p2p_port: generators::generate_node_port(node_config.p2p_port())?, + }) + } + + pub fn from_ad_hoc( + name: impl Into, + options: AddNodeOpts, + chain_context: &ChainDefaultContext, + ) -> Result { + // Check first if the image is set at node level, then try with the default + let image = if let Some(img) = options.image { + Some(img.clone()) + } else { + chain_context.default_image.cloned() + }; + + let name = name.into(); + // Check first if the command is set at node level, then try with the default + let command = if let Some(cmd) = options.command { + cmd.clone() + } else if let Some(cmd) = chain_context.default_command { + cmd.clone() + } else { + return 
Err(OrchestratorError::InvalidNodeConfig( + name, + "command".to_string(), + )); + }; + + // If `args` is set at `node` level use them + // otherwise use the default_args (can be empty). + let args: Vec = if options.args.is_empty() { + chain_context + .default_args + .iter() + .map(|x| x.to_owned().clone()) + .collect() + } else { + options.args + }; + + let (key, peer_id) = generators::generate_node_identity(&name)?; + + let mut name_capitalized = name.clone(); + let seed = format!( + "//{}{name_capitalized}", + name_capitalized.remove(0).to_uppercase() + ); + let accounts = generators::generate_node_keys(&seed)?; + let accounts = NodeAccounts { seed, accounts }; + + // + Ok(Self { + name, + key, + peer_id, + image, + command, + args, + is_validator: options.is_validator, + is_invulnerable: false, + is_bootnode: false, + initial_balance: 0, + env: options.env, + bootnodes_addresses: vec![], + resources: None, + p2p_cert_hash: None, + db_snapshot: None, + accounts, + // should be deprecated now! + ws_port: generators::generate_node_port(None)?, + rpc_port: generators::generate_node_port(options.rpc_port)?, + prometheus_port: generators::generate_node_port(options.prometheus_port)?, + p2p_port: generators::generate_node_port(options.p2p_port)?, + }) + } +} diff --git a/crates/orchestrator/src/network_spec/parachain.rs b/crates/orchestrator/src/network_spec/parachain.rs new file mode 100644 index 000000000..d5334df5c --- /dev/null +++ b/crates/orchestrator/src/network_spec/parachain.rs @@ -0,0 +1,182 @@ +use configuration::{ + shared::resources::Resources, + types::{Arg, AssetLocation, Command, Image}, + ParachainConfig, RegistrationStrategy, +}; + +use super::node::NodeSpec; +use crate::{ + errors::OrchestratorError, + generators::{ + chain_spec::{ChainSpec, Context}, + para_artifact::*, + }, + shared::types::ChainDefaultContext, +}; + +#[derive(Debug, Clone)] +pub struct ParachainSpec { + // `name` of the parachain (used in some corner cases) + // name: Option, + /// Parachain id + pub(crate) id: u32, + + /// Default command to run the node. Can be overriden on each node. + pub(crate) default_command: Option, + + /// Default image to use (only podman/k8s). Can be overriden on each node. + pub(crate) default_image: Option, + + /// Default resources. Can be overriden on each node. + pub(crate) default_resources: Option, + + /// Default database snapshot. Can be overriden on each node. + pub(crate) default_db_snapshot: Option, + + /// Default arguments to use in nodes. Can be overriden on each node. 
+ pub(crate) default_args: Vec, + + /// Chain-spec, only needed by cumulus based paras + pub(crate) chain_spec: Option, + + /// Registration strategy to use + pub(crate) registration_strategy: RegistrationStrategy, + + /// Oboard as parachain or parathread + pub(crate) onboard_as_parachain: bool, + + /// Is the parachain cumulus-based + pub(crate) is_cumulus_based: bool, + + /// Initial balance + pub(crate) initial_balance: u128, + + /// Genesis state (head) to register the parachain + pub(crate) genesis_state: ParaArtifact, + + /// Genesis wasm to register the parachain + pub(crate) genesis_wasm: ParaArtifact, + + /// Collators to spawn + pub(crate) collators: Vec, +} + +impl ParachainSpec { + pub fn from_config(config: &ParachainConfig) -> Result { + let main_cmd = if let Some(cmd) = config.default_command() { + cmd + } else if let Some(first_node) = config.collators().first() { + let Some(cmd) = first_node.command() else { + return Err(OrchestratorError::InvalidConfig("Parachain, either default_command or command in the first node needs to be set.".to_string())); + }; + + cmd + } else { + return Err(OrchestratorError::InvalidConfig( + "Parachain without nodes and default_command isn't set.".to_string(), + )); + }; + + let chain_spec = if config.is_cumulus_based() { + // we need a chain-spec + let chain_name = if let Some(chain_name) = config.chain() { + chain_name.as_str() + } else { + "" + }; + + let chain_spec_builder = if chain_name.is_empty() { + // if the chain don't have name use the id for the name of the file + ChainSpec::new(config.id().to_string(), Context::Para) + } else { + ChainSpec::new(chain_name, Context::Para) + }; + let chain_spec_builder = chain_spec_builder.set_chain_name(chain_name); + + if let Some(chain_spec_path) = config.chain_spec_path() { + Some(chain_spec_builder.asset_location(chain_spec_path.clone())) + } else { + // TODO: Do we need to add the posibility to set the command to use? + // Currently (v1) is possible but when is set is set to the default command. 
+ Some(chain_spec_builder.command(main_cmd.as_str())) + } + } else { + None + }; + + // build the `node_specs` + let chain_context = ChainDefaultContext { + default_command: config.default_command(), + default_image: config.default_image(), + default_resources: config.default_resources(), + default_db_snapshot: config.default_db_snapshot(), + default_args: config.default_args(), + }; + + // We want to track the errors for all the nodes and report them ones + let mut errs: Vec = Default::default(); + let mut collators: Vec = Default::default(); + config.collators().iter().for_each(|node_config| { + match NodeSpec::from_config(node_config, &chain_context) { + Ok(node) => collators.push(node), + Err(err) => errs.push(err), + } + }); + + let genesis_state = if let Some(path) = config.genesis_state_path() { + ParaArtifact::new( + ParaArtifactType::State, + ParaArtifactBuildOption::Path(path.to_string()), + ) + } else { + let cmd = if let Some(cmd) = config.genesis_state_generator() { + cmd + } else { + main_cmd + }; + ParaArtifact::new( + ParaArtifactType::State, + ParaArtifactBuildOption::Command(cmd.as_str().into()), + ) + }; + + let genesis_wasm = if let Some(path) = config.genesis_wasm_path() { + ParaArtifact::new( + ParaArtifactType::Wasm, + ParaArtifactBuildOption::Path(path.to_string()), + ) + } else { + let cmd = if let Some(cmd) = config.genesis_wasm_generator() { + cmd + } else { + main_cmd + }; + ParaArtifact::new( + ParaArtifactType::Wasm, + ParaArtifactBuildOption::Command(cmd.as_str().into()), + ) + }; + + let para_spec = ParachainSpec { + id: config.id(), + default_command: config.default_command().cloned(), + default_image: config.default_image().cloned(), + default_resources: config.default_resources().cloned(), + default_db_snapshot: config.default_db_snapshot().cloned(), + default_args: config.default_args().into_iter().cloned().collect(), + chain_spec, + registration_strategy: config + .registration_strategy() + .unwrap_or(&RegistrationStrategy::InGenesis) + .clone(), + onboard_as_parachain: config.onboard_as_parachain(), + is_cumulus_based: config.is_cumulus_based(), + initial_balance: config.initial_balance(), + genesis_state, + genesis_wasm, + collators, + }; + + Ok(para_spec) + } +} diff --git a/crates/orchestrator/src/network_spec/relaychain.rs b/crates/orchestrator/src/network_spec/relaychain.rs new file mode 100644 index 000000000..1f1fd6080 --- /dev/null +++ b/crates/orchestrator/src/network_spec/relaychain.rs @@ -0,0 +1,113 @@ +use configuration::{ + shared::{ + resources::Resources, + types::{Arg, AssetLocation, Chain, Command, Image}, + }, + RelaychainConfig, +}; + +use super::node::NodeSpec; +use crate::{ + errors::OrchestratorError, + generators::chain_spec::{ChainSpec, Context}, + shared::types::ChainDefaultContext, +}; + +/// A relaychain configuration spec +#[derive(Debug, Clone)] +pub struct RelaychainSpec { + /// Chain to use (e.g. rococo-local). + pub(crate) chain: Chain, + + /// Default command to run the node. Can be overriden on each node. + pub(crate) default_command: Option, + + /// Default image to use (only podman/k8s). Can be overriden on each node. + pub(crate) default_image: Option, + + /// Default resources. Can be overriden on each node. + pub(crate) default_resources: Option, + + /// Default database snapshot. Can be overriden on each node. + pub(crate) default_db_snapshot: Option, + + /// Default arguments to use in nodes. Can be overriden on each node. 
+ pub(crate) default_args: Vec, + + // chain_spec_path: Option, + pub(crate) chain_spec: ChainSpec, + + /// Set the count of nominators to generator (used with PoS networks). + pub(crate) random_nominators_count: u32, + + /// Set the max nominators value (used with PoS networks). + pub(crate) max_nominations: u8, + + /// Nodes to run. + pub(crate) nodes: Vec, +} + +impl RelaychainSpec { + pub fn from_config(config: &RelaychainConfig) -> Result { + // Relaychain main command to use, in order: + // set as `default_command` or + // use the command of the first node. + // If non of those is set, return an error. + let main_cmd = config + .default_command() + .or(config.nodes().first().and_then(|node| node.command())) + .ok_or(OrchestratorError::InvalidConfig( + "Relaychain, either default_command or first node with a command needs to be set." + .to_string(), + ))?; + + let chain_spec = ChainSpec::new(config.chain().as_str(), Context::Relay) + .set_chain_name(config.chain().as_str()); + let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() { + chain_spec.asset_location(chain_spec_path.clone()) + } else { + // TODO: Do we need to add the posibility to set the command to use? + // Currently (v1) is possible but when is set is set to the default command. + chain_spec.command(main_cmd.as_str()) + }; + + // build the `node_specs` + let chain_context = ChainDefaultContext { + default_command: config.default_command(), + default_image: config.default_image(), + default_resources: config.default_resources(), + default_db_snapshot: config.default_db_snapshot(), + default_args: config.default_args(), + }; + + let (nodes, mut errs) = config + .nodes() + .iter() + .map(|node_config| NodeSpec::from_config(node_config, &chain_context)) + .fold((vec![], vec![]), |(mut nodes, mut errs), result| { + match result { + Ok(node) => nodes.push(node), + Err(err) => errs.push(err), + } + (nodes, errs) + }); + + if !errs.is_empty() { + // TODO: merge errs, maybe return something like Result> + return Err(errs.swap_remove(0)); + } + + Ok(RelaychainSpec { + chain: config.chain().clone(), + default_command: config.default_command().cloned(), + default_image: config.default_image().cloned(), + default_resources: config.default_resources().cloned(), + default_db_snapshot: config.default_db_snapshot().cloned(), + default_args: config.default_args().into_iter().cloned().collect(), + chain_spec, + random_nominators_count: config.random_nominators_count().unwrap_or(0), + max_nominations: config.max_nominations().unwrap_or(24), + nodes, + }) + } +} diff --git a/crates/orchestrator/src/shared.rs b/crates/orchestrator/src/shared.rs new file mode 100644 index 000000000..cd408564e --- /dev/null +++ b/crates/orchestrator/src/shared.rs @@ -0,0 +1 @@ +pub mod types; diff --git a/crates/orchestrator/src/shared/types.rs b/crates/orchestrator/src/shared/types.rs new file mode 100644 index 000000000..506dc1578 --- /dev/null +++ b/crates/orchestrator/src/shared/types.rs @@ -0,0 +1,57 @@ +use std::{ + collections::HashMap, + net::TcpListener, + sync::{Arc, RwLock}, +}; + +pub type Accounts = HashMap; +use configuration::shared::{ + resources::Resources, + types::{Arg, AssetLocation, Command, Image, Port}, +}; + +#[derive(Debug, Clone, PartialEq)] +pub struct NodeAccount { + pub address: String, + pub public_key: String, +} + +impl NodeAccount { + pub fn new(addr: impl Into, pk: impl Into) -> Self { + Self { + address: addr.into(), + public_key: pk.into(), + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct 
NodeAccounts { + pub(crate) seed: String, + pub(crate) accounts: Accounts, +} + +#[derive(Clone, Debug)] +pub struct ParkedPort(pub(crate) Port, pub(crate) Arc>>); + +impl ParkedPort { + pub(crate) fn new(port: u16, listener: TcpListener) -> ParkedPort { + let listener = Arc::new(RwLock::new(Some(listener))); + ParkedPort(port, listener) + } + + pub(crate) fn drop_listener(&self) { + // drop the listener will allow the running node to start listenen connections + let mut l = self.1.write().unwrap(); + *l = None; + } +} + +#[derive(Debug, Clone)] +pub struct ChainDefaultContext<'a> { + pub default_command: Option<&'a Command>, + pub default_image: Option<&'a Image>, + pub default_resources: Option<&'a Resources>, + pub default_db_snapshot: Option<&'a AssetLocation>, + pub default_args: Vec<&'a Arg>, +} diff --git a/crates/orchestrator/src/spawner.rs b/crates/orchestrator/src/spawner.rs new file mode 100644 index 000000000..cd5b31ff5 --- /dev/null +++ b/crates/orchestrator/src/spawner.rs @@ -0,0 +1,164 @@ +use std::path::PathBuf; + +use provider::{ + constants::LOCALHOST, + types::{SpawnNodeOptions, TransferedFile}, + DynNamespace, +}; +use support::fs::FileSystem; + +use crate::{ + generators, + network::node::NetworkNode, + network_spec::{node::NodeSpec, parachain::ParachainSpec}, + ScopedFilesystem, ZombieRole, +}; + +#[derive(Clone)] +pub struct SpawnNodeCtx<'a, T: FileSystem> { + /// Relaychain id, from the chain-spec (e.g rococo_local_testnet) + pub(crate) chain_id: &'a str, + // Parachain id, from the chain-spec (e.g local_testnet) + pub(crate) parachain_id: Option<&'a str>, + /// Relaychain chain name (e.g rococo-local) + pub(crate) chain: &'a str, + /// Role of the node in the network + pub(crate) role: ZombieRole, + /// Ref to the namespace + pub(crate) ns: &'a DynNamespace, + /// Ref to an scoped filesystem (encapsulate fs actions inside the ns directory) + pub(crate) scoped_fs: &'a ScopedFilesystem<'a, T>, + /// Ref to a parachain (used to spawn collators) + pub(crate) parachain: Option<&'a ParachainSpec>, + /// The string represenation of the bootnode addres to pass to nodes + pub(crate) bootnodes_addr: &'a Vec, +} + +pub async fn spawn_node<'a, T>( + node: &NodeSpec, + mut files_to_inject: Vec, + ctx: &SpawnNodeCtx<'a, T>, +) -> Result +where + T: FileSystem, +{ + let mut created_paths = vec![]; + // Create and inject the keystore IFF + // - The node is validator in the relaychain + // - The node is collator (encoded as validator) and the parachain is cumulus_based + // (parachain_id) should be set then. + if node.is_validator && (ctx.parachain.is_none() || ctx.parachain_id.is_some()) { + // Generate keystore for node + let node_files_path = if let Some(para) = ctx.parachain { + para.id.to_string() + } else { + node.name.clone() + }; + let key_filenames = + generators::generate_node_keystore(&node.accounts, &node_files_path, ctx.scoped_fs) + .await + .unwrap(); + + // Paths returned are relative to the base dir, we need to convert into + // fullpaths to inject them in the nodes. 
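The loop that follows turns each generated keystore file into a TransferedFile, mapping the copy staged under the namespace dir to the node's keystore directory. Roughly, for a relay validator named alice on rococo-local it looks like the sketch below (the key file name is a placeholder; real names come from generate_node_keystore):

    use std::path::PathBuf;

    let f = TransferedFile {
        local_path: PathBuf::from("/tmp/zombie_1234/alice/KEY_FILE_PLACEHOLDER"),
        remote_path: PathBuf::from("/data/chains/rococo_local_testnet/keystore/KEY_FILE_PLACEHOLDER"),
    };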
+ let remote_keystore_chain_id = if let Some(id) = ctx.parachain_id { + id + } else { + ctx.chain_id + }; + + for key_filename in key_filenames { + let f = TransferedFile { + local_path: PathBuf::from(format!( + "{}/{}/{}", + ctx.ns.base_dir().to_string_lossy(), + node_files_path, + key_filename.to_string_lossy() + )), + remote_path: PathBuf::from(format!( + "/data/chains/{}/keystore/{}", + remote_keystore_chain_id, + key_filename.to_string_lossy() + )), + }; + files_to_inject.push(f); + } + created_paths.push(PathBuf::from(format!( + "/data/chains/{}/keystore", + remote_keystore_chain_id + ))); + } + + let base_dir = format!("{}/{}", ctx.ns.base_dir().to_string_lossy(), &node.name); + let cfg_path = format!("{}/cfg", &base_dir); + let data_path = format!("{}/data", &base_dir); + let relay_data_path = format!("{}/relay-data", &base_dir); + let gen_opts = generators::GenCmdOptions { + relay_chain_name: ctx.chain, + cfg_path: &cfg_path, // TODO: get from provider/ns + data_path: &data_path, // TODO: get from provider + relay_data_path: &relay_data_path, // TODO: get from provider + use_wrapper: false, // TODO: get from provider + bootnode_addr: ctx.bootnodes_addr.clone(), + }; + + let (cmd, args) = match ctx.role { + // Collator should be `non-cumulus` one (e.g adder/undying) + ZombieRole::Node | ZombieRole::Collator => { + + let maybe_para_id = ctx.parachain.map(|para| para.id); + + generators::generate_node_command(node, gen_opts, maybe_para_id) + }, + ZombieRole::CumulusCollator => { + let para = ctx.parachain.expect("parachain must be part of the context, this is a bug"); + let full_p2p = generators::generate_node_port(None)?; + generators::generate_node_command_cumulus(node, gen_opts, para.id, full_p2p.0) + } + _ => unreachable!() + // TODO: do we need those? + // ZombieRole::Bootnode => todo!(), + // ZombieRole::Companion => todo!(), + }; + + println!("\n"); + println!("🚀 {}, spawning.... 
with command:", node.name); + println!("{cmd} {}", args.join(" ")); + + let spawn_ops = SpawnNodeOptions { + name: node.name.clone(), + command: cmd, + args, + env: node + .env + .iter() + .map(|env| (env.name.clone(), env.value.clone())) + .collect(), + injected_files: files_to_inject, + created_paths, + }; + + // Drops the port parking listeners before spawn + node.p2p_port.drop_listener(); + node.rpc_port.drop_listener(); + node.prometheus_port.drop_listener(); + + let running_node = ctx.ns.spawn_node(spawn_ops).await?; + + let ws_uri = format!("ws://{}:{}", LOCALHOST, node.rpc_port.0); + let prometheus_uri = format!("http://{}:{}/metrics", LOCALHOST, node.prometheus_port.0); + println!("🚀 {}, should be running now", node.name); + println!( + "🚀 {} : direct link https://polkadot.js.org/apps/?rpc={ws_uri}#/explorer", + node.name + ); + println!("🚀 {} : metrics link {prometheus_uri}", node.name); + println!("\n"); + Ok(NetworkNode { + inner: running_node, + spec: node.clone(), + name: node.name.clone(), + ws_uri, + prometheus_uri, + }) +} diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index b6f3e2098..ef664d3f2 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -24,9 +24,18 @@ pub enum ProviderError { #[error("Error running command '{0}': {1}")] RunCommandError(String, anyhow::Error), + #[error("Invalid network configuration field {0}")] + InvalidConfig(String), + + #[error("Can recover node: {0} info, field: {1}")] + MissingNodeInfo(String, String), + #[error("Duplicated node name: {0}")] DuplicatedNodeName(String), + #[error("File generation failed: {0}")] + FileGenerationFailed(anyhow::Error), + #[error(transparent)] FileSystemError(#[from] FileSystemError), @@ -36,9 +45,6 @@ pub enum ProviderError { #[error("Script with path {0} not found")] ScriptNotFound(PathBuf), - #[error("File generation failed: {0}")] - FileGenerationFailed(anyhow::Error), - #[error("Failed to retrieve process ID for node '{0}'")] ProcessIdRetrievalFailed(String), @@ -88,6 +94,12 @@ type ExecutionResult = Result; pub trait ProviderNode { fn name(&self) -> &str; + fn command(&self) -> &str; + + fn args(&self) -> Vec<&String>; + + async fn ip(&self) -> Result; + fn base_dir(&self) -> &PathBuf; fn config_dir(&self) -> &PathBuf; @@ -130,3 +142,7 @@ pub trait ProviderNode { } pub type DynNode = Arc; + +// re-export +pub use native::NativeProvider; +pub use shared::{constants, types}; diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index b73c4605b..8609852e9 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -31,6 +31,7 @@ use tokio::{ use uuid::Uuid; use crate::{ + constants::LOCALHOST, shared::constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, DynNamespace, DynNode, ExecutionResult, GenerateFileCommand, GenerateFilesOptions, Provider, ProviderCapabilities, ProviderError, ProviderNamespace, ProviderNode, RunCommandOptions, @@ -59,6 +60,10 @@ impl NativeProvider { pub fn new(filesystem: FS) -> Self { NativeProvider { capabilities: ProviderCapabilities::new(), + // NOTE: temp_dir in linux return `/tmp` but on mac something like + // `/var/folders/rz/1cyx7hfj31qgb98d8_cg7jwh0000gn/T/`, having + // one `trailing slash` and the other no can cause issues if + // you try to build a fullpath by concatenate. Use Pathbuf to prevent the issue. 
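The note above is the motivation for the PathBuf::from_iter change that follows: std::env::temp_dir() comes back without a trailing slash on Linux but with one on macOS, so string concatenation can produce a double separator. A small sketch of the difference (values illustrative):

    use std::path::PathBuf;

    let tmp = std::env::temp_dir();   // "/tmp" on Linux, "/var/folders/.../T/" on macOS
    let id = "zombie_1234";
    // String concatenation may yield ".../T//zombie_1234" on macOS:
    let _concat = format!("{}/{}", tmp.to_string_lossy(), id);
    // Joining through PathBuf inserts the separator only when needed:
    let base_dir = PathBuf::from_iter([&tmp, &PathBuf::from(id)]);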
tmp_dir: std::env::temp_dir(), filesystem, inner: Arc::new(RwLock::new(NativeProviderInner { @@ -94,7 +99,7 @@ impl Provider for NativeProvider let id = format!("zombie_{}", Uuid::new_v4()); let mut inner = self.inner.write().await; - let base_dir = PathBuf::from(format!("{}/{}", self.tmp_dir.to_string_lossy(), &id)); + let base_dir = PathBuf::from_iter([&self.tmp_dir, &PathBuf::from(&id)]); self.filesystem.create_dir(&base_dir).await?; let namespace = NativeNamespace { @@ -169,22 +174,40 @@ impl ProviderNamespace for Nativ let config_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_CONFIG_DIR)); let data_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_DATA_DIR)); let scripts_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_SCRIPTS_DIR)); - self.filesystem.create_dir(&base_dir).await?; + // NOTE: in native this base path already exist + self.filesystem.create_dir_all(&base_dir).await?; try_join!( self.filesystem.create_dir(&config_dir), self.filesystem.create_dir(&data_dir), self.filesystem.create_dir(&scripts_dir), )?; + // Created needed paths + let ops_fut: Vec<_> = options + .created_paths + .iter() + .map(|created_path| { + self.filesystem.create_dir_all(format!( + "{}{}", + &base_dir.to_string_lossy(), + created_path.to_string_lossy() + )) + }) + .collect(); + try_join_all(ops_fut).await?; + // copy injected files - let mut futures = vec![]; - for file in options.injected_files { - futures.push(self.filesystem.copy( - file.local_path, - format!("{}{}", base_dir_raw, file.remote_path.to_string_lossy()), - )); - } - try_join_all(futures).await?; + let ops_fut: Vec<_> = options + .injected_files + .iter() + .map(|file| { + self.filesystem.copy( + &file.local_path, + format!("{}{}", base_dir_raw, file.remote_path.to_string_lossy()), + ) + }) + .collect(); + try_join_all(ops_fut).await?; let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = create_process_with_log_tasks( @@ -234,6 +257,7 @@ impl ProviderNamespace for Nativ args: vec!["-c".to_string(), "while :; do sleep 1; done".to_string()], env: vec![], injected_files: options.injected_files, + created_paths: vec![], }) .await?; @@ -244,6 +268,21 @@ impl ProviderNamespace for Nativ local_output_path, } in options.commands { + // TODO: move to logger + println!("{:#?}, {:#?}", command, args); + println!("{:#?}", self.base_dir.to_string_lossy()); + println!("{:#?}", local_output_path.as_os_str()); + let local_output_full_path = format!( + "{}{}{}", + self.base_dir.to_string_lossy(), + if local_output_path.starts_with("/") { + "" + } else { + "/" + }, + local_output_path.to_string_lossy() + ); + match temp_node .run_command(RunCommandOptions { command, args, env }) .await @@ -251,14 +290,7 @@ impl ProviderNamespace for Nativ { Ok(contents) => self .filesystem - .write( - format!( - "{}{}", - self.base_dir.to_string_lossy(), - local_output_path.to_string_lossy() - ), - contents, - ) + .write(local_output_full_path, contents) .await .map_err(|err| ProviderError::FileGenerationFailed(err.into()))?, Err((_, msg)) => Err(ProviderError::FileGenerationFailed(anyhow!("{msg}")))?, @@ -320,6 +352,18 @@ impl ProviderNode for NativeNode &self.name } + fn command(&self) -> &str { + &self.command + } + + fn args(&self) -> Vec<&String> { + self.args.iter().collect::>() + } + + async fn ip(&self) -> Result { + Ok(LOCALHOST) + } + fn base_dir(&self) -> &PathBuf { &self.base_dir } diff --git a/crates/provider/src/shared/constants.rs b/crates/provider/src/shared/constants.rs index e76f5353d..eeaf37541 100644 --- 
a/crates/provider/src/shared/constants.rs +++ b/crates/provider/src/shared/constants.rs @@ -7,7 +7,7 @@ pub const NODE_DATA_DIR: &str = "/data"; /// Directory for node scripts pub const NODE_SCRIPTS_DIR: &str = "/scripts"; /// Localhost ip -pub const _LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); +pub const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); /// The port substrate listens for p2p connections on pub const _P2P_PORT: u16 = 30333; /// The remote port prometheus can be accessed with diff --git a/crates/provider/src/shared/types.rs b/crates/provider/src/shared/types.rs index 1219b20ec..1c8466af8 100644 --- a/crates/provider/src/shared/types.rs +++ b/crates/provider/src/shared/types.rs @@ -23,7 +23,12 @@ pub struct SpawnNodeOptions { pub command: String, pub args: Vec, pub env: Vec<(String, String)>, + // TODO: naming pub injected_files: Vec, + /// Paths to create before start the node (e.g keystore) + /// should be created with `create_dir_all` in order + /// to create the full path even when we have missing parts + pub created_paths: Vec, } impl SpawnNodeOptions { @@ -37,6 +42,7 @@ impl SpawnNodeOptions { args: vec![], env: vec![], injected_files: vec![], + created_paths: vec![], } } @@ -70,6 +76,7 @@ impl SpawnNodeOptions { } } +#[derive(Debug)] pub struct GenerateFileCommand { pub command: String, pub args: Vec, @@ -113,6 +120,7 @@ impl GenerateFileCommand { } } +#[derive(Debug)] pub struct GenerateFilesOptions { pub commands: Vec, pub injected_files: Vec, @@ -218,6 +226,8 @@ impl RunScriptOptions { } } +// TODO(team): I think we can rename it to FileMap? +#[derive(Debug, Clone)] pub struct TransferedFile { pub local_path: PathBuf, pub remote_path: PathBuf, @@ -234,3 +244,14 @@ impl TransferedFile { } } } + +impl std::fmt::Display for TransferedFile { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "File to transfer (local: {}, remote: {})", + self.local_path.display(), + self.remote_path.display() + ) + } +}
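The new Display impl for TransferedFile gives callers a readable one-liner for logging transfers. A quick check of the format (paths illustrative):

    use std::path::PathBuf;

    let f = TransferedFile {
        local_path: PathBuf::from("/tmp/zombie_1234/rococo-local.json"),
        remote_path: PathBuf::from("/cfg/rococo-local.json"),
    };
    assert_eq!(
        f.to_string(),
        "File to transfer (local: /tmp/zombie_1234/rococo-local.json, remote: /cfg/rococo-local.json)"
    );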