diff --git a/.cargo/config.toml b/.cargo/config.toml index df790f03322..aff4bee9fbe 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,3 @@ [alias] # Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped. -custom-clippy = "clippy --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -D warnings" +custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -D warnings" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c1509406f8a..83bad9cfdc6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,7 @@ jobs: - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0 - name: Check rustdoc links - run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items --all-features + run: RUSTDOCFLAGS="--deny rustdoc::broken_intra_doc_links --deny warnings" cargo doc --verbose --workspace --no-deps --all-features --document-private-items check-clippy: runs-on: ubuntu-latest @@ -165,7 +165,7 @@ jobs: - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0 - name: Run ipfs-kad example - run: RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run --example ipfs-kad + run: RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run --example ipfs-kad --features full rustfmt: runs-on: ubuntu-latest @@ -187,3 +187,30 @@ jobs: - name: Check formatting run: cargo fmt -- --check + + manifest_lint: + runs-on: ubuntu-latest + steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@bb6001c4ea612bf59c3abfc4756fbceee4f870c7 # 0.10.0 + with: + access_token: ${{ github.token }} + + - uses: actions/checkout@v3 + + - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1.0.7 + with: + profile: minimal + toolchain: stable + override: true + + - name: Ensure `full` feature contains all features + run: | + ALL_FEATURES=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features | keys | map(select(. != "full")) | sort | join(" ")') + FULL_FEATURE=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features["full"] | sort | join(" ")') + + echo "$ALL_FEATURES"; + echo "$FULL_FEATURE"; + + test "$ALL_FEATURES" = "$FULL_FEATURE" diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..8e31c6dfabb --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,26 @@ +name: Close and mark stale issue + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v6 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 7 days.' + close-issue-message: 'This issue was closed because it is missing author input.' 
+ stale-issue-label: 'kind/stale' + any-of-labels: 'need/author-input' + exempt-issue-labels: 'need/triage,need/community-input,need/maintainer-input,need/maintainers-input,need/analysis,status/blocked,status/in-progress,status/ready,status/deferred,status/inactive' + days-before-issue-stale: 6 + days-before-issue-close: 7 + enable-statistics: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 2aa74dae1df..03b343314ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,13 +45,48 @@ # 0.49.0 - [unreleased] -- Update to [`libp2p-tcp` `v0.37.0`](transports/tcp/CHANGELOG.md#0370). +- Remove default features. You need to enable required features explicitly now. As a quick workaround, you may want to use the + new `full` feature which activates all features. See [PR 2918]. + +- Introduce `tokio` and `async-std` features and deprecate the following ones: + - `tcp-tokio` in favor of `tcp` + `tokio` + - `mdns-tokio` in favor of `mdns` + `tokio` + - `dns-tokio` in favor of `dns` + `tokio` + - `tcp-async-io` in favor of `tcp` + `async-std` + - `mdns-async-io` in favor of `mdns` + `async-std` + - `dns-async-std` in favor of `dns` + `async-std` + + See [PR 2962]. -- Update to [`libp2p-swarm-derive` `v0.30.1`](swarm-derive/CHANGELOG.md#0301). - -- Update to [`libp2p-metrics` `v0.10.0`](misc/metrics/CHANGELOG.md#0100). - -- Update to [`libp2p-kad` `v0.41.0`](protocols/kad/CHANGELOG.md#0410). +- Update individual crates. + - Update to [`libp2p-autonat` `v0.8.0`](protocols/autonat/CHANGELOG.md#0080). + - Update to [`libp2p-core` `v0.37.0`](core/CHANGELOG.md#0370). + - Update to [`libp2p-dcutr` `v0.7.0`](protocols/dcutr/CHANGELOG.md#0070). + - Update to [`libp2p-deflate` `v0.37.0`](transports/deflate/CHANGELOG.md#0370). + - Update to [`libp2p-dns` `v0.37.0`](transports/dns/CHANGELOG.md#0370). + - Update to [`libp2p-floodsub` `v0.40.0`](protocols/floodsub/CHANGELOG.md#0400). + - Update to [`libp2p-gossipsub` `v0.42.0`](protocols/gossipsub/CHANGELOG.md#0420). + - Update to [`libp2p-identify` `v0.40.0`](protocols/identify/CHANGELOG.md#0400). + - Update to [`libp2p-kad` `v0.41.0`](protocols/kad/CHANGELOG.md#0410). + - Update to [`libp2p-mdns` `v0.41.0`](protocols/mdns/CHANGELOG.md#0410). + - Update to [`libp2p-metrics` `v0.10.0`](misc/metrics/CHANGELOG.md#0100). + - Update to [`libp2p-mplex` `v0.37.0`](muxers/mplex/CHANGELOG.md#0370). + - Update to [`libp2p-noise` `v0.40.0`](transports/noise/CHANGELOG.md#0400). + - Update to [`libp2p-ping` `v0.40.0`](protocols/ping/CHANGELOG.md#0400). + - Update to [`libp2p-plaintext` `v0.37.0`](transports/plaintext/CHANGELOG.md#0370). + - Update to [`libp2p-relay` `v0.13.0`](protocols/relay/CHANGELOG.md#0130). + - Update to [`libp2p-rendezvous` `v0.10.0`](protocols/rendezvous/CHANGELOG.md#0100). + - Update to [`libp2p-request-response` `v0.22.0`](protocols/request-response/CHANGELOG.md#0220). + - Update to [`libp2p-swarm-derive` `v0.30.1`](swarm-derive/CHANGELOG.md#0301). + - Update to [`libp2p-swarm` `v0.40.0`](swarm/CHANGELOG.md#0400). + - Update to [`libp2p-tcp` `v0.37.0`](transports/tcp/CHANGELOG.md#0370). + - Update to [`libp2p-uds` `v0.36.0`](transports/uds/CHANGELOG.md#0360). + - Update to [`libp2p-wasm-ext` `v0.37.0`](transports/wasm-ext/CHANGELOG.md#0370). + - Update to [`libp2p-websocket` `v0.39.0`](transports/websocket/CHANGELOG.md#0390). + - Update to [`libp2p-yamux` `v0.41.0`](muxers/yamux/CHANGELOG.md#0410).
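A short migration sketch may help readers of the entry above. The manifest lines in the comments are illustrative only (they are not part of this PR); the `TokioMdns` constructor is the same one the `chat-tokio` example uses later in this diff, and the surrounding Rust code does not change.

```rust
// Hypothetical downstream Cargo.toml before this release:
//     libp2p = { version = "0.48", features = ["tcp-tokio", "mdns-tokio", "dns-tokio"] }
// and after it, combining the per-protocol features with the new runtime feature
// (or simply `features = ["full"]` to enable everything):
//     libp2p = { version = "0.49", features = ["tcp", "mdns", "dns", "tokio"] }
//
// The Rust side keeps compiling unchanged, e.g. the Tokio mDNS behaviour:
use libp2p::mdns::TokioMdns;

fn mdns_behaviour() -> Result<TokioMdns, Box<dyn std::error::Error>> {
    // `TokioMdns::new` is synchronous as of this release (see the chat-tokio hunk below).
    Ok(TokioMdns::new(Default::default())?)
}
```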
+ +[PR 2918]: https://github.com/libp2p/rust-libp2p/pull/2918 +[PR 2962]: https://github.com/libp2p/rust-libp2p/pull/2962 # 0.48.0 diff --git a/Cargo.toml b/Cargo.toml index 3f82880528b..e2cdb52d618 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,35 +11,49 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] -default = [ +full = [ + "async-std", "autonat", + "dcutr", "deflate", + "dns", "dns-async-std", + "dns-tokio", + "ecdsa", "floodsub", + "gossipsub", "identify", "kad", - "gossipsub", + "mdns", "mdns-async-io", + "mdns-tokio", + "metrics", "mplex", "noise", "ping", "plaintext", "pnet", "relay", - "request-response", "rendezvous", + "request-response", "rsa", "secp256k1", + "serde", + "tcp", "tcp-async-io", + "tcp-tokio", + "tokio", "uds", + "wasm-bindgen", "wasm-ext", + "wasm-ext-websocket", "websocket", "yamux", ] - autonat = ["dep:libp2p-autonat"] dcutr = ["dep:libp2p-dcutr", "libp2p-metrics?/dcutr"] deflate = ["dep:libp2p-deflate"] +dns = ["dep:libp2p-dns"] dns-async-std = ["dep:libp2p-dns", "libp2p-dns?/async-std"] dns-tokio = ["dep:libp2p-dns", "libp2p-dns?/tokio"] floodsub = ["dep:libp2p-floodsub"] @@ -47,6 +61,7 @@ identify = ["dep:libp2p-identify", "libp2p-metrics?/identify"] kad = ["dep:libp2p-kad", "libp2p-metrics?/kad"] gossipsub = ["dep:libp2p-gossipsub", "libp2p-metrics?/gossipsub"] metrics = ["dep:libp2p-metrics"] +mdns = ["dep:libp2p-mdns"] mdns-async-io = ["dep:libp2p-mdns", "libp2p-mdns?/async-io"] mdns-tokio = ["dep:libp2p-mdns", "libp2p-mdns?/tokio"] mplex = ["dep:libp2p-mplex"] @@ -57,17 +72,21 @@ pnet = ["dep:libp2p-pnet"] relay = ["dep:libp2p-relay", "libp2p-metrics?/relay"] request-response = ["dep:libp2p-request-response"] rendezvous = ["dep:libp2p-rendezvous"] +tcp = ["dep:libp2p-tcp"] tcp-async-io = ["dep:libp2p-tcp", "libp2p-tcp?/async-io"] tcp-tokio = ["dep:libp2p-tcp", "libp2p-tcp?/tokio"] uds = ["dep:libp2p-uds"] -wasm-bindgen = ["futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js", "rand/wasm-bindgen"] +wasm-bindgen = ["futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js"] wasm-ext = ["dep:libp2p-wasm-ext"] wasm-ext-websocket = ["wasm-ext", "libp2p-wasm-ext?/websocket"] websocket = ["dep:libp2p-websocket"] yamux = ["dep:libp2p-yamux"] secp256k1 = ["libp2p-core/secp256k1"] rsa = ["libp2p-core/rsa"] +ecdsa = ["libp2p-core/ecdsa"] serde = ["libp2p-core/serde", "libp2p-kad?/serde", "libp2p-gossipsub?/serde"] +tokio = ["libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio"] +async-std = ["libp2p-mdns?/async-io", "libp2p-tcp?/async-io", "libp2p-dns?/async-std"] [package.metadata.docs.rs] all-features = true @@ -80,47 +99,46 @@ getrandom = "0.2.3" # Explicit dependency to be used in `wasm-bindgen` feature instant = "0.1.11" # Explicit dependency to be used in `wasm-bindgen` feature lazy_static = "1.2" -libp2p-autonat = { version = "0.7.0", path = "protocols/autonat", optional = true } -libp2p-core = { version = "0.36.0", path = "core", default-features = false } -libp2p-dcutr = { version = "0.6.0", path = "protocols/dcutr", optional = true } -libp2p-floodsub = { version = "0.39.0", path = "protocols/floodsub", optional = true } -libp2p-identify = { version = "0.39.0", path = "protocols/identify", optional = true } +libp2p-autonat = { version = "0.8.0", path = "protocols/autonat", optional = true } +libp2p-core = { version = "0.37.0", path = "core" } +libp2p-dcutr = { version = "0.7.0", path = "protocols/dcutr", optional = true } 
+libp2p-floodsub = { version = "0.40.1", path = "protocols/floodsub", optional = true } +libp2p-identify = { version = "0.40.0", path = "protocols/identify", optional = true } libp2p-kad = { version = "0.41.0", path = "protocols/kad", optional = true } libp2p-metrics = { version = "0.10.0", path = "misc/metrics", optional = true } -libp2p-mplex = { version = "0.36.0", path = "muxers/mplex", optional = true } -libp2p-noise = { version = "0.39.1", path = "transports/noise", optional = true } -libp2p-ping = { version = "0.39.0", path = "protocols/ping", optional = true } -libp2p-plaintext = { version = "0.36.0", path = "transports/plaintext", optional = true } -libp2p-pnet = { version = "0.22.0", path = "transports/pnet", optional = true } -libp2p-relay = { version = "0.12.0", path = "protocols/relay", optional = true } -libp2p-rendezvous = { version = "0.9.0", path = "protocols/rendezvous", optional = true } -libp2p-request-response = { version = "0.21.0", path = "protocols/request-response", optional = true } -libp2p-swarm = { version = "0.39.0", path = "swarm" } +libp2p-mplex = { version = "0.37.0", path = "muxers/mplex", optional = true } +libp2p-noise = { version = "0.40.0", path = "transports/noise", optional = true } +libp2p-ping = { version = "0.40.1", path = "protocols/ping", optional = true } +libp2p-plaintext = { version = "0.37.0", path = "transports/plaintext", optional = true } +libp2p-pnet = { version = "0.22.1", path = "transports/pnet", optional = true } +libp2p-relay = { version = "0.13.0", path = "protocols/relay", optional = true } +libp2p-rendezvous = { version = "0.10.0", path = "protocols/rendezvous", optional = true } +libp2p-request-response = { version = "0.22.1", path = "protocols/request-response", optional = true } +libp2p-swarm = { version = "0.40.1", path = "swarm" } libp2p-swarm-derive = { version = "0.30.1", path = "swarm-derive" } -libp2p-uds = { version = "0.35.0", path = "transports/uds", optional = true } -libp2p-wasm-ext = { version = "0.36.0", path = "transports/wasm-ext", default-features = false, optional = true } -libp2p-yamux = { version = "0.40.0", path = "muxers/yamux", optional = true } +libp2p-uds = { version = "0.36.0", path = "transports/uds", optional = true } +libp2p-wasm-ext = { version = "0.37.0", path = "transports/wasm-ext", optional = true } +libp2p-yamux = { version = "0.41.0", path = "muxers/yamux", optional = true } multiaddr = { version = "0.14.0" } parking_lot = "0.12.0" pin-project = "1.0.0" -rand = "0.7.3" # Explicit dependency to be used in `wasm-bindgen` feature smallvec = "1.6.1" [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] -libp2p-deflate = { version = "0.36.0", path = "transports/deflate", optional = true } -libp2p-dns = { version = "0.36.0", path = "transports/dns", optional = true, default-features = false } -libp2p-mdns = { version = "0.40.0", path = "protocols/mdns", optional = true, default-features = false } -libp2p-tcp = { version = "0.37.0", path = "transports/tcp", default-features = false, optional = true } -libp2p-websocket = { version = "0.38.0", path = "transports/websocket", optional = true } +libp2p-deflate = { version = "0.37.0", path = "transports/deflate", optional = true } +libp2p-dns = { version = "0.37.0", path = "transports/dns", optional = true } +libp2p-mdns = { version = "0.41.0", path = "protocols/mdns", optional = true } +libp2p-tcp = { version = "0.37.0", path = "transports/tcp", optional = true } +libp2p-websocket = { version = 
"0.39.0", path = "transports/websocket", optional = true } [target.'cfg(not(target_os = "unknown"))'.dependencies] -libp2p-gossipsub = { version = "0.41.0", path = "protocols/gossipsub", optional = true } +libp2p-gossipsub = { version = "0.42.1", path = "protocols/gossipsub", optional = true } [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } async-trait = "0.1" env_logger = "0.9.0" -clap = {version = "3.1.6", features = ["derive"]} +clap = { version = "3.1.6", features = ["derive"] } tokio = { version = "1.15", features = ["io-util", "io-std", "macros", "rt", "rt-multi-thread"] } [workspace] @@ -131,6 +149,7 @@ members = [ "misc/rw-stream-sink", "misc/keygen", "misc/prost-codec", + "misc/quickcheck-ext", "muxers/mplex", "muxers/yamux", "protocols/dcutr", @@ -159,20 +178,36 @@ members = [ [[example]] name = "chat" -required-features = ["floodsub"] +required-features = ["full"] [[example]] name = "chat-tokio" -required-features = ["tcp-tokio", "mdns-tokio"] +required-features = ["full"] [[example]] name = "file-sharing" -required-features = ["request-response"] +required-features = ["full"] [[example]] name = "gossipsub-chat" -required-features = ["gossipsub"] +required-features = ["full"] [[example]] name = "ipfs-private" -required-features = ["gossipsub"] +required-features = ["full"] + +[[example]] +name = "ipfs-kad" +required-features = ["full"] + +[[example]] +name = "ping" +required-features = ["full"] + +[[example]] +name = "mdns-passive-discovery" +required-features = ["full"] + +[[example]] +name = "distributed-key-value-store" +required-features = ["full"] diff --git a/README.md b/README.md index b2e5a7d9bac..1d5240acd83 100644 --- a/README.md +++ b/README.md @@ -83,14 +83,15 @@ Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). (open a pull request if you want your project to be added here) -- https://github.com/paritytech/polkadot -- https://github.com/paritytech/substrate -- https://github.com/sigp/lighthouse -- https://github.com/golemfactory/golem-libp2p -- https://github.com/comit-network -- https://github.com/rs-ipfs/rust-ipfs -- https://github.com/marcopoloprotocol/marcopolo -- https://github.com/ChainSafe/forest -- https://github.com/ipfs-rust/ipfs-embed -- https://www.actyx.com/developers/ -- https://github.com/starcoinorg/starcoin +- [COMIT](https://github.com/comit-network/xmr-btc-swap) - Bitcoin–Monero Cross-chain Atomic Swap. +- [Forest](https://github.com/ChainSafe/forest) - An implementation of Filecoin written in Rust. +- [ipfs-embed](https://github.com/ipfs-rust/ipfs-embed) - A small embeddable ipfs implementation +used and maintained by [Actyx][https://www.actyx.com]. +- [iroh](https://github.com/n0-computer/iroh) - Next-generation implementation of IPFS for Cloud & Mobile platforms. +- [Lighthouse](https://github.com/sigp/lighthouse) - Ethereum consensus client in Rust. +- [Locutus](https://github.com/freenet/locutus) - Global, observable, decentralized key-value store. +- [rust-ipfs](https://github.com/rs-ipfs/rust-ipfs) - IPFS implementation in Rust. +- [Starcoin](https://github.com/starcoinorg/starcoin) - A smart contract blockchain network that scales by layering. 
+- [Subspace](https://github.com/subspace/subspace) - Subspace Network reference implementation +- [Substrate](https://github.com/paritytech/substrate) - Framework for blockchain innovation, +used by [Polkadot](https://www.parity.io/technologies/polkadot/). diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 4a5e07f14f8..9f258c88a24 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,3 +1,13 @@ +# 0.37.0 [unreleased] + +- Implement `Hash` and `Ord` for `PublicKey`. See [PR 2915]. + +- Remove default features. If you previously depended on `secp256k1` or `ecdsa` you need to enable these explicitly + now. See [PR 2918]. + +[PR 2915]: https://github.com/libp2p/rust-libp2p/pull/2915 +[PR 2918]: https://github.com/libp2p/rust-libp2p/pull/2918 + # 0.36.0 - Make RSA keypair support optional. To enable RSA support, `rsa` feature should be enabled. diff --git a/core/Cargo.toml b/core/Cargo.toml index 0970b3e74d6..c0881afe619 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = "1.56.1" description = "Core traits and structs of libp2p" -version = "0.36.0" +version = "0.37.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -24,7 +24,7 @@ libsecp256k1 = { version = "0.7.0", optional = true } log = "0.4" multiaddr = { version = "0.14.0" } multihash = { version = "0.16", default-features = false, features = ["std", "multihash-impl", "identity", "sha2"] } -multistream-select = { version = "0.11", path = "../misc/multistream-select" } +multistream-select = { version = "0.12", path = "../misc/multistream-select" } p256 = { version = "0.11.1", default-features = false, features = ["ecdsa"], optional = true } parking_lot = "0.12.0" pin-project = "1.0.0" @@ -46,12 +46,9 @@ ring = { version = "0.16.9", features = ["alloc", "std"], default-features = fal async-std = { version = "1.6.2", features = ["attributes"] } base64 = "0.13.0" criterion = "0.4" -libp2p-mplex = { path = "../muxers/mplex" } -libp2p-noise = { path = "../transports/noise" } -libp2p-tcp = { path = "../transports/tcp" } +libp2p = { path = "..", features = ["full"] } multihash = { version = "0.16", default-features = false, features = ["arb"] } -quickcheck = "0.9.0" -rand07 = { package = "rand", version = "0.7" } +quickcheck = { package = "quickcheck-ext", path = "../misc/quickcheck-ext" } rmp-serde = "1.0" serde_json = "1.0" @@ -59,7 +56,6 @@ serde_json = "1.0" prost-build = "0.11" [features] -default = [ "secp256k1", "ecdsa" ] secp256k1 = [ "libsecp256k1" ] ecdsa = [ "p256" ] rsa = [ "dep:ring" ] diff --git a/core/benches/peer_id.rs b/core/benches/peer_id.rs index 9a6935113ec..e9fec2f18d3 100644 --- a/core/benches/peer_id.rs +++ b/core/benches/peer_id.rs @@ -39,7 +39,7 @@ fn clone(c: &mut Criterion) { c.bench_function("clone", |b| { b.iter(|| { - black_box(peer_id.clone()); + black_box(peer_id); }) }); } diff --git a/core/src/identity.rs b/core/src/identity.rs index 73be1c78b57..af5dceb69ee 100644 --- a/core/src/identity.rs +++ b/core/src/identity.rs @@ -214,7 +214,7 @@ impl zeroize::Zeroize for keys_proto::PrivateKey { } /// The public key of a node's identity keypair. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum PublicKey { /// A public Ed25519 key. 
Ed25519(ed25519::PublicKey), @@ -379,4 +379,22 @@ mod tests { assert_eq!(expected_peer_id, peer_id); } + + #[test] + fn public_key_implements_hash() { + use std::hash::Hash; + + fn assert_implements_hash() {} + + assert_implements_hash::(); + } + + #[test] + fn public_key_implements_ord() { + use std::cmp::Ord; + + fn assert_implements_ord() {} + + assert_implements_ord::(); + } } diff --git a/core/src/identity/ecdsa.rs b/core/src/identity/ecdsa.rs index 81dfec4b4e0..88411b23655 100644 --- a/core/src/identity/ecdsa.rs +++ b/core/src/identity/ecdsa.rs @@ -21,7 +21,9 @@ //! ECDSA keys with secp256r1 curve support. use super::error::DecodingError; +use core::cmp; use core::fmt; +use core::hash; use p256::{ ecdsa::{ signature::{Signer, Verifier}, @@ -117,7 +119,7 @@ impl fmt::Debug for SecretKey { } /// An ECDSA public key. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Eq, PartialOrd, Ord)] pub struct PublicKey(VerifyingKey); impl PublicKey { @@ -222,6 +224,18 @@ impl fmt::Debug for PublicKey { } } +impl cmp::PartialEq for PublicKey { + fn eq(&self, other: &Self) -> bool { + self.to_bytes().eq(&other.to_bytes()) + } +} + +impl hash::Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.to_bytes().hash(state); + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/src/identity/ed25519.rs b/core/src/identity/ed25519.rs index 5782ac788cb..eef934d4d91 100644 --- a/core/src/identity/ed25519.rs +++ b/core/src/identity/ed25519.rs @@ -21,7 +21,9 @@ //! Ed25519 keys. use super::error::DecodingError; +use core::cmp; use core::fmt; +use core::hash; use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _}; use rand::RngCore; use std::convert::TryFrom; @@ -113,7 +115,7 @@ impl From for Keypair { } /// An Ed25519 public key. -#[derive(PartialEq, Eq, Clone)] +#[derive(Eq, Clone)] pub struct PublicKey(ed25519::PublicKey); impl fmt::Debug for PublicKey { @@ -126,6 +128,30 @@ impl fmt::Debug for PublicKey { } } +impl cmp::PartialEq for PublicKey { + fn eq(&self, other: &Self) -> bool { + self.0.as_bytes().eq(other.0.as_bytes()) + } +} + +impl hash::Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.0.as_bytes().hash(state); + } +} + +impl cmp::PartialOrd for PublicKey { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.as_bytes().partial_cmp(other.0.as_bytes()) + } +} + +impl cmp::Ord for PublicKey { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.0.as_bytes().cmp(other.0.as_bytes()) + } +} + impl PublicKey { /// Verify the Ed25519 signature on a message using the public key. pub fn verify(&self, msg: &[u8], sig: &[u8]) -> bool { diff --git a/core/src/identity/rsa.rs b/core/src/identity/rsa.rs index 40a7ea6f144..54dbe47f697 100644 --- a/core/src/identity/rsa.rs +++ b/core/src/identity/rsa.rs @@ -70,7 +70,7 @@ impl Keypair { } /// An RSA public key. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct PublicKey(Vec); impl PublicKey { @@ -306,25 +306,17 @@ impl DerDecodable<'_> for Asn1SubjectPublicKeyInfo { mod tests { use super::*; use quickcheck::*; - use rand07::seq::SliceRandom; - use std::fmt; - const KEY1: &'static [u8] = include_bytes!("test/rsa-2048.pk8"); - const KEY2: &'static [u8] = include_bytes!("test/rsa-3072.pk8"); - const KEY3: &'static [u8] = include_bytes!("test/rsa-4096.pk8"); + const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8"); + const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8"); + const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8"); - #[derive(Clone)] + #[derive(Clone, Debug)] struct SomeKeypair(Keypair); - impl fmt::Debug for SomeKeypair { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "SomeKeypair") - } - } - impl Arbitrary for SomeKeypair { - fn arbitrary(g: &mut G) -> SomeKeypair { - let mut key = [KEY1, KEY2, KEY3].choose(g).unwrap().to_vec(); + fn arbitrary(g: &mut Gen) -> SomeKeypair { + let mut key = g.choose(&[KEY1, KEY2, KEY3]).unwrap().to_vec(); SomeKeypair(Keypair::from_pkcs8(&mut key).unwrap()) } } diff --git a/core/src/identity/secp256k1.rs b/core/src/identity/secp256k1.rs index 2c3aaf89a51..bfecc33ed2f 100644 --- a/core/src/identity/secp256k1.rs +++ b/core/src/identity/secp256k1.rs @@ -22,7 +22,9 @@ use super::error::{DecodingError, SigningError}; use asn1_der::typed::{DerDecodable, Sequence}; +use core::cmp; use core::fmt; +use core::hash; use libsecp256k1::{Message, Signature}; use sha2::{Digest as ShaDigestTrait, Sha256}; use zeroize::Zeroize; @@ -150,7 +152,7 @@ impl SecretKey { } /// A Secp256k1 public key. -#[derive(PartialEq, Eq, Clone)] +#[derive(Eq, Clone)] pub struct PublicKey(libsecp256k1::PublicKey); impl fmt::Debug for PublicKey { @@ -163,6 +165,30 @@ impl fmt::Debug for PublicKey { } } +impl cmp::PartialEq for PublicKey { + fn eq(&self, other: &Self) -> bool { + self.encode().eq(&other.encode()) + } +} + +impl hash::Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.encode().hash(state); + } +} + +impl cmp::PartialOrd for PublicKey { + fn partial_cmp(&self, other: &Self) -> Option { + self.encode().partial_cmp(&other.encode()) + } +} + +impl cmp::Ord for PublicKey { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.encode().cmp(&other.encode()) + } +} + impl PublicKey { /// Verify the Secp256k1 signature on a message using the public key. 
pub fn verify(&self, msg: &[u8], sig: &[u8]) -> bool { diff --git a/core/src/peer_id.rs b/core/src/peer_id.rs index cbe0a13395c..9e7a1f238cf 100644 --- a/core/src/peer_id.rs +++ b/core/src/peer_id.rs @@ -286,10 +286,10 @@ mod tests { #[test] fn extract_peer_id_from_multi_address() { - let address = - format!("/memory/1234/p2p/12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc") - .parse() - .unwrap(); + let address = "/memory/1234/p2p/12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc" + .to_string() + .parse() + .unwrap(); let peer_id = PeerId::try_from_multiaddr(&address).unwrap(); @@ -303,7 +303,7 @@ mod tests { #[test] fn no_panic_on_extract_peer_id_from_multi_address_if_not_present() { - let address = format!("/memory/1234").parse().unwrap(); + let address = "/memory/1234".to_string().parse().unwrap(); let maybe_empty = PeerId::try_from_multiaddr(&address); diff --git a/core/src/transport.rs b/core/src/transport.rs index df6e094ed80..0c9891773b2 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -149,9 +149,22 @@ pub trait Transport { cx: &mut Context<'_>, ) -> Poll>; - /// Performs a transport-specific mapping of an address `observed` by - /// a remote onto a local `listen` address to yield an address for - /// the local node that may be reachable for other peers. + /// Performs a transport-specific mapping of an address `observed` by a remote onto a + /// local `listen` address to yield an address for the local node that may be reachable + /// for other peers. + /// + /// This is relevant for transports where Network Address Translation (NAT) can occur + /// so that e.g. the peer is observed at a different IP than the IP of the local + /// listening address. See also [`address_translation`][crate::address_translation]. + /// + /// Within [`libp2p::Swarm`]() this is + /// used when extending the listening addresses of the local peer with external addresses + /// observed by remote peers. + /// On transports where this is not relevant (i.e. no NATs are present) `None` should be + /// returned for the sake of de-duplication. + /// + /// Note: if the listen or observed address is not a valid address of this transport, + /// `None` should be returned as well. fn address_translation(&self, listen: &Multiaddr, observed: &Multiaddr) -> Option; /// Boxes the transport, including custom transport errors. diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs index dac84534369..9775e50a9af 100644 --- a/core/tests/transport_upgrade.rs +++ b/core/tests/transport_upgrade.rs @@ -19,11 +19,11 @@ // DEALINGS IN THE SOFTWARE. use futures::prelude::*; -use libp2p_core::identity; -use libp2p_core::transport::{MemoryTransport, Transport}; -use libp2p_core::upgrade::{self, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use libp2p_mplex::MplexConfig; -use libp2p_noise as noise; +use libp2p::core::identity; +use libp2p::core::transport::{MemoryTransport, Transport}; +use libp2p::core::upgrade::{self, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::mplex::MplexConfig; +use libp2p::noise; use multiaddr::{Multiaddr, Protocol}; use rand::random; use std::{io, pin::Pin}; diff --git a/examples/README.md b/examples/README.md index 6c16d77cf66..13edf1aaa76 100644 --- a/examples/README.md +++ b/examples/README.md @@ -9,7 +9,7 @@ A set of examples showcasing how to use rust-libp2p. Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example. 
-## Individual libp2p protocols +## Individual libp2p features - [Chat](./chat.rs) @@ -43,6 +43,10 @@ A set of examples showcasing how to use rust-libp2p. - [Passive Discovery via MDNS](mdns-passive-discovery.rs) Discover peers on the same network via the MDNS protocol. + +- [Hole punching tutorial](https://docs.rs/libp2p/latest/libp2p/tutorials/hole_punching/index.html) + + Tutorial on how to overcome firewalls and NATs with libp2p’s hole punching mechanism. ## Integration into a larger application diff --git a/examples/chat-tokio.rs b/examples/chat-tokio.rs index 5ee00f9eedc..2578d106ff3 100644 --- a/examples/chat-tokio.rs +++ b/examples/chat-tokio.rs @@ -25,18 +25,11 @@ //! The example is run per node as follows: //! //! ```sh -//! cargo run --example chat-tokio --features="tcp-tokio mdns-tokio" -//! ``` -//! -//! Alternatively, to run with the minimal set of features and crates: -//! -//! ```sh -//!cargo run --example chat-tokio \\ -//! --no-default-features \\ -//! --features="floodsub mplex noise tcp-tokio mdns-tokio" +//! cargo run --example chat-tokio --features=full //! ``` use futures::StreamExt; +use libp2p::tcp::GenTcpConfig; use libp2p::{ core::upgrade, floodsub::{self, Floodsub, FloodsubEvent}, @@ -56,7 +49,6 @@ use libp2p::{ PeerId, Transport, }; -use libp2p_tcp::GenTcpConfig; use std::error::Error; use tokio::io::{self, AsyncBufReadExt}; @@ -113,7 +105,7 @@ async fn main() -> Result<(), Box> { // Create a Swarm to manage peers and events. let mut swarm = { - let mdns = TokioMdns::new(Default::default()).await?; + let mdns = TokioMdns::new(Default::default())?; let mut behaviour = MyBehaviour { floodsub: Floodsub::new(peer_id), mdns, diff --git a/examples/chat.rs b/examples/chat.rs index ee5527bfca7..e5368b49e80 100644 --- a/examples/chat.rs +++ b/examples/chat.rs @@ -35,7 +35,7 @@ //! terminal window, run: //! //! ```sh -//! cargo run --example chat +//! cargo run --example chat --features=full //! ``` //! //! It will print the PeerId and the listening addresses, e.g. `Listening on @@ -44,12 +44,12 @@ //! In the second terminal window, start a new instance of the example with: //! //! ```sh -//! cargo run --example chat -- /ip4/127.0.0.1/tcp/24915 +//! cargo run --example chat --features=full -- /ip4/127.0.0.1/tcp/24915 //! ``` //! //! The two nodes then connect. -use async_std::{io, task}; +use async_std::io; use futures::{ prelude::{stream::StreamExt, *}, select, @@ -108,7 +108,7 @@ async fn main() -> Result<(), Box> { // Create a Swarm to manage peers and events let mut swarm = { - let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?; + let mdns = Mdns::new(MdnsConfig::default())?; let mut behaviour = MyBehaviour { floodsub: Floodsub::new(local_peer_id), mdns, diff --git a/examples/distributed-key-value-store.rs b/examples/distributed-key-value-store.rs index 7fef717cf78..de02ddf81be 100644 --- a/examples/distributed-key-value-store.rs +++ b/examples/distributed-key-value-store.rs @@ -40,7 +40,7 @@ //! //! 4. Close with Ctrl-c. -use async_std::{io, task}; +use async_std::io; use futures::{prelude::*, select}; use libp2p::kad::record::store::MemoryStore; use libp2p::kad::{ @@ -96,7 +96,7 @@ async fn main() -> Result<(), Box> { // Create a Kademlia behaviour. 
let store = MemoryStore::new(local_peer_id); let kademlia = Kademlia::new(local_peer_id, store); - let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?; + let mdns = Mdns::new(MdnsConfig::default())?; let behaviour = MyBehaviour { kademlia, mdns }; Swarm::new(transport, behaviour, local_peer_id) }; diff --git a/examples/file-sharing.rs b/examples/file-sharing.rs index 21fa45d54fc..4e3cad3f187 100644 --- a/examples/file-sharing.rs +++ b/examples/file-sharing.rs @@ -57,7 +57,7 @@ //! 1. Run command below in one terminal. //! //! ``` -//! cargo run --example file-sharing -- \ +//! cargo run --example file-sharing --features=full -- \ //! --listen-address /ip4/127.0.0.1/tcp/40837 \ //! --secret-key-seed 1 \ //! provide \ @@ -68,7 +68,7 @@ //! 2. Run command below in another terminal. //! //! ``` -//! cargo run --example file-sharing -- \ +//! cargo run --example file-sharing --features=full -- \ //! --peer /ip4/127.0.0.1/tcp/40837/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X \ //! get \ //! --name diff --git a/examples/gossipsub-chat.rs b/examples/gossipsub-chat.rs index d6ea44dcef6..807209324f6 100644 --- a/examples/gossipsub-chat.rs +++ b/examples/gossipsub-chat.rs @@ -32,7 +32,7 @@ //! terminal window, run: //! //! ```sh -//! cargo run --example gossipsub-chat +//! cargo run --example gossipsub-chat --features=full //! ``` //! //! It will print the [`PeerId`] and the listening addresses, e.g. `Listening on @@ -41,7 +41,7 @@ //! In the second terminal window, start a new instance of the example with: //! //! ```sh -//! cargo run --example gossipsub-chat -- /ip4/127.0.0.1/tcp/24915 +//! cargo run --example gossipsub-chat --features=full -- /ip4/127.0.0.1/tcp/24915 //! ``` //! //! The two nodes should then connect. diff --git a/examples/ipfs-private.rs b/examples/ipfs-private.rs index c0596816919..8c980b12f78 100644 --- a/examples/ipfs-private.rs +++ b/examples/ipfs-private.rs @@ -33,23 +33,21 @@ //! to work, the ipfs node needs to be configured to use gossipsub. use async_std::io; use futures::{prelude::*, select}; +use libp2p::tcp::GenTcpConfig; use libp2p::{ core::{ either::EitherTransport, muxing::StreamMuxerBox, transport, transport::upgrade::Version, }, gossipsub::{self, Gossipsub, GossipsubConfigBuilder, GossipsubEvent, MessageAuthenticity}, - identify::{Identify, IdentifyConfig, IdentifyEvent}, - identity, + identify, identity, multiaddr::Protocol, - noise, - ping::{self, PingEvent}, + noise, ping, pnet::{PnetConfig, PreSharedKey}, swarm::SwarmEvent, tcp::TcpTransport, yamux::YamuxConfig, Multiaddr, NetworkBehaviour, PeerId, Swarm, Transport, }; -use libp2p_tcp::GenTcpConfig; use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; /// Builds the transport that serves as a common ground for all connections. 
@@ -158,14 +156,14 @@ async fn main() -> Result<(), Box> { #[behaviour(out_event = "MyBehaviourEvent")] struct MyBehaviour { gossipsub: Gossipsub, - identify: Identify, + identify: identify::Behaviour, ping: ping::Behaviour, } enum MyBehaviourEvent { Gossipsub(GossipsubEvent), - Identify(IdentifyEvent), - Ping(PingEvent), + Identify(identify::Event), + Ping(ping::Event), } impl From for MyBehaviourEvent { @@ -174,14 +172,14 @@ async fn main() -> Result<(), Box> { } } - impl From for MyBehaviourEvent { - fn from(event: IdentifyEvent) -> Self { + impl From for MyBehaviourEvent { + fn from(event: identify::Event) -> Self { MyBehaviourEvent::Identify(event) } } - impl From for MyBehaviourEvent { - fn from(event: PingEvent) -> Self { + impl From for MyBehaviourEvent { + fn from(event: ping::Event) -> Self { MyBehaviourEvent::Ping(event) } } @@ -198,7 +196,7 @@ async fn main() -> Result<(), Box> { gossipsub_config, ) .expect("Valid configuration"), - identify: Identify::new(IdentifyConfig::new( + identify: identify::Behaviour::new(identify::Config::new( "/ipfs/0.1.0".into(), local_key.public(), )), diff --git a/examples/mdns-passive-discovery.rs b/examples/mdns-passive-discovery.rs index a63ec7d5afe..9ac7e1a45a0 100644 --- a/examples/mdns-passive-discovery.rs +++ b/examples/mdns-passive-discovery.rs @@ -40,7 +40,7 @@ async fn main() -> Result<(), Box> { let transport = libp2p::development_transport(id_keys).await?; // Create an MDNS network behaviour. - let behaviour = Mdns::new(MdnsConfig::default()).await?; + let behaviour = Mdns::new(MdnsConfig::default())?; // Create a Swarm that establishes connections through the given transport. // Note that the MDNS behaviour itself will not actually inititiate any connections, diff --git a/examples/ping.rs b/examples/ping.rs index 26223459bfa..9be23419e40 100644 --- a/examples/ping.rs +++ b/examples/ping.rs @@ -25,7 +25,7 @@ //! In the first terminal window, run: //! //! ```sh -//! cargo run --example ping +//! cargo run --example ping --features=full //! ``` //! //! It will print the PeerId and the listening addresses, e.g. `Listening on @@ -34,7 +34,7 @@ //! In the second terminal window, start a new instance of the example with: //! //! ```sh -//! cargo run --example ping -- /ip4/127.0.0.1/tcp/24915 +//! cargo run --example ping --features=full -- /ip4/127.0.0.1/tcp/24915 //! ``` //! //! The two nodes establish a connection, negotiate the ping protocol @@ -42,7 +42,8 @@ use futures::prelude::*; use libp2p::swarm::{Swarm, SwarmEvent}; -use libp2p::{identity, ping, Multiaddr, PeerId}; +use libp2p::{identity, ping, Multiaddr, NetworkBehaviour, PeerId}; +use libp2p_swarm::keep_alive; use std::error::Error; #[async_std::main] @@ -53,14 +54,7 @@ async fn main() -> Result<(), Box> { let transport = libp2p::development_transport(local_key).await?; - // Create a ping network behaviour. - // - // For illustrative purposes, the ping protocol is configured to - // keep the connection alive, so a continuous sequence of pings - // can be observed. - let behaviour = ping::Behaviour::new(ping::Config::new().with_keep_alive(true)); - - let mut swarm = Swarm::new(transport, behaviour, local_peer_id); + let mut swarm = Swarm::new(transport, Behaviour::default(), local_peer_id); // Tell the swarm to listen on all interfaces and a random, OS-assigned // port. @@ -82,3 +76,13 @@ async fn main() -> Result<(), Box> { } } } + +/// Our network behaviour. 
+/// +/// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of +/// pings can be observed. +#[derive(NetworkBehaviour, Default)] +struct Behaviour { + keep_alive: keep_alive::Behaviour, + ping: ping::Behaviour, +} diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 36e01709df9..13b1ff38216 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -13,5 +13,5 @@ clap = {version = "3.1.6", features = ["derive"]} zeroize = "1" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" -libp2p-core = { path = "../../core", default-features = false, version = "0.36.0"} +libp2p-core = { version = "0.37.0", path = "../../core" } base64 = "0.13.0" diff --git a/misc/metrics/CHANGELOG.md b/misc/metrics/CHANGELOG.md index 1b784ffa1f9..fe428745d70 100644 --- a/misc/metrics/CHANGELOG.md +++ b/misc/metrics/CHANGELOG.md @@ -1,7 +1,21 @@ # 0.10.0 [unreleased] +- Update to `libp2p-swarm` `v0.40.0`. + +- Update to `libp2p-dcutr` `v0.7.0`. + +- Update to `libp2p-ping` `v0.40.0`. + +- Update to `libp2p-identify` `v0.40.0`. + +- Update to `libp2p-relay` `v0.13.0`. + - Update to `libp2p-kad` `v0.41.0`. +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-gossipsub` `v0.42.0`. + # 0.9.0 - Update to `libp2p-swarm` `v0.39.0`. diff --git a/misc/metrics/Cargo.toml b/misc/metrics/Cargo.toml index 1c29fceec24..1e96355606a 100644 --- a/misc/metrics/Cargo.toml +++ b/misc/metrics/Cargo.toml @@ -19,22 +19,22 @@ relay = ["libp2p-relay"] dcutr = ["libp2p-dcutr"] [dependencies] -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-dcutr = { version = "0.6.0", path = "../../protocols/dcutr", optional = true } -libp2p-identify = { version = "0.39.0", path = "../../protocols/identify", optional = true } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-dcutr = { version = "0.7.0", path = "../../protocols/dcutr", optional = true } +libp2p-identify = { version = "0.40.0", path = "../../protocols/identify", optional = true } libp2p-kad = { version = "0.41.0", path = "../../protocols/kad", optional = true } -libp2p-ping = { version = "0.39.0", path = "../../protocols/ping", optional = true } -libp2p-relay = { version = "0.12.0", path = "../../protocols/relay", optional = true } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-ping = { version = "0.40.0", path = "../../protocols/ping", optional = true } +libp2p-relay = { version = "0.13.0", path = "../../protocols/relay", optional = true } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } prometheus-client = "0.18.0" [target.'cfg(not(target_os = "unknown"))'.dependencies] -libp2p-gossipsub = { version = "0.41.0", path = "../../protocols/gossipsub", optional = true } +libp2p-gossipsub = { version = "0.42.0", path = "../../protocols/gossipsub", optional = true } [dev-dependencies] log = "0.4.0" env_logger = "0.9.0" futures = "0.3.1" -libp2p = { path = "../../", default-features = false, features = ["metrics", "ping", "tcp-async-io", "dns-async-std", "websocket", "noise", "mplex", "yamux"] } +libp2p = { path = "../..", features = ["full"] } hyper = { version="0.14", features = ["server", "tcp", "http1"] } tokio = { version = "1", features = ["rt-multi-thread"] } diff --git a/misc/metrics/examples/metrics/main.rs b/misc/metrics/examples/metrics/main.rs index d4f491b4af3..0307e32dc1e 100644 --- a/misc/metrics/examples/metrics/main.rs +++ b/misc/metrics/examples/metrics/main.rs @@ -48,19 
+48,19 @@ //! You should see a long list of metrics printed to the terminal. Check the //! `libp2p_ping` metrics, they should be `>0`. +use env_logger::Env; use futures::executor::block_on; use futures::stream::StreamExt; use libp2p::core::Multiaddr; use libp2p::metrics::{Metrics, Recorder}; -use libp2p::ping::{Ping, PingConfig}; use libp2p::swarm::SwarmEvent; -use libp2p::{identity, PeerId, Swarm}; +use libp2p::{identity, ping, NetworkBehaviour, PeerId, Swarm}; +use libp2p_swarm::keep_alive; +use log::info; use prometheus_client::registry::Registry; use std::error::Error; use std::thread; -use env_logger::Env; -use log::info; mod http_service; fn main() -> Result<(), Box> { @@ -72,7 +72,7 @@ fn main() -> Result<(), Box> { let mut swarm = Swarm::new( block_on(libp2p::development_transport(local_key))?, - Ping::new(PingConfig::new().with_keep_alive(true)), + Behaviour::default(), local_peer_id, ); @@ -91,7 +91,7 @@ fn main() -> Result<(), Box> { block_on(async { loop { match swarm.select_next_some().await { - SwarmEvent::Behaviour(ping_event) => { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { info!("{:?}", ping_event); metrics.record(&ping_event); } @@ -104,3 +104,13 @@ }); Ok(()) } + +/// Our network behaviour. +/// +/// For illustrative purposes, this includes the [`keep_alive::Behaviour`] behaviour so the pings actually happen +/// and can be observed via the metrics. +#[derive(NetworkBehaviour, Default)] +struct Behaviour { + keep_alive: keep_alive::Behaviour, + ping: ping::Behaviour, +} diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 730528167a8..8f91521713f 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -112,16 +112,16 @@ impl Metrics { } } -impl super::Recorder for Metrics { - fn record(&self, event: &libp2p_identify::IdentifyEvent) { +impl super::Recorder for Metrics { + fn record(&self, event: &libp2p_identify::Event) { match event { - libp2p_identify::IdentifyEvent::Error { .. } => { + libp2p_identify::Event::Error { .. } => { self.error.inc(); } - libp2p_identify::IdentifyEvent::Pushed { .. } => { + libp2p_identify::Event::Pushed { .. } => { self.pushed.inc(); } - libp2p_identify::IdentifyEvent::Received { peer_id, info, .. } => { + libp2p_identify::Event::Received { peer_id, info, .. } => { { let mut protocols: Vec = info .protocols @@ -168,7 +168,7 @@ impl super::Recorder for Metrics { self.received_info_listen_addrs .observe(info.listen_addrs.len() as f64); } - libp2p_identify::IdentifyEvent::Sent { .. } => { + libp2p_identify::Event::Sent { ..
} => { self.sent.inc(); } } diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index d9fa3c40ffe..bc9f45b8d76 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -112,8 +112,8 @@ impl Recorder for Metrics { } #[cfg(feature = "identify")] -impl Recorder for Metrics { - fn record(&self, event: &libp2p_identify::IdentifyEvent) { +impl Recorder for Metrics { + fn record(&self, event: &libp2p_identify::Event) { self.identify.record(event) } } @@ -126,8 +126,8 @@ impl Recorder for Metrics { } #[cfg(feature = "ping")] -impl Recorder for Metrics { - fn record(&self, event: &libp2p_ping::PingEvent) { +impl Recorder for Metrics { + fn record(&self, event: &libp2p_ping::Event) { self.ping.record(event) } } diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs index b7c3ef60f9b..fbd89338cd5 100644 --- a/misc/metrics/src/ping.rs +++ b/misc/metrics/src/ping.rs @@ -29,16 +29,16 @@ struct FailureLabels { reason: Failure, } -impl From<&libp2p_ping::PingFailure> for FailureLabels { - fn from(failure: &libp2p_ping::PingFailure) -> Self { +impl From<&libp2p_ping::Failure> for FailureLabels { + fn from(failure: &libp2p_ping::Failure) -> Self { match failure { - libp2p_ping::PingFailure::Timeout => FailureLabels { + libp2p_ping::Failure::Timeout => FailureLabels { reason: Failure::Timeout, }, - libp2p_ping::PingFailure::Unsupported => FailureLabels { + libp2p_ping::Failure::Unsupported => FailureLabels { reason: Failure::Unsupported, }, - libp2p_ping::PingFailure::Other { .. } => FailureLabels { + libp2p_ping::Failure::Other { .. } => FailureLabels { reason: Failure::Other, }, } @@ -92,13 +92,13 @@ impl Metrics { } } -impl super::Recorder for Metrics { - fn record(&self, event: &libp2p_ping::PingEvent) { +impl super::Recorder for Metrics { + fn record(&self, event: &libp2p_ping::Event) { match &event.result { - Ok(libp2p_ping::PingSuccess::Pong) => { + Ok(libp2p_ping::Success::Pong) => { self.pong_received.inc(); } - Ok(libp2p_ping::PingSuccess::Ping { rtt }) => { + Ok(libp2p_ping::Success::Ping { rtt }) => { self.rtt.observe(rtt.as_secs_f64()); } Err(failure) => { diff --git a/misc/multistream-select/CHANGELOG.md b/misc/multistream-select/CHANGELOG.md index 5b3b336775b..4b15cc51ba1 100644 --- a/misc/multistream-select/CHANGELOG.md +++ b/misc/multistream-select/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.12.0 [unreleased] + +- Remove parallel dialing optimization, to avoid requiring the use of the `ls` command. See [PR 2934]. + +[PR 2934]: https://github.com/libp2p/rust-libp2p/pull/2934 + # 0.11.0 [2022-01-27] - Migrate to Rust edition 2021 (see [PR 2339]). 
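Since the `ls`-based flow is gone, every dialer now uses the serial message flow kept by this change. A minimal dialer-side sketch, using only the crate's public `dialer_select_proto`, `Version`, `Negotiated` and `NegotiationError` items; the protocol names are invented:

```rust
use futures::prelude::*;
use multistream_select::{dialer_select_proto, Negotiated, NegotiationError, Version};

// Propose our protocols one after the other; the first one the listener
// confirms is returned together with the negotiated I/O stream.
async fn negotiate<S>(socket: S) -> Result<(&'static [u8], Negotiated<S>), NegotiationError>
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    let protocols: &[&'static [u8]] = &[b"/foo/1.0.0", b"/bar/1.0.0"];
    dialer_select_proto(socket, protocols.iter().copied(), Version::V1).await
}
```

The iteration order of `protocols` is the dialer's preference order.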
diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index 9ca21c0362a..c669eb314aa 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -3,7 +3,7 @@ name = "multistream-select" edition = "2021" rust-version = "1.56.1" description = "Multistream-select negotiation protocol for libp2p" -version = "0.11.0" +version = "0.12.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -21,10 +21,7 @@ unsigned-varint = "0.7" [dev-dependencies] async-std = "1.6.2" env_logger = "0.9" -libp2p-core = { path = "../../core", default-features = false } -libp2p-swarm = { path = "../../swarm", default-features = false } -libp2p-mplex = { path = "../../muxers/mplex" } -libp2p-plaintext = { path = "../../transports/plaintext" } -quickcheck = "0.9.0" -rand = "0.7.2" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } +rand = "0.8" rw-stream-sink = { version = "0.3.0", path = "../../misc/rw-stream-sink" } diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index 7a8c75daa6f..893c86f8867 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -23,7 +23,7 @@ use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}; use crate::{Negotiated, NegotiationError, Version}; -use futures::{future::Either, prelude::*}; +use futures::prelude::*; use std::{ convert::TryFrom as _, iter, mem, @@ -39,12 +39,6 @@ use std::{ /// returned `Future` resolves with the name of the negotiated protocol and /// a [`Negotiated`] I/O stream. /// -/// The chosen message flow for protocol negotiation depends on the numbers of -/// supported protocols given. That is, this function delegates to serial or -/// parallel variant based on the number of protocols given. The number of -/// protocols is determined through the `size_hint` of the given iterator and -/// thus an inaccurate size estimate may result in a suboptimal choice. -/// /// Within the scope of this library, a dialer always commits to a specific /// multistream-select [`Version`], whereas a listener always supports /// all versions supported by this library. Frictionless multistream-select @@ -55,92 +49,32 @@ pub fn dialer_select_proto( protocols: I, version: Version, ) -> DialerSelectFuture -where - R: AsyncRead + AsyncWrite, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let iter = protocols.into_iter(); - // We choose between the "serial" and "parallel" strategies based on the number of protocols. - if iter.size_hint().1.map(|n| n <= 3).unwrap_or(false) { - Either::Left(dialer_select_proto_serial(inner, iter, version)) - } else { - Either::Right(dialer_select_proto_parallel(inner, iter, version)) - } -} - -/// Future, returned by `dialer_select_proto`, which selects a protocol and dialer -/// either trying protocols in-order, or by requesting all protocols supported -/// by the remote upfront, from which the first protocol found in the dialer's -/// list of protocols is selected. -pub type DialerSelectFuture = Either, DialerSelectPar>; - -/// Returns a `Future` that negotiates a protocol on the given I/O stream. -/// -/// Just like [`dialer_select_proto`] but always using an iterative message flow, -/// trying the given list of supported protocols one-by-one. -/// -/// This strategy is preferable if the dialer only supports a few protocols. 
-pub(crate) fn dialer_select_proto_serial( - inner: R, - protocols: I, - version: Version, -) -> DialerSelectSeq where R: AsyncRead + AsyncWrite, I: IntoIterator, I::Item: AsRef<[u8]>, { let protocols = protocols.into_iter().peekable(); - DialerSelectSeq { + DialerSelectFuture { version, protocols, - state: SeqState::SendHeader { + state: State::SendHeader { io: MessageIO::new(inner), }, } } -/// Returns a `Future` that negotiates a protocol on the given I/O stream. -/// -/// Just like [`dialer_select_proto`] but always using a message flow that first -/// requests all supported protocols from the remote, selecting the first -/// protocol from the given list of supported protocols that is supported -/// by the remote. -/// -/// This strategy may be beneficial if the dialer supports many protocols -/// and it is unclear whether the remote supports one of the first few. -pub(crate) fn dialer_select_proto_parallel( - inner: R, - protocols: I, - version: Version, -) -> DialerSelectPar -where - R: AsyncRead + AsyncWrite, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let protocols = protocols.into_iter(); - DialerSelectPar { - version, - protocols, - state: ParState::SendHeader { - io: MessageIO::new(inner), - }, - } -} - -/// A `Future` returned by [`dialer_select_proto_serial`] which negotiates +/// A `Future` returned by [`dialer_select_proto`] which negotiates /// a protocol iteratively by considering one protocol after the other. #[pin_project::pin_project] -pub struct DialerSelectSeq { +pub struct DialerSelectFuture { // TODO: It would be nice if eventually N = I::Item = Protocol. protocols: iter::Peekable, - state: SeqState, + state: State, version: Version, } -enum SeqState { +enum State { SendHeader { io: MessageIO }, SendProtocol { io: MessageIO, protocol: N }, FlushProtocol { io: MessageIO, protocol: N }, @@ -148,7 +82,7 @@ enum SeqState { Done, } -impl Future for DialerSelectSeq +impl Future for DialerSelectFuture where // The Unpin bound here is required because we produce a `Negotiated` as the output. // It also makes the implementation considerably easier to write. @@ -162,12 +96,12 @@ where let this = self.project(); loop { - match mem::replace(this.state, SeqState::Done) { - SeqState::SendHeader { mut io } => { + match mem::replace(this.state, State::Done) { + State::SendHeader { mut io } => { match Pin::new(&mut io).poll_ready(cx)? { Poll::Ready(()) => {} Poll::Pending => { - *this.state = SeqState::SendHeader { io }; + *this.state = State::SendHeader { io }; return Poll::Pending; } } @@ -181,14 +115,14 @@ where // The dialer always sends the header and the first protocol // proposal in one go for efficiency. - *this.state = SeqState::SendProtocol { io, protocol }; + *this.state = State::SendProtocol { io, protocol }; } - SeqState::SendProtocol { mut io, protocol } => { + State::SendProtocol { mut io, protocol } => { match Pin::new(&mut io).poll_ready(cx)? 
{ Poll::Ready(()) => {} Poll::Pending => { - *this.state = SeqState::SendProtocol { io, protocol }; + *this.state = State::SendProtocol { io, protocol }; return Poll::Pending; } } @@ -200,10 +134,10 @@ where log::debug!("Dialer: Proposed protocol: {}", p); if this.protocols.peek().is_some() { - *this.state = SeqState::FlushProtocol { io, protocol } + *this.state = State::FlushProtocol { io, protocol } } else { match this.version { - Version::V1 => *this.state = SeqState::FlushProtocol { io, protocol }, + Version::V1 => *this.state = State::FlushProtocol { io, protocol }, // This is the only effect that `V1Lazy` has compared to `V1`: // Optimistically settling on the only protocol that // the dialer supports for this negotiation. Notably, @@ -218,21 +152,21 @@ where } } - SeqState::FlushProtocol { mut io, protocol } => { + State::FlushProtocol { mut io, protocol } => { match Pin::new(&mut io).poll_flush(cx)? { - Poll::Ready(()) => *this.state = SeqState::AwaitProtocol { io, protocol }, + Poll::Ready(()) => *this.state = State::AwaitProtocol { io, protocol }, Poll::Pending => { - *this.state = SeqState::FlushProtocol { io, protocol }; + *this.state = State::FlushProtocol { io, protocol }; return Poll::Pending; } } } - SeqState::AwaitProtocol { mut io, protocol } => { + State::AwaitProtocol { mut io, protocol } => { let msg = match Pin::new(&mut io).poll_next(cx)? { Poll::Ready(Some(msg)) => msg, Poll::Pending => { - *this.state = SeqState::AwaitProtocol { io, protocol }; + *this.state = State::AwaitProtocol { io, protocol }; return Poll::Pending; } // Treat EOF error as [`NegotiationError::Failed`], not as @@ -243,7 +177,7 @@ where match msg { Message::Header(v) if v == HeaderLine::from(*this.version) => { - *this.state = SeqState::AwaitProtocol { io, protocol }; + *this.state = State::AwaitProtocol { io, protocol }; } Message::Protocol(ref p) if p.as_ref() == protocol.as_ref() => { log::debug!("Dialer: Received confirmation for protocol: {}", p); @@ -256,148 +190,13 @@ where String::from_utf8_lossy(protocol.as_ref()) ); let protocol = this.protocols.next().ok_or(NegotiationError::Failed)?; - *this.state = SeqState::SendProtocol { io, protocol } + *this.state = State::SendProtocol { io, protocol } } _ => return Poll::Ready(Err(ProtocolError::InvalidMessage.into())), } } - SeqState::Done => panic!("SeqState::poll called after completion"), - } - } - } -} - -/// A `Future` returned by [`dialer_select_proto_parallel`] which negotiates -/// a protocol selectively by considering all supported protocols of the remote -/// "in parallel". -#[pin_project::pin_project] -pub struct DialerSelectPar { - protocols: I, - state: ParState, - version: Version, -} - -enum ParState { - SendHeader { io: MessageIO }, - SendProtocolsRequest { io: MessageIO }, - Flush { io: MessageIO }, - RecvProtocols { io: MessageIO }, - SendProtocol { io: MessageIO, protocol: N }, - Done, -} - -impl Future for DialerSelectPar -where - // The Unpin bound here is required because we produce a `Negotiated` as the output. - // It also makes the implementation considerably easier to write. - R: AsyncRead + AsyncWrite + Unpin, - I: Iterator, - I::Item: AsRef<[u8]>, -{ - type Output = Result<(I::Item, Negotiated), NegotiationError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - - loop { - match mem::replace(this.state, ParState::Done) { - ParState::SendHeader { mut io } => { - match Pin::new(&mut io).poll_ready(cx)? 
{ - Poll::Ready(()) => {} - Poll::Pending => { - *this.state = ParState::SendHeader { io }; - return Poll::Pending; - } - } - - let msg = Message::Header(HeaderLine::from(*this.version)); - if let Err(err) = Pin::new(&mut io).start_send(msg) { - return Poll::Ready(Err(From::from(err))); - } - - *this.state = ParState::SendProtocolsRequest { io }; - } - - ParState::SendProtocolsRequest { mut io } => { - match Pin::new(&mut io).poll_ready(cx)? { - Poll::Ready(()) => {} - Poll::Pending => { - *this.state = ParState::SendProtocolsRequest { io }; - return Poll::Pending; - } - } - - if let Err(err) = Pin::new(&mut io).start_send(Message::ListProtocols) { - return Poll::Ready(Err(From::from(err))); - } - - log::debug!("Dialer: Requested supported protocols."); - *this.state = ParState::Flush { io } - } - - ParState::Flush { mut io } => match Pin::new(&mut io).poll_flush(cx)? { - Poll::Ready(()) => *this.state = ParState::RecvProtocols { io }, - Poll::Pending => { - *this.state = ParState::Flush { io }; - return Poll::Pending; - } - }, - - ParState::RecvProtocols { mut io } => { - let msg = match Pin::new(&mut io).poll_next(cx)? { - Poll::Ready(Some(msg)) => msg, - Poll::Pending => { - *this.state = ParState::RecvProtocols { io }; - return Poll::Pending; - } - // Treat EOF error as [`NegotiationError::Failed`], not as - // [`NegotiationError::ProtocolError`], allowing dropping or closing an I/O - // stream as a permissible way to "gracefully" fail a negotiation. - Poll::Ready(None) => return Poll::Ready(Err(NegotiationError::Failed)), - }; - - match &msg { - Message::Header(h) if h == &HeaderLine::from(*this.version) => { - *this.state = ParState::RecvProtocols { io } - } - Message::Protocols(supported) => { - let protocol = this - .protocols - .by_ref() - .find(|p| supported.iter().any(|s| s.as_ref() == p.as_ref())) - .ok_or(NegotiationError::Failed)?; - log::debug!( - "Dialer: Found supported protocol: {}", - String::from_utf8_lossy(protocol.as_ref()) - ); - *this.state = ParState::SendProtocol { io, protocol }; - } - _ => return Poll::Ready(Err(ProtocolError::InvalidMessage.into())), - } - } - - ParState::SendProtocol { mut io, protocol } => { - match Pin::new(&mut io).poll_ready(cx)? 
{ - Poll::Ready(()) => {} - Poll::Pending => { - *this.state = ParState::SendProtocol { io, protocol }; - return Poll::Pending; - } - } - - let p = Protocol::try_from(protocol.as_ref())?; - if let Err(err) = Pin::new(&mut io).start_send(Message::Protocol(p.clone())) { - return Poll::Ready(Err(From::from(err))); - } - - log::debug!("Dialer: Expecting proposed protocol: {}", p); - let io = Negotiated::expecting(io.into_reader(), p, None); - - return Poll::Ready(Ok((protocol, io))); - } - - ParState::Done => panic!("ParState::poll called after completion"), + State::Done => panic!("State::poll called after completion"), } } } diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs index 00291f4ece8..0d7346750bc 100644 --- a/misc/multistream-select/src/lib.rs +++ b/misc/multistream-select/src/lib.rs @@ -92,7 +92,6 @@ mod length_delimited; mod listener_select; mod negotiated; mod protocol; -mod tests; pub use self::dialer_select::{dialer_select_proto, DialerSelectFuture}; pub use self::listener_select::{listener_select_proto, ListenerSelectFuture}; diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index d1374ef7495..5809e1768d6 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -455,15 +455,14 @@ impl fmt::Display for ProtocolError { mod tests { use super::*; use quickcheck::*; - use rand::distributions::Alphanumeric; - use rand::Rng; use std::iter; impl Arbitrary for Protocol { - fn arbitrary(g: &mut G) -> Protocol { - let n = g.gen_range(1, g.size()); + fn arbitrary(g: &mut Gen) -> Protocol { + let n = g.gen_range(1..g.size()); let p: String = iter::repeat(()) - .map(|()| g.sample(Alphanumeric)) + .map(|()| char::arbitrary(g)) + .filter(|&c| c.is_ascii_alphanumeric()) .take(n) .collect(); Protocol(Bytes::from(format!("/{}", p))) @@ -471,8 +470,8 @@ mod tests { } impl Arbitrary for Message { - fn arbitrary(g: &mut G) -> Message { - match g.gen_range(0, 5) { + fn arbitrary(g: &mut Gen) -> Message { + match g.gen_range(0..5u8) { 0 => Message::Header(HeaderLine::V1), 1 => Message::NotAvailable, 2 => Message::ListProtocols, @@ -488,7 +487,7 @@ mod tests { fn prop(msg: Message) { let mut buf = BytesMut::new(); msg.encode(&mut buf) - .expect(&format!("Encoding message failed: {:?}", msg)); + .unwrap_or_else(|_| panic!("Encoding message failed: {:?}", msg)); match Message::decode(buf.freeze()) { Ok(m) => assert_eq!(m, msg), Err(e) => panic!("Decoding failed: {:?}", e), diff --git a/misc/multistream-select/src/tests.rs b/misc/multistream-select/tests/dialer_select.rs similarity index 73% rename from misc/multistream-select/src/tests.rs rename to misc/multistream-select/tests/dialer_select.rs index ca627d24fcf..66fd1593a62 100644 --- a/misc/multistream-select/src/tests.rs +++ b/misc/multistream-select/tests/dialer_select.rs @@ -20,14 +20,9 @@ //! Integration tests for protocol negotiation. 
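For orientation, a minimal sketch of driving the public negotiation API end-to-end over async-std TCP, modelled on the tests kept in this file; the protocol names are placeholders and the function would be run with `async_std::task::block_on`:

use async_std::net::{TcpListener, TcpStream};
use multistream_select::{dialer_select_proto, listener_select_proto, Version};

async fn negotiate_once() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();

    let server = async_std::task::spawn(async move {
        let conn = listener.accept().await.unwrap().0;
        // The listener answers with the first protocol it shares with the dialer.
        let (proto, io) = listener_select_proto(conn, vec![b"/proto1", b"/proto2"])
            .await
            .unwrap();
        assert_eq!(proto, b"/proto2");
        io.complete().await.unwrap();
    });

    let client = async_std::task::spawn(async move {
        let conn = TcpStream::connect(&addr).await.unwrap();
        // The dialer proposes its protocols one after the other via `DialerSelectFuture`.
        let (proto, io) = dialer_select_proto(conn, vec![b"/proto3", b"/proto2"], Version::V1)
            .await
            .unwrap();
        assert_eq!(proto, b"/proto2");
        io.complete().await.unwrap();
    });

    server.await;
    client.await;
}

With `Version::V1Lazy` the dialer instead settles optimistically on its last proposed protocol, as described in the state machine above, and `io.complete()` is what ultimately confirms the negotiation on the stream.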
-#![cfg(test)] - -use crate::dialer_select::{dialer_select_proto_parallel, dialer_select_proto_serial}; -use crate::{dialer_select_proto, listener_select_proto}; -use crate::{NegotiationError, Version}; - use async_std::net::{TcpListener, TcpStream}; use futures::prelude::*; +use multistream_select::{dialer_select_proto, listener_select_proto, NegotiationError, Version}; #[test] fn select_proto_basic() { @@ -181,67 +176,3 @@ fn negotiation_failed() { } } } - -#[test] -fn select_proto_parallel() { - async fn run(version: Version) { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let listener_addr = listener.local_addr().unwrap(); - - let server = async_std::task::spawn(async move { - let connec = listener.accept().await.unwrap().0; - let protos = vec![b"/proto1", b"/proto2"]; - let (proto, io) = listener_select_proto(connec, protos).await.unwrap(); - assert_eq!(proto, b"/proto2"); - io.complete().await.unwrap(); - }); - - let client = async_std::task::spawn(async move { - let connec = TcpStream::connect(&listener_addr).await.unwrap(); - let protos = vec![b"/proto3", b"/proto2"]; - let (proto, io) = dialer_select_proto_parallel(connec, protos.into_iter(), version) - .await - .unwrap(); - assert_eq!(proto, b"/proto2"); - io.complete().await.unwrap(); - }); - - server.await; - client.await; - } - - async_std::task::block_on(run(Version::V1)); - async_std::task::block_on(run(Version::V1Lazy)); -} - -#[test] -fn select_proto_serial() { - async fn run(version: Version) { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let listener_addr = listener.local_addr().unwrap(); - - let server = async_std::task::spawn(async move { - let connec = listener.accept().await.unwrap().0; - let protos = vec![b"/proto1", b"/proto2"]; - let (proto, io) = listener_select_proto(connec, protos).await.unwrap(); - assert_eq!(proto, b"/proto2"); - io.complete().await.unwrap(); - }); - - let client = async_std::task::spawn(async move { - let connec = TcpStream::connect(&listener_addr).await.unwrap(); - let protos = vec![b"/proto3", b"/proto2"]; - let (proto, io) = dialer_select_proto_serial(connec, protos.into_iter(), version) - .await - .unwrap(); - assert_eq!(proto, b"/proto2"); - io.complete().await.unwrap(); - }); - - server.await; - client.await; - } - - async_std::task::block_on(run(Version::V1)); - async_std::task::block_on(run(Version::V1Lazy)); -} diff --git a/misc/multistream-select/tests/transport.rs b/misc/multistream-select/tests/transport.rs index cd4f2f87a76..bf5dd247b40 100644 --- a/misc/multistream-select/tests/transport.rs +++ b/misc/multistream-select/tests/transport.rs @@ -19,16 +19,16 @@ // DEALINGS IN THE SOFTWARE. 
use futures::{channel::oneshot, prelude::*, ready}; -use libp2p_core::{ +use libp2p::core::{ identity, multiaddr::Protocol, muxing::StreamMuxerBox, transport::{self, MemoryTransport}, upgrade, Multiaddr, PeerId, Transport, }; -use libp2p_mplex::MplexConfig; -use libp2p_plaintext::PlainText2Config; -use libp2p_swarm::{DummyBehaviour, Swarm, SwarmEvent}; +use libp2p::mplex::MplexConfig; +use libp2p::plaintext::PlainText2Config; +use libp2p::swarm::{dummy, Swarm, SwarmEvent}; use rand::random; use std::task::Poll; @@ -61,8 +61,8 @@ fn transport_upgrade() { let listen_addr = Multiaddr::from(Protocol::Memory(random::())); - let mut dialer = Swarm::new(dialer_transport, DummyBehaviour::default(), dialer_id); - let mut listener = Swarm::new(listener_transport, DummyBehaviour::default(), listener_id); + let mut dialer = Swarm::new(dialer_transport, dummy::Behaviour, dialer_id); + let mut listener = Swarm::new(listener_transport, dummy::Behaviour, listener_id); listener.listen_on(listen_addr).unwrap(); let (addr_sender, addr_receiver) = oneshot::channel(); @@ -71,9 +71,10 @@ fn transport_upgrade() { let addr = addr_receiver.await.unwrap(); dialer.dial(addr).unwrap(); futures::future::poll_fn(move |cx| loop { - match ready!(dialer.poll_next_unpin(cx)).unwrap() { - SwarmEvent::ConnectionEstablished { .. } => return Poll::Ready(()), - _ => {} + if let SwarmEvent::ConnectionEstablished { .. } = + ready!(dialer.poll_next_unpin(cx)).unwrap() + { + return Poll::Ready(()); } }) .await diff --git a/misc/quickcheck-ext/Cargo.toml b/misc/quickcheck-ext/Cargo.toml new file mode 100644 index 00000000000..0c427dc4fc9 --- /dev/null +++ b/misc/quickcheck-ext/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "quickcheck-ext" +version = "0.1.0" +edition = "2021" +publish = false +license = "Unlicense/MIT" + +[dependencies] +quickcheck = "1" +num-traits = "0.2" diff --git a/misc/quickcheck-ext/src/lib.rs b/misc/quickcheck-ext/src/lib.rs new file mode 100644 index 00000000000..392c9f0f8aa --- /dev/null +++ b/misc/quickcheck-ext/src/lib.rs @@ -0,0 +1,44 @@ +pub use quickcheck::*; + +use core::ops::Range; +use num_traits::sign::Unsigned; + +pub trait GenRange { + fn gen_range(&mut self, _range: Range) -> T; + + fn gen_index(&mut self, ubound: usize) -> usize { + if ubound <= (core::u32::MAX as usize) { + self.gen_range(0..ubound as u32) as usize + } else { + self.gen_range(0..ubound) + } + } +} + +impl GenRange for Gen { + fn gen_range(&mut self, range: Range) -> T { + ::arbitrary(self) % (range.end - range.start) + range.start + } +} + +pub trait SliceRandom { + fn shuffle(&mut self, arr: &mut [T]); + fn choose_multiple<'a, T>( + &mut self, + arr: &'a [T], + amount: usize, + ) -> std::iter::Take> { + let mut v: Vec<&T> = arr.iter().collect(); + self.shuffle(&mut v); + v.into_iter().take(amount) + } +} + +impl SliceRandom for Gen { + fn shuffle(&mut self, arr: &mut [T]) { + for i in (1..arr.len()).rev() { + // invariant: elements with index > i have been locked in place. + arr.swap(i, self.gen_index(i + 1)); + } + } +} diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md index 925013d1e29..161cdbc9a64 100644 --- a/muxers/mplex/CHANGELOG.md +++ b/muxers/mplex/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.37.0 [unreleased] + +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. 
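The `quickcheck-ext` shim above restores range sampling and slice shuffling on quickcheck 1's `Gen`, which no longer exposes `rand`. A minimal sketch of how a crate in this repository consumes it, assuming the dev-dependency rename used below (`quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" }`); the `Sample` type and its field values are illustrative only:

use quickcheck::{Arbitrary, Gen, GenRange};

#[derive(Clone, Debug)]
struct Sample {
    len: usize,
    tag: String,
}

impl Arbitrary for Sample {
    fn arbitrary(g: &mut Gen) -> Self {
        // `gen_range` is the `GenRange` extension from `quickcheck-ext`; it
        // reduces an arbitrary unsigned value into the requested range.
        let len = g.gen_range(1..1000usize);
        // Without `rand`'s `Alphanumeric` distribution, random ASCII strings
        // are built by drawing arbitrary chars and filtering.
        let tag: String = std::iter::repeat(())
            .map(|()| char::arbitrary(g))
            .filter(|&c| c.is_ascii_alphanumeric())
            .take(8)
            .collect();
        Sample { len, tag }
    }
}

`Gen::choose` is still provided by quickcheck 1 itself, which is why the `MaxBufferBehaviour::arbitrary` change below only needs the re-export.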
+ +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 + # 0.36.0 - Update to `libp2p-core` `v0.36.0` diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index d4ea5342262..0e38ab84018 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-mplex" edition = "2021" rust-version = "1.56.1" description = "Mplex multiplexing protocol for libp2p" -version = "0.36.0" +version = "0.37.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,11 +14,11 @@ categories = ["network-programming", "asynchronous"] bytes = "1" futures = "0.3.1" asynchronous-codec = "0.6" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } log = "0.4" nohash-hasher = "0.2" parking_lot = "0.12" -rand = "0.7" +rand = "0.8" smallvec = "1.6.1" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } @@ -27,10 +27,8 @@ async-std = "1.7.0" criterion = "0.4" env_logger = "0.9" futures = "0.3" -libp2p-tcp = { path = "../../transports/tcp" } -libp2p-plaintext = { path = "../../transports/plaintext" } -quickcheck = "0.9" -rand = "0.7" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } [[bench]] name = "split_send_size" diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index f74bcd1046f..8d6803880d6 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -26,13 +26,13 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughpu use futures::future::poll_fn; use futures::prelude::*; use futures::{channel::oneshot, future::join}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::{ +use libp2p::core::muxing::StreamMuxerExt; +use libp2p::core::{ identity, multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, PeerId, Transport, }; -use libp2p_mplex as mplex; -use libp2p_plaintext::PlainText2Config; -use libp2p_tcp::GenTcpConfig; +use libp2p::mplex; +use libp2p::plaintext::PlainText2Config; +use libp2p::tcp::GenTcpConfig; use std::pin::Pin; use std::time::Duration; @@ -53,7 +53,7 @@ const BENCH_SIZES: [usize; 8] = [ fn prepare(c: &mut Criterion) { let _ = env_logger::try_init(); - let payload: Vec = vec![1; 1024 * 1024 * 1]; + let payload: Vec = vec![1; 1024 * 1024]; let mut tcp = c.benchmark_group("tcp"); let tcp_addr = multiaddr![Ip4(std::net::Ipv4Addr::new(127, 0, 0, 1)), Tcp(0u16)]; @@ -166,7 +166,7 @@ fn tcp_transport(split_send_size: usize) -> BenchTransport { let mut mplex = mplex::MplexConfig::default(); mplex.set_split_send_size(split_send_size); - libp2p_tcp::TcpTransport::new(GenTcpConfig::default().nodelay(true)) + libp2p::tcp::TcpTransport::new(GenTcpConfig::default().nodelay(true)) .upgrade(upgrade::Version::V1) .authenticate(PlainText2Config { local_public_key }) .multiplex(mplex) diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index bbb11943201..9d0d9939c28 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -1105,27 +1105,25 @@ mod tests { use asynchronous_codec::{Decoder, Encoder}; use bytes::BytesMut; use quickcheck::*; - use rand::prelude::*; use std::collections::HashSet; use std::num::NonZeroU8; use std::ops::DerefMut; use std::pin::Pin; impl Arbitrary for MaxBufferBehaviour { - fn arbitrary(g: &mut G) -> MaxBufferBehaviour { - 
*[MaxBufferBehaviour::Block, MaxBufferBehaviour::ResetStream] - .choose(g) + fn arbitrary(g: &mut Gen) -> MaxBufferBehaviour { + *g.choose(&[MaxBufferBehaviour::Block, MaxBufferBehaviour::ResetStream]) .unwrap() } } impl Arbitrary for MplexConfig { - fn arbitrary(g: &mut G) -> MplexConfig { + fn arbitrary(g: &mut Gen) -> MplexConfig { MplexConfig { - max_substreams: g.gen_range(1, 100), - max_buffer_len: g.gen_range(1, 1000), + max_substreams: g.gen_range(1..100), + max_buffer_len: g.gen_range(1..1000), max_buffer_behaviour: MaxBufferBehaviour::arbitrary(g), - split_send_size: g.gen_range(1, 10000), + split_send_size: g.gen_range(1..10000), protocol_name: crate::config::DEFAULT_MPLEX_PROTOCOL_NAME, } } @@ -1328,7 +1326,7 @@ mod tests { w_buf: BytesMut::new(), eof: false, }; - let mut m = Multiplexed::new(conn, cfg.clone()); + let mut m = Multiplexed::new(conn, cfg); // Run the test. let mut opened = HashSet::new(); diff --git a/muxers/mplex/tests/async_write.rs b/muxers/mplex/tests/async_write.rs index bfbabf0f776..4f5bff1b584 100644 --- a/muxers/mplex/tests/async_write.rs +++ b/muxers/mplex/tests/async_write.rs @@ -19,9 +19,9 @@ // DEALINGS IN THE SOFTWARE. use futures::{channel::oneshot, prelude::*}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::{upgrade, Transport}; -use libp2p_tcp::TcpTransport; +use libp2p::core::muxing::StreamMuxerExt; +use libp2p::core::{upgrade, Transport}; +use libp2p::tcp::TcpTransport; #[test] fn async_write() { @@ -30,7 +30,7 @@ fn async_write() { let (tx, rx) = oneshot::channel(); let bg_thread = async_std::task::spawn(async move { - let mplex = libp2p_mplex::MplexConfig::new(); + let mplex = libp2p::mplex::MplexConfig::new(); let mut transport = TcpTransport::default() .and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)) @@ -67,7 +67,7 @@ fn async_write() { }); async_std::task::block_on(async { - let mplex = libp2p_mplex::MplexConfig::new(); + let mplex = libp2p::mplex::MplexConfig::new(); let mut transport = TcpTransport::default() .and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)); diff --git a/muxers/mplex/tests/two_peers.rs b/muxers/mplex/tests/two_peers.rs index d30fcc1063d..27766c3abd5 100644 --- a/muxers/mplex/tests/two_peers.rs +++ b/muxers/mplex/tests/two_peers.rs @@ -19,9 +19,9 @@ // DEALINGS IN THE SOFTWARE. use futures::{channel::oneshot, prelude::*}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::{upgrade, Transport}; -use libp2p_tcp::TcpTransport; +use libp2p::core::muxing::StreamMuxerExt; +use libp2p::core::{upgrade, Transport}; +use libp2p::tcp::TcpTransport; #[test] fn client_to_server_outbound() { @@ -188,11 +188,10 @@ fn protocol_not_match() { let mut transport = TcpTransport::default() .and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)) .boxed(); - match transport.dial(rx.await.unwrap()).unwrap().await { - Ok(_) => { - assert!(false, "Dialing should fail here as protocols do not match") - } - _ => {} - } + + assert!( + transport.dial(rx.await.unwrap()).unwrap().await.is_err(), + "Dialing should fail here as protocols do not match" + ); }); } diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index 54eb19865a2..41eff104500 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,10 +1,18 @@ +# 0.41.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + # 0.40.0 - Update to `libp2p-core` `v0.36.0` - Remove `OpenSubstreamToken` as it is dead code. See [PR 2873]. +- Drive connection also via `StreamMuxer::poll`. 
Any received streams will be buffered up to a maximum of 25 streams. + See [PR 2861]. + [PR 2873]: https://github.com/libp2p/rust-libp2p/pull/2873/ +[PR 2861]: https://github.com/libp2p/rust-libp2p/pull/2861/ # 0.39.0 diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index 1ee7b4ae667..10800a78c3c 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-yamux" edition = "2021" rust-version = "1.56.1" description = "Yamux multiplexing protocol for libp2p" -version = "0.40.0" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,8 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } parking_lot = "0.12" thiserror = "1.0" yamux = "0.10.0" +log = "0.4" diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index 1c4c9e7c7c9..50e8a3ecc5e 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -24,10 +24,12 @@ use futures::{ future, prelude::*, + ready, stream::{BoxStream, LocalBoxStream}, }; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::collections::VecDeque; use std::{ fmt, io, iter, mem, pin::Pin, @@ -42,8 +44,20 @@ pub struct Yamux { incoming: S, /// Handle to control the connection. control: yamux::Control, + /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote. + /// + /// The only way how yamux can make progress is by driving the [`Incoming`] stream. However, the + /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via + /// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general + /// [`StreamMuxer::poll`] is designed to make progress on existing streams etc. + /// + /// This buffer stores inbound streams that are created whilst [`StreamMuxer::poll`] is called. + /// Once the buffer is full, new inbound streams are dropped. + inbound_stream_buffer: VecDeque, } +const MAX_BUFFERED_INBOUND_STREAMS: usize = 25; + impl fmt::Debug for Yamux { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("Yamux") @@ -65,6 +79,7 @@ where _marker: std::marker::PhantomData, }, control: ctrl, + inbound_stream_buffer: VecDeque::default(), } } } @@ -84,6 +99,7 @@ where _marker: std::marker::PhantomData, }, control: ctrl, + inbound_stream_buffer: VecDeque::default(), } } } @@ -101,13 +117,11 @@ where mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - self.incoming.poll_next_unpin(cx).map(|maybe_stream| { - let stream = maybe_stream - .transpose()? 
- .ok_or(YamuxError(ConnectionError::Closed))?; + if let Some(stream) = self.inbound_stream_buffer.pop_front() { + return Poll::Ready(Ok(stream)); + } - Ok(stream) - }) + self.poll_inner(cx) } fn poll_outbound( @@ -121,9 +135,21 @@ where fn poll( self: Pin<&mut Self>, - _: &mut Context<'_>, + cx: &mut Context<'_>, ) -> Poll> { - Poll::Pending + let this = self.get_mut(); + + loop { + let inbound_stream = ready!(this.poll_inner(cx))?; + + if this.inbound_stream_buffer.len() >= MAX_BUFFERED_INBOUND_STREAMS { + log::warn!("dropping {inbound_stream} because buffer is full"); + drop(inbound_stream); + continue; + } + + this.inbound_stream_buffer.push_back(inbound_stream); + } } fn poll_close(mut self: Pin<&mut Self>, c: &mut Context<'_>) -> Poll> { @@ -145,6 +171,21 @@ where } } +impl Yamux +where + S: Stream> + Unpin, +{ + fn poll_inner(&mut self, cx: &mut Context<'_>) -> Poll> { + self.incoming.poll_next_unpin(cx).map(|maybe_stream| { + let stream = maybe_stream + .transpose()? + .ok_or(YamuxError(ConnectionError::Closed))?; + + Ok(stream) + }) + } +} + /// The yamux configuration. #[derive(Debug, Clone)] pub struct YamuxConfig { diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index 1b585e7ddf9..06809d78c29 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.8.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + +- Update to `libp2p-request-response` `v0.22.0`. + # 0.7.0 - Update to `libp2p-swarm` `v0.39.0`. diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index b5fc9760d6d..ed117c9edf4 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-autonat" edition = "2021" rust-version = "1.56.1" description = "NAT and firewall detection for libp2p" -version = "0.7.0" +version = "0.8.0" authors = ["David Craven ", "Elena Frank "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -18,9 +18,9 @@ async-trait = "0.1" futures = "0.3" futures-timer = "3.0" instant = "0.1" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } -libp2p-request-response = { version = "0.21.0", path = "../request-response" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } +libp2p-request-response = { version = "0.22.0", path = "../request-response" } log = "0.4" rand = "0.8" prost = "0.11" @@ -29,9 +29,4 @@ prost = "0.11" async-std = { version = "1.10", features = ["attributes"] } env_logger = "0.9" clap = {version = "3.1.6", features = ["derive"]} - - -[dev-dependencies.libp2p] -path = "../../" -default-features = false -features = ["autonat", "dns-async-std", "identify", "mplex", "noise", "tcp-async-io", "websocket", "yamux"] +libp2p = { path = "../..", features = ["full"] } diff --git a/protocols/autonat/examples/client.rs b/protocols/autonat/examples/autonat_client.rs similarity index 93% rename from protocols/autonat/examples/client.rs rename to protocols/autonat/examples/autonat_client.rs index c2030d99bca..c90a5c55060 100644 --- a/protocols/autonat/examples/client.rs +++ b/protocols/autonat/examples/autonat_client.rs @@ -32,7 +32,7 @@ use clap::Parser; use futures::prelude::*; use libp2p::autonat; -use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::identify; use libp2p::multiaddr::Protocol; use 
libp2p::swarm::{Swarm, SwarmEvent}; use libp2p::{identity, Multiaddr, NetworkBehaviour, PeerId}; @@ -91,14 +91,14 @@ async fn main() -> Result<(), Box> { #[derive(NetworkBehaviour)] #[behaviour(out_event = "Event")] struct Behaviour { - identify: Identify, + identify: identify::Behaviour, auto_nat: autonat::Behaviour, } impl Behaviour { fn new(local_public_key: identity::PublicKey) -> Self { Self { - identify: Identify::new(IdentifyConfig::new( + identify: identify::Behaviour::new(identify::Config::new( "/ipfs/0.1.0".into(), local_public_key.clone(), )), @@ -117,13 +117,14 @@ impl Behaviour { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum Event { AutoNat(autonat::Event), - Identify(IdentifyEvent), + Identify(identify::Event), } -impl From for Event { - fn from(v: IdentifyEvent) -> Self { +impl From for Event { + fn from(v: identify::Event) -> Self { Self::Identify(v) } } diff --git a/protocols/autonat/examples/server.rs b/protocols/autonat/examples/autonat_server.rs similarity index 92% rename from protocols/autonat/examples/server.rs rename to protocols/autonat/examples/autonat_server.rs index c4ea7a93e9e..7bb79383710 100644 --- a/protocols/autonat/examples/server.rs +++ b/protocols/autonat/examples/autonat_server.rs @@ -29,7 +29,7 @@ use clap::Parser; use futures::prelude::*; use libp2p::autonat; -use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::identify; use libp2p::multiaddr::Protocol; use libp2p::swarm::{Swarm, SwarmEvent}; use libp2p::{identity, Multiaddr, NetworkBehaviour, PeerId}; @@ -76,14 +76,14 @@ async fn main() -> Result<(), Box> { #[derive(NetworkBehaviour)] #[behaviour(out_event = "Event")] struct Behaviour { - identify: Identify, + identify: identify::Behaviour, auto_nat: autonat::Behaviour, } impl Behaviour { fn new(local_public_key: identity::PublicKey) -> Self { Self { - identify: Identify::new(IdentifyConfig::new( + identify: identify::Behaviour::new(identify::Config::new( "/ipfs/0.1.0".into(), local_public_key.clone(), )), @@ -96,13 +96,14 @@ impl Behaviour { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum Event { AutoNat(autonat::Event), - Identify(IdentifyEvent), + Identify(identify::Event), } -impl From for Event { - fn from(v: IdentifyEvent) -> Self { +impl From for Event { + fn from(v: identify::Event) -> Self { Self::Identify(v) } } diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index 72088f6e232..420bcf99829 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -58,9 +58,8 @@ async fn spawn_server(kill: oneshot::Receiver<()>) -> (PeerId, Multiaddr) { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); let addr = loop { - match server.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. } => break address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = server.select_next_some().await { + break address; }; }; tx.send((peer_id, addr)).unwrap(); @@ -78,11 +77,8 @@ async fn spawn_server(kill: oneshot::Receiver<()>) -> (PeerId, Multiaddr) { async fn next_event(swarm: &mut Swarm) -> Event { loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(event) => { - break event; - } - _ => {} + if let SwarmEvent::Behaviour(event) = swarm.select_next_some().await { + break event; } } } @@ -177,9 +173,8 @@ async fn test_auto_probe() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. 
} => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } @@ -269,14 +264,13 @@ async fn test_confidence() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } } else { let unreachable_addr: Multiaddr = "/ip4/127.0.0.1/tcp/42".parse().unwrap(); - client.add_external_address(unreachable_addr.clone(), AddressScore::Infinite); + client.add_external_address(unreachable_addr, AddressScore::Infinite); } for i in 0..MAX_CONFIDENCE + 1 { @@ -357,9 +351,8 @@ async fn test_throttle_server_period() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } @@ -477,9 +470,8 @@ async fn test_outbound_failure() { .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } // First probe should be successful and flip status to public. @@ -497,7 +489,8 @@ async fn test_outbound_failure() { } let inactive = servers.split_off(1); - // Drop the handles of the inactive servers to kill them. + + #[allow(clippy::needless_collect)] // Drop the handles of the inactive servers to kill them. let inactive_ids: Vec<_> = inactive.into_iter().map(|(id, _handle)| id).collect(); // Expect to retry on outbound failure @@ -541,9 +534,8 @@ async fn test_global_ips_config() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index 36328319e9f..b45ae7ecafc 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -20,6 +20,8 @@ use futures::{channel::oneshot, Future, FutureExt, StreamExt}; use futures_timer::Delay; +use libp2p::core::{ConnectedPoint, Endpoint}; +use libp2p::swarm::DialError; use libp2p::{ development_transport, identity::Keypair, @@ -30,8 +32,6 @@ use libp2p::{ use libp2p_autonat::{ Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, ResponseError, }; -use libp2p_core::{ConnectedPoint, Endpoint}; -use libp2p_swarm::DialError; use std::{num::NonZeroU32, time::Duration}; async fn init_swarm(config: Config) -> Swarm { @@ -56,9 +56,8 @@ async fn init_server(config: Option) -> (Swarm, PeerId, Multi .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); let addr = loop { - match server.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. } => break address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = server.select_next_some().await { + break address; }; }; (server, peer_id, addr) @@ -91,12 +90,9 @@ async fn spawn_client( .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. } => { - addr = Some(address); - break; - } - _ => {} + if let SwarmEvent::NewListenAddr { address, .. 
} = client.select_next_some().await { + addr = Some(address); + break; }; } } @@ -119,11 +115,8 @@ async fn spawn_client( async fn next_event(swarm: &mut Swarm) -> Event { loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(event) => { - break event; - } - _ => {} + if let SwarmEvent::Behaviour(event) = swarm.select_next_some().await { + break event; } } } @@ -161,9 +154,8 @@ async fn test_dial_back() { } => { assert_eq!(peer_id, client_id); let observed_client_ip = loop { - match send_back_addr.pop().unwrap() { - Protocol::Ip4(ip4_addr) => break ip4_addr, - _ => {} + if let Protocol::Ip4(ip4_addr) = send_back_addr.pop().unwrap() { + break ip4_addr; } }; break observed_client_ip; diff --git a/protocols/dcutr/CHANGELOG.md b/protocols/dcutr/CHANGELOG.md index f6742cf5581..8f98e3257d6 100644 --- a/protocols/dcutr/CHANGELOG.md +++ b/protocols/dcutr/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.7.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + # 0.6.0 - Update to `libp2p-swarm` `v0.39.0`. diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index dd059e41422..dbdf9494d9b 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dcutr" edition = "2021" rust-version = "1.56.1" description = "Direct connection upgrade through relay" -version = "0.6.0" +version = "0.7.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -17,8 +17,8 @@ either = "1.6.0" futures = "0.3.1" futures-timer = "3.0" instant = "0.1.11" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } log = "0.4" prost-codec = { version = "0.2", path = "../../misc/prost-codec" } prost = "0.11" @@ -30,10 +30,6 @@ prost-build = "0.11" [dev-dependencies] env_logger = "0.9.0" -libp2p = { path = "../..", default-features = false, features = ["dcutr", "relay", "plaintext", "identify", "tcp-async-io", "ping", "noise", "dns-async-std"] } -libp2p-identify = { path = "../identify" } -libp2p-plaintext = { path = "../../transports/plaintext" } -libp2p-relay = { path = "../relay" } -libp2p-yamux = { path = "../../muxers/yamux" } -rand = "0.7" +libp2p = { path = "../..", features = ["full"] } +rand = "0.8" clap = {version = "3.1.6", features = ["derive"]} diff --git a/protocols/dcutr/examples/client.rs b/protocols/dcutr/examples/dcutr.rs similarity index 91% rename from protocols/dcutr/examples/client.rs rename to protocols/dcutr/examples/dcutr.rs index 54448ff635d..925e03593b5 100644 --- a/protocols/dcutr/examples/client.rs +++ b/protocols/dcutr/examples/dcutr.rs @@ -25,15 +25,14 @@ use futures::stream::StreamExt; use libp2p::core::multiaddr::{Multiaddr, Protocol}; use libp2p::core::transport::OrTransport; use libp2p::core::upgrade; -use libp2p::dcutr; use libp2p::dns::DnsConfig; -use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}; +use libp2p::identify; use libp2p::noise; -use libp2p::ping::{Ping, PingConfig, PingEvent}; use libp2p::relay::v2::client::{self, Client}; use libp2p::swarm::{SwarmBuilder, SwarmEvent}; use libp2p::tcp::{GenTcpConfig, TcpTransport}; use libp2p::Transport; +use libp2p::{dcutr, ping}; use libp2p::{identity, NetworkBehaviour, PeerId}; use log::info; use std::convert::TryInto; @@ -101,34 +100,35 @@ fn main() -> 
Result<(), Box> { noise::NoiseAuthenticated::xx(&local_key) .expect("Signing libp2p-noise static DH keypair failed."), ) - .multiplex(libp2p_yamux::YamuxConfig::default()) + .multiplex(libp2p::yamux::YamuxConfig::default()) .boxed(); #[derive(NetworkBehaviour)] #[behaviour(out_event = "Event", event_process = false)] struct Behaviour { relay_client: Client, - ping: Ping, - identify: Identify, + ping: ping::Behaviour, + identify: identify::Behaviour, dcutr: dcutr::behaviour::Behaviour, } #[derive(Debug)] + #[allow(clippy::large_enum_variant)] enum Event { - Ping(PingEvent), - Identify(IdentifyEvent), + Ping(ping::Event), + Identify(identify::Event), Relay(client::Event), Dcutr(dcutr::behaviour::Event), } - impl From for Event { - fn from(e: PingEvent) -> Self { + impl From for Event { + fn from(e: ping::Event) -> Self { Event::Ping(e) } } - impl From for Event { - fn from(e: IdentifyEvent) -> Self { + impl From for Event { + fn from(e: identify::Event) -> Self { Event::Identify(e) } } @@ -147,8 +147,8 @@ fn main() -> Result<(), Box> { let behaviour = Behaviour { relay_client: client, - ping: Ping::new(PingConfig::new()), - identify: Identify::new(IdentifyConfig::new( + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new(identify::Config::new( "/TODO/0.0.1".to_string(), local_key.public(), )), @@ -201,12 +201,12 @@ fn main() -> Result<(), Box> { SwarmEvent::Dialing { .. } => {} SwarmEvent::ConnectionEstablished { .. } => {} SwarmEvent::Behaviour(Event::Ping(_)) => {} - SwarmEvent::Behaviour(Event::Identify(IdentifyEvent::Sent { .. })) => { + SwarmEvent::Behaviour(Event::Identify(identify::Event::Sent { .. })) => { info!("Told relay its public address."); told_relay_observed_addr = true; } - SwarmEvent::Behaviour(Event::Identify(IdentifyEvent::Received { - info: IdentifyInfo { observed_addr, .. }, + SwarmEvent::Behaviour(Event::Identify(identify::Event::Received { + info: identify::Info { observed_addr, .. }, .. 
})) => { info!("Relay told us our public address: {:?}", observed_addr); diff --git a/protocols/dcutr/src/handler.rs b/protocols/dcutr/src/handler.rs index 12eb8f4097a..e854b395308 100644 --- a/protocols/dcutr/src/handler.rs +++ b/protocols/dcutr/src/handler.rs @@ -23,7 +23,7 @@ use either::Either; use libp2p_core::connection::ConnectionId; use libp2p_core::upgrade::{self, DeniedUpgrade}; use libp2p_core::{ConnectedPoint, PeerId}; -use libp2p_swarm::handler::DummyConnectionHandler; +use libp2p_swarm::dummy; use libp2p_swarm::handler::SendWrapper; use libp2p_swarm::{ConnectionHandler, IntoConnectionHandler}; @@ -44,7 +44,7 @@ pub enum Role { } impl IntoConnectionHandler for Prototype { - type Handler = Either>; + type Handler = Either>; fn into_handler(self, _remote_peer_id: &PeerId, endpoint: &ConnectedPoint) -> Self::Handler { match self { @@ -52,7 +52,7 @@ impl IntoConnectionHandler for Prototype { if endpoint.is_relayed() { Either::Left(relayed::Handler::new(endpoint.clone())) } else { - Either::Right(Either::Right(DummyConnectionHandler::default())) + Either::Right(Either::Right(dummy::ConnectionHandler)) } } Self::DirectConnection { diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 6a10d1049e4..64aca18596b 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -33,8 +33,8 @@ use libp2p::dcutr; use libp2p::plaintext::PlainText2Config; use libp2p::relay::v2::client; use libp2p::relay::v2::relay; +use libp2p::swarm::{AddressScore, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p::NetworkBehaviour; -use libp2p_swarm::{AddressScore, NetworkBehaviour, Swarm, SwarmEvent}; use std::time::Duration; #[test] @@ -53,7 +53,6 @@ fn connect() { let mut dst = build_client(); let dst_peer_id = *dst.local_peer_id(); let dst_relayed_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit) .with(Protocol::P2p(dst_peer_id.into())); @@ -96,7 +95,7 @@ fn connect() { fn build_relay() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let transport = build_transport(MemoryTransport::default().boxed(), local_public_key); @@ -116,7 +115,7 @@ fn build_relay() -> Swarm { fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let (relay_transport, behaviour) = client::Client::new_transport_and_behaviour(local_peer_id); let transport = build_transport( @@ -141,13 +140,11 @@ fn build_transport( where StreamSink: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - let transport = transport + transport .upgrade(Version::V1) .authenticate(PlainText2Config { local_public_key }) - .multiplex(libp2p_yamux::YamuxConfig::default()) - .boxed(); - - transport + .multiplex(libp2p::yamux::YamuxConfig::default()) + .boxed() } #[derive(NetworkBehaviour)] diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md index 5a91e14642a..e7c1392c642 100644 --- a/protocols/floodsub/CHANGELOG.md +++ b/protocols/floodsub/CHANGELOG.md @@ -1,3 +1,13 @@ +# 0.40.0 [unreleased] + +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. 
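The example and test updates above all apply the same renaming: the `Identify`/`IdentifyConfig`/`IdentifyEvent` and `Ping`/`PingConfig`/`PingEvent` types are now addressed through their modules as `identify::*` and `ping::*`. A minimal behaviour definition in that style, with a placeholder protocol-version string:

use libp2p::{identify, identity, ping, NetworkBehaviour};

#[derive(NetworkBehaviour)]
#[behaviour(out_event = "Event")]
struct Behaviour {
    identify: identify::Behaviour,
    ping: ping::Behaviour,
}

#[derive(Debug)]
enum Event {
    Identify(identify::Event),
    Ping(ping::Event),
}

impl From<identify::Event> for Event {
    fn from(e: identify::Event) -> Self {
        Event::Identify(e)
    }
}

impl From<ping::Event> for Event {
    fn from(e: ping::Event) -> Self {
        Event::Ping(e)
    }
}

impl Behaviour {
    fn new(local_key: &identity::Keypair) -> Self {
        Self {
            identify: identify::Behaviour::new(identify::Config::new(
                // The protocol-version string is a placeholder.
                "/example/0.0.1".to_string(),
                local_key.public(),
            )),
            ping: ping::Behaviour::new(ping::Config::new()),
        }
    }
}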
+ +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 + # 0.39.0 - Update to `libp2p-swarm` `v0.39.0`. diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index 58977415005..f030e7477d9 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-floodsub" edition = "2021" rust-version = "1.56.1" description = "Floodsub protocol for libp2p" -version = "0.39.0" +version = "0.40.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,11 +14,11 @@ categories = ["network-programming", "asynchronous"] cuckoofilter = "0.5.0" fnv = "1.0" futures = "0.3.1" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } log = "0.4" prost = "0.11" -rand = "0.7" +rand = "0.8" smallvec = "1.6.1" [build-dependencies] diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 1ba36ad6607..99d2ad3a1c3 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,3 +1,13 @@ +# 0.42.0 [unreleased] + +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 + # 0.41.0 - Update to `libp2p-swarm` `v0.39.0`. diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 2adccfd6607..00778ef0688 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-gossipsub" edition = "2021" rust-version = "1.56.1" description = "Gossipsub protocol for libp2p" -version = "0.41.0" +version = "0.42.1" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,13 +11,13 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } bytes = "1.0" byteorder = "1.3.4" fnv = "1.0.7" futures = "0.3.5" -rand = "0.7.3" +rand = "0.8" asynchronous-codec = "0.6" unsigned-varint = { version = "0.7.0", features = ["asynchronous_codec"] } log = "0.4.11" @@ -36,11 +36,8 @@ prometheus-client = "0.18.0" [dev-dependencies] async-std = "1.6.3" env_logger = "0.9.0" -libp2p-plaintext = { path = "../../transports/plaintext" } -libp2p-yamux = { path = "../../muxers/yamux" } -libp2p-mplex = { path = "../../muxers/mplex" } -libp2p-noise = { path = "../../transports/noise" } -quickcheck = "0.9.2" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } hex = "0.4.2" derive_builder = "0.11.1" diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 21dd77562df..43f2f79466d 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -3664,7 +3664,6 @@ mod local_test { use crate::IdentTopic; use asynchronous_codec::Encoder; use quickcheck::*; - use rand::Rng; fn empty_rpc() -> GossipsubRpc { GossipsubRpc { @@ 
-3701,16 +3700,16 @@ mod local_test { } impl Arbitrary for GossipsubRpc { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let mut rpc = empty_rpc(); - for _ in 0..g.gen_range(0, 10) { + for _ in 0..g.gen_range(0..10u8) { rpc.subscriptions.push(test_subscription()); } - for _ in 0..g.gen_range(0, 10) { + for _ in 0..g.gen_range(0..10u8) { rpc.messages.push(test_message()); } - for _ in 0..g.gen_range(0, 10) { + for _ in 0..g.gen_range(0..10u8) { rpc.control_msgs.push(test_control()); } rpc diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index b4760b7208b..71f4aae9b50 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -20,5350 +20,5242 @@ // Collection of tests for the gossipsub network behaviour -mod tests { - use byteorder::{BigEndian, ByteOrder}; - use std::thread::sleep; - use std::time::Duration; - - use async_std::net::Ipv4Addr; - use rand::Rng; - - use crate::{ - GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, IdentTopic as Topic, - TopicScoreParams, - }; - - use super::super::*; - use crate::error::ValidationError; - use crate::subscription_filter::WhitelistSubscriptionFilter; - use crate::transform::{DataTransform, IdentityTransform}; - use crate::types::FastMessageId; - use libp2p_core::Endpoint; - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; - - #[derive(Default, Builder, Debug)] - #[builder(default)] - struct InjectNodes - // TODO: remove trait bound Default when this issue is fixed: - // https://github.com/colin-kiegel/rust-derive-builder/issues/93 - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - peer_no: usize, - topics: Vec, - to_subscribe: bool, - gs_config: GossipsubConfig, - explicit: usize, - outbound: usize, - scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, - data_transform: D, - subscription_filter: F, - } - - impl InjectNodes - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - pub fn create_network(self) -> (Gossipsub, Vec, Vec) { - let keypair = libp2p_core::identity::Keypair::generate_ed25519(); - // create a gossipsub struct - let mut gs: Gossipsub = Gossipsub::new_with_subscription_filter_and_transform( - MessageAuthenticity::Signed(keypair), - self.gs_config, - None, - self.subscription_filter, - self.data_transform, - ) - .unwrap(); +use super::*; +use crate::error::ValidationError; +use crate::subscription_filter::WhitelistSubscriptionFilter; +use crate::transform::{DataTransform, IdentityTransform}; +use crate::types::FastMessageId; +use crate::{ + GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, IdentTopic as Topic, + TopicScoreParams, +}; +use async_std::net::Ipv4Addr; +use byteorder::{BigEndian, ByteOrder}; +use libp2p_core::Endpoint; +use rand::Rng; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; +use std::thread::sleep; +use std::time::Duration; + +#[derive(Default, Builder, Debug)] +#[builder(default)] +struct InjectNodes +// TODO: remove trait bound Default when this issue is fixed: +// https://github.com/colin-kiegel/rust-derive-builder/issues/93 +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + peer_no: usize, + topics: Vec, + to_subscribe: bool, + gs_config: 
GossipsubConfig, + explicit: usize, + outbound: usize, + scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, + data_transform: D, + subscription_filter: F, +} - if let Some((scoring_params, scoring_thresholds)) = self.scoring { - gs.with_peer_score(scoring_params, scoring_thresholds) - .unwrap(); - } +impl InjectNodes +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + pub fn create_network(self) -> (Gossipsub, Vec, Vec) { + let keypair = libp2p_core::identity::Keypair::generate_ed25519(); + // create a gossipsub struct + let mut gs: Gossipsub = Gossipsub::new_with_subscription_filter_and_transform( + MessageAuthenticity::Signed(keypair), + self.gs_config, + None, + self.subscription_filter, + self.data_transform, + ) + .unwrap(); - let mut topic_hashes = vec![]; + if let Some((scoring_params, scoring_thresholds)) = self.scoring { + gs.with_peer_score(scoring_params, scoring_thresholds) + .unwrap(); + } - // subscribe to the topics - for t in self.topics { - let topic = Topic::new(t); - gs.subscribe(&topic).unwrap(); - topic_hashes.push(topic.hash().clone()); - } + let mut topic_hashes = vec![]; - // build and connect peer_no random peers - let mut peers = vec![]; + // subscribe to the topics + for t in self.topics { + let topic = Topic::new(t); + gs.subscribe(&topic).unwrap(); + topic_hashes.push(topic.hash().clone()); + } - let empty = vec![]; - for i in 0..self.peer_no { - peers.push(add_peer( - &mut gs, - if self.to_subscribe { - &topic_hashes - } else { - &empty - }, - i < self.outbound, - i < self.explicit, - )); - } + // build and connect peer_no random peers + let mut peers = vec![]; - (gs, peers, topic_hashes) + let empty = vec![]; + for i in 0..self.peer_no { + peers.push(add_peer( + &mut gs, + if self.to_subscribe { + &topic_hashes + } else { + &empty + }, + i < self.outbound, + i < self.explicit, + )); } - } - impl InjectNodesBuilder - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - pub fn create_network(&self) -> (Gossipsub, Vec, Vec) { - self.build().unwrap().create_network() - } + (gs, peers, topic_hashes) } +} - fn inject_nodes() -> InjectNodesBuilder - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - InjectNodesBuilder::default() +impl InjectNodesBuilder +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + pub fn create_network(&self) -> (Gossipsub, Vec, Vec) { + self.build().unwrap().create_network() } +} - fn inject_nodes1() -> InjectNodesBuilder { - inject_nodes() - } +fn inject_nodes() -> InjectNodesBuilder +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + InjectNodesBuilder::default() +} - // helper functions for testing - - fn add_peer( - gs: &mut Gossipsub, - topic_hashes: &Vec, - outbound: bool, - explicit: bool, - ) -> PeerId - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - add_peer_with_addr(gs, topic_hashes, outbound, explicit, Multiaddr::empty()) - } +fn inject_nodes1() -> InjectNodesBuilder { + inject_nodes() +} - fn add_peer_with_addr( - gs: &mut Gossipsub, - topic_hashes: &Vec, - outbound: bool, - explicit: bool, - address: Multiaddr, - ) -> PeerId - where - D: 
DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - add_peer_with_addr_and_kind( - gs, - topic_hashes, - outbound, - explicit, - address, - Some(PeerKind::Gossipsubv1_1), - ) - } +// helper functions for testing + +fn add_peer( + gs: &mut Gossipsub, + topic_hashes: &Vec, + outbound: bool, + explicit: bool, +) -> PeerId +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + add_peer_with_addr(gs, topic_hashes, outbound, explicit, Multiaddr::empty()) +} - fn add_peer_with_addr_and_kind( - gs: &mut Gossipsub, - topic_hashes: &Vec, - outbound: bool, - explicit: bool, - address: Multiaddr, - kind: Option, - ) -> PeerId - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - let peer = PeerId::random(); - gs.inject_connection_established( +fn add_peer_with_addr( + gs: &mut Gossipsub, + topic_hashes: &Vec, + outbound: bool, + explicit: bool, + address: Multiaddr, +) -> PeerId +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + add_peer_with_addr_and_kind( + gs, + topic_hashes, + outbound, + explicit, + address, + Some(PeerKind::Gossipsubv1_1), + ) +} + +fn add_peer_with_addr_and_kind( + gs: &mut Gossipsub, + topic_hashes: &Vec, + outbound: bool, + explicit: bool, + address: Multiaddr, + kind: Option, +) -> PeerId +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + let peer = PeerId::random(); + gs.inject_connection_established( + &peer, + &ConnectionId::new(0), + &if outbound { + ConnectedPoint::Dialer { + address, + role_override: Endpoint::Dialer, + } + } else { + ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: address, + } + }, + None, + 0, // first connection + ); + if let Some(kind) = kind { + gs.inject_event(peer, ConnectionId::new(1), HandlerEvent::PeerKind(kind)); + } + if explicit { + gs.add_explicit_peer(&peer); + } + if !topic_hashes.is_empty() { + gs.handle_received_subscriptions( + &topic_hashes + .iter() + .cloned() + .map(|t| GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: t, + }) + .collect::>(), &peer, - &ConnectionId::new(0), - &if outbound { - ConnectedPoint::Dialer { - address, - role_override: Endpoint::Dialer, - } - } else { - ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: address, - } - }, - None, - 0, // first connection ); - if let Some(kind) = kind { - gs.inject_event( - peer.clone(), - ConnectionId::new(1), - HandlerEvent::PeerKind(kind), - ); - } - if explicit { - gs.add_explicit_peer(&peer); - } - if !topic_hashes.is_empty() { - gs.handle_received_subscriptions( - &topic_hashes - .iter() - .cloned() - .map(|t| GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: t, - }) - .collect::>(), - &peer, + } + peer +} + +fn disconnect_peer(gs: &mut Gossipsub, peer_id: &PeerId) +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + if let Some(peer_connections) = gs.connected_peers.get(peer_id) { + let fake_endpoint = ConnectedPoint::Dialer { + address: Multiaddr::empty(), + role_override: Endpoint::Dialer, + }; // this is not relevant + // peer_connections.connections should never be 
empty. + let mut active_connections = peer_connections.connections.len(); + for conn_id in peer_connections.connections.clone() { + let handler = gs.new_handler(); + active_connections = active_connections.checked_sub(1).unwrap(); + gs.inject_connection_closed( + peer_id, + &conn_id, + &fake_endpoint, + handler, + active_connections, ); } - peer } +} - fn disconnect_peer(gs: &mut Gossipsub, peer_id: &PeerId) - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - if let Some(peer_connections) = gs.connected_peers.get(peer_id) { - let fake_endpoint = ConnectedPoint::Dialer { - address: Multiaddr::empty(), - role_override: Endpoint::Dialer, - }; // this is not relevant - // peer_connections.connections should never be empty. - let mut active_connections = peer_connections.connections.len(); - for conn_id in peer_connections.connections.clone() { - let handler = gs.new_handler(); - active_connections = active_connections.checked_sub(1).unwrap(); - gs.inject_connection_closed( - peer_id, - &conn_id, - &fake_endpoint, - handler, - active_connections, - ); - } - } +// Converts a protobuf message into a gossipsub message for reading the Gossipsub event queue. +fn proto_to_message(rpc: &crate::rpc_proto::Rpc) -> GossipsubRpc { + // Store valid messages. + let mut messages = Vec::with_capacity(rpc.publish.len()); + let rpc = rpc.clone(); + for message in rpc.publish.into_iter() { + messages.push(RawGossipsubMessage { + source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), + data: message.data.unwrap_or_default(), + sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application + topic: TopicHash::from_raw(message.topic), + signature: message.signature, // don't inform the application + key: None, + validated: false, + }); } + let mut control_msgs = Vec::new(); + if let Some(rpc_control) = rpc.control { + // Collect the gossipsub control messages + let ihave_msgs: Vec = rpc_control + .ihave + .into_iter() + .map(|ihave| GossipsubControlAction::IHave { + topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), + message_ids: ihave + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + .collect(); - // Converts a protobuf message into a gossipsub message for reading the Gossipsub event queue. - fn proto_to_message(rpc: &crate::rpc_proto::Rpc) -> GossipsubRpc { - // Store valid messages. 
- let mut messages = Vec::with_capacity(rpc.publish.len()); - let rpc = rpc.clone(); - for message in rpc.publish.into_iter() { - messages.push(RawGossipsubMessage { - source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), - data: message.data.unwrap_or_default(), - sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application - topic: TopicHash::from_raw(message.topic), - signature: message.signature, // don't inform the application - key: None, - validated: false, - }); - } - let mut control_msgs = Vec::new(); - if let Some(rpc_control) = rpc.control { - // Collect the gossipsub control messages - let ihave_msgs: Vec = rpc_control - .ihave - .into_iter() - .map(|ihave| GossipsubControlAction::IHave { - topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), - message_ids: ihave - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), - }) - .collect(); + let iwant_msgs: Vec = rpc_control + .iwant + .into_iter() + .map(|iwant| GossipsubControlAction::IWant { + message_ids: iwant + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + .collect(); - let iwant_msgs: Vec = rpc_control - .iwant - .into_iter() - .map(|iwant| GossipsubControlAction::IWant { - message_ids: iwant - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), - }) - .collect(); + let graft_msgs: Vec = rpc_control + .graft + .into_iter() + .map(|graft| GossipsubControlAction::Graft { + topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + }) + .collect(); - let graft_msgs: Vec = rpc_control - .graft + let mut prune_msgs = Vec::new(); + + for prune in rpc_control.prune { + // filter out invalid peers + let peers = prune + .peers .into_iter() - .map(|graft| GossipsubControlAction::Graft { - topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + .filter_map(|info| { + info.peer_id + .and_then(|id| PeerId::from_bytes(&id).ok()) + .map(|peer_id| + //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 + PeerInfo { + peer_id: Some(peer_id), + }) }) - .collect(); - - let mut prune_msgs = Vec::new(); + .collect::>(); - for prune in rpc_control.prune { - // filter out invalid peers - let peers = prune - .peers - .into_iter() - .filter_map(|info| { - info.peer_id - .and_then(|id| PeerId::from_bytes(&id).ok()) - .map(|peer_id| - //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 - PeerInfo { - peer_id: Some(peer_id), - }) - }) - .collect::>(); - - let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); - prune_msgs.push(GossipsubControlAction::Prune { - topic_hash, - peers, - backoff: prune.backoff, - }); - } - - control_msgs.extend(ihave_msgs); - control_msgs.extend(iwant_msgs); - control_msgs.extend(graft_msgs); - control_msgs.extend(prune_msgs); + let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); + prune_msgs.push(GossipsubControlAction::Prune { + topic_hash, + peers, + backoff: prune.backoff, + }); } - GossipsubRpc { - messages, - subscriptions: rpc - .subscriptions - .into_iter() - .map(|sub| GossipsubSubscription { - action: if Some(true) == sub.subscribe { - GossipsubSubscriptionAction::Subscribe - } else { - GossipsubSubscriptionAction::Unsubscribe - }, - topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), - }) - .collect(), - control_msgs, - } + control_msgs.extend(ihave_msgs); + control_msgs.extend(iwant_msgs); + control_msgs.extend(graft_msgs); + control_msgs.extend(prune_msgs); 
} - #[test] - /// Test local node subscribing to a topic - fn test_subscribe() { - // The node should: - // - Create an empty vector in mesh[topic] - // - Send subscription request to all peers - // - run JOIN(topic) - - let subscribe_topic = vec![String::from("test_subscribe")]; - let (gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(subscribe_topic) - .to_subscribe(true) - .create_network(); + GossipsubRpc { + messages, + subscriptions: rpc + .subscriptions + .into_iter() + .map(|sub| GossipsubSubscription { + action: if Some(true) == sub.subscribe { + GossipsubSubscriptionAction::Subscribe + } else { + GossipsubSubscriptionAction::Unsubscribe + }, + topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), + }) + .collect(), + control_msgs, + } +} - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); +#[test] +/// Test local node subscribing to a topic +fn test_subscribe() { + // The node should: + // - Create an empty vector in mesh[topic] + // - Send subscription request to all peers + // - run JOIN(topic) + + let subscribe_topic = vec![String::from("test_subscribe")]; + let (gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(subscribe_topic) + .to_subscribe(true) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // collect all the subscriptions + let subscriptions = gs + .events + .iter() + .fold(vec![], |mut collected_subscriptions, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + for s in &message.subscriptions { + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) + }; + } + collected_subscriptions + } + _ => collected_subscriptions, + }, + _ => collected_subscriptions, + }); - // collect all the subscriptions - let subscriptions = - gs.events - .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { - GossipsubHandlerIn::Message(ref message) => { - for s in &message.subscriptions { - match s.subscribe { - Some(true) => collected_subscriptions.push(s.clone()), - _ => {} - }; - } - collected_subscriptions - } - _ => collected_subscriptions, - }, - _ => collected_subscriptions, - }); + // we sent a subscribe to all known peers + assert!( + subscriptions.len() == 20, + "Should send a subscription to all known peers" + ); +} - // we sent a subscribe to all known peers +#[test] +/// Test unsubscribe. +fn test_unsubscribe() { + // Unsubscribe should: + // - Remove the mesh entry for topic + // - Send UNSUBSCRIBE to all known peers + // - Call Leave + + let topic_strings = vec![String::from("topic1"), String::from("topic2")]; + let topics = topic_strings + .iter() + .map(|t| Topic::new(t.clone())) + .collect::>(); + + // subscribe to topic_strings + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topic_strings) + .to_subscribe(true) + .create_network(); + + for topic_hash in &topic_hashes { assert!( - subscriptions.len() == 20, - "Should send a subscription to all known peers" + gs.topic_peers.get(topic_hash).is_some(), + "Topic_peers contain a topic entry" + ); + assert!( + gs.mesh.get(topic_hash).is_some(), + "mesh should contain a topic entry" ); } - #[test] - /// Test unsubscribe. 
- fn test_unsubscribe() { - // Unsubscribe should: - // - Remove the mesh entry for topic - // - Send UNSUBSCRIBE to all known peers - // - Call Leave + // unsubscribe from both topics + assert!( + gs.unsubscribe(&topics[0]).unwrap(), + "should be able to unsubscribe successfully from each topic", + ); + assert!( + gs.unsubscribe(&topics[1]).unwrap(), + "should be able to unsubscribe successfully from each topic", + ); + + // collect all the subscriptions + let subscriptions = gs + .events + .iter() + .fold(vec![], |mut collected_subscriptions, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + for s in &message.subscriptions { + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) + }; + } + collected_subscriptions + } + _ => collected_subscriptions, + }, + _ => collected_subscriptions, + }); - let topic_strings = vec![String::from("topic1"), String::from("topic2")]; - let topics = topic_strings - .iter() - .map(|t| Topic::new(t.clone())) - .collect::>(); + // we sent a unsubscribe to all known peers, for two topics + assert!( + subscriptions.len() == 40, + "Should send an unsubscribe event to all known peers" + ); - // subscribe to topic_strings - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topic_strings) - .to_subscribe(true) - .create_network(); + // check we clean up internal structures + for topic_hash in &topic_hashes { + assert!( + gs.mesh.get(topic_hash).is_none(), + "All topics should have been removed from the mesh" + ); + } +} - for topic_hash in &topic_hashes { - assert!( - gs.topic_peers.get(&topic_hash).is_some(), - "Topic_peers contain a topic entry" - ); - assert!( - gs.mesh.get(&topic_hash).is_some(), - "mesh should contain a topic entry" - ); +#[test] +/// Test JOIN(topic) functionality. +fn test_join() { + // The Join function should: + // - Remove peers from fanout[topic] + // - Add any fanout[topic] peers to the mesh (up to mesh_n) + // - Fill up to mesh_n peers from known gossipsub peers in the topic + // - Send GRAFT messages to all nodes added to the mesh + + // This test is not an isolated unit test, rather it uses higher level, + // subscribe/unsubscribe to perform the test. 
+ + let topic_strings = vec![String::from("topic1"), String::from("topic2")]; + let topics = topic_strings + .iter() + .map(|t| Topic::new(t.clone())) + .collect::>(); + + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topic_strings) + .to_subscribe(true) + .create_network(); + + // unsubscribe, then call join to invoke functionality + assert!( + gs.unsubscribe(&topics[0]).unwrap(), + "should be able to unsubscribe successfully" + ); + assert!( + gs.unsubscribe(&topics[1]).unwrap(), + "should be able to unsubscribe successfully" + ); + + // re-subscribe - there should be peers associated with the topic + assert!( + gs.subscribe(&topics[0]).unwrap(), + "should be able to subscribe successfully" + ); + + // should have added mesh_n nodes to the mesh + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6, + "Should have added 6 nodes to the mesh" + ); + + fn collect_grafts( + mut collected_grafts: Vec, + (_, controls): (&PeerId, &Vec), + ) -> Vec { + for c in controls.iter() { + if let GossipsubControlAction::Graft { topic_hash: _ } = c { + collected_grafts.push(c.clone()) + } } + collected_grafts + } - // unsubscribe from both topics - assert!( - gs.unsubscribe(&topics[0]).unwrap(), - "should be able to unsubscribe successfully from each topic", - ); - assert!( - gs.unsubscribe(&topics[1]).unwrap(), - "should be able to unsubscribe successfully from each topic", + // there should be mesh_n GRAFT messages. + let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); + + assert_eq!( + graft_messages.len(), + 6, + "There should be 6 grafts messages sent to peers" + ); + + // verify fanout nodes + // add 3 random peers to the fanout[topic1] + gs.fanout + .insert(topic_hashes[1].clone(), Default::default()); + let new_peers: Vec = vec![]; + for _ in 0..3 { + let random_peer = PeerId::random(); + // inform the behaviour of a new peer + gs.inject_connection_established( + &random_peer, + &ConnectionId::new(1), + &ConnectedPoint::Dialer { + address: "/ip4/127.0.0.1".parse::().unwrap(), + role_override: Endpoint::Dialer, + }, + None, + 0, ); - // collect all the subscriptions - let subscriptions = - gs.events - .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { - GossipsubHandlerIn::Message(ref message) => { - for s in &message.subscriptions { - match s.subscribe { - Some(true) => collected_subscriptions.push(s.clone()), - _ => {} - }; - } - collected_subscriptions - } - _ => collected_subscriptions, - }, - _ => collected_subscriptions, - }); + // add the new peer to the fanout + let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap(); + fanout_peers.insert(random_peer); + } + + // subscribe to topic1 + gs.subscribe(&topics[1]).unwrap(); - // we sent a unsubscribe to all known peers, for two topics + // the three new peers should have been added, along with 3 more from the pool. 
+ assert!( + gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6, + "Should have added 6 nodes to the mesh" + ); + let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap(); + for new_peer in new_peers { assert!( - subscriptions.len() == 40, - "Should send an unsubscribe event to all known peers" + mesh_peers.contains(&new_peer), + "Fanout peer should be included in the mesh" ); - - // check we clean up internal structures - for topic_hash in &topic_hashes { - assert!( - gs.mesh.get(&topic_hash).is_none(), - "All topics should have been removed from the mesh" - ); - } } - #[test] - /// Test JOIN(topic) functionality. - fn test_join() { - // The Join function should: - // - Remove peers from fanout[topic] - // - Add any fanout[topic] peers to the mesh (up to mesh_n) - // - Fill up to mesh_n peers from known gossipsub peers in the topic - // - Send GRAFT messages to all nodes added to the mesh + // there should now be 12 graft messages to be sent + let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); - // This test is not an isolated unit test, rather it uses higher level, - // subscribe/unsubscribe to perform the test. + assert!( + graft_messages.len() == 12, + "There should be 12 grafts messages sent to peers" + ); +} - let topic_strings = vec![String::from("topic1"), String::from("topic2")]; - let topics = topic_strings - .iter() - .map(|t| Topic::new(t.clone())) - .collect::>(); +/// Test local node publish to subscribed topic +#[test] +fn test_publish_without_flood_publishing() { + // node should: + // - Send publish message to all peers + // - Insert message into gs.mcache and gs.received + + //turn off flood publish to test old behaviour + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topic_strings) - .to_subscribe(true) - .create_network(); + let publish_topic = String::from("test_publish"); + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![publish_topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // all peers should be subscribed to the topic + assert_eq!( + gs.topic_peers.get(&topic_hashes[0]).map(|p| p.len()), + Some(20), + "Peers should be subscribed to the topic" + ); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new(publish_topic), publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. 
} => match **event { + GossipsubHandlerIn::Message(ref message) => { + let event = proto_to_message(message); + for s in &event.messages { + collected_publish.push(s.clone()); + } + collected_publish + } + _ => collected_publish, + }, + _ => collected_publish, + }); - // unsubscribe, then call join to invoke functionality - assert!( - gs.unsubscribe(&topics[0]).unwrap(), - "should be able to unsubscribe successfully" - ); - assert!( - gs.unsubscribe(&topics[1]).unwrap(), - "should be able to unsubscribe successfully" - ); + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); - // re-subscribe - there should be peers associated with the topic - assert!( - gs.subscribe(&topics[0]).unwrap(), - "should be able to subscribe successfully" - ); + let msg_id = gs.config.message_id(message); - // should have added mesh_n nodes to the mesh - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6, - "Should have added 6 nodes to the mesh" - ); + let config: GossipsubConfig = GossipsubConfig::default(); + assert_eq!( + publishes.len(), + config.mesh_n_low(), + "Should send a publish message to all known peers" + ); + + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} + +/// Test local node publish to unsubscribed topic +#[test] +fn test_fanout() { + // node should: + // - Populate fanout peers + // - Send publish message to fanout peers + // - Insert message into gs.mcache and gs.received + + //turn off flood publish to test fanout behaviour + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + + let fanout_topic = String::from("test_fanout"); + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![fanout_topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + // Unsubscribe from topic + assert!( + gs.unsubscribe(&Topic::new(fanout_topic.clone())).unwrap(), + "should be able to unsubscribe successfully from topic" + ); + + // Publish on unsubscribed topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new(fanout_topic.clone()), publish_data) + .unwrap(); - fn collect_grafts( - mut collected_grafts: Vec, - (_, controls): (&PeerId, &Vec), - ) -> Vec { - for c in controls.iter() { - match c { - GossipsubControlAction::Graft { topic_hash: _ } => { - collected_grafts.push(c.clone()) + assert_eq!( + gs.fanout + .get(&TopicHash::from_raw(fanout_topic)) + .unwrap() + .len(), + gs.config.mesh_n(), + "Fanout should contain `mesh_n` peers for fanout topic" + ); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + let event = proto_to_message(message); + for s in &event.messages { + collected_publish.push(s.clone()); } - _ => {} + collected_publish } - } - collected_grafts - } + _ => collected_publish, + }, + _ => collected_publish, + }); - // there should be mesh_n GRAFT messages. 
- let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); - assert_eq!( - graft_messages.len(), - 6, - "There should be 6 grafts messages sent to peers" - ); + let msg_id = gs.config.message_id(message); - // verify fanout nodes - // add 3 random peers to the fanout[topic1] - gs.fanout - .insert(topic_hashes[1].clone(), Default::default()); - let new_peers: Vec = vec![]; - for _ in 0..3 { - let random_peer = PeerId::random(); - // inform the behaviour of a new peer - gs.inject_connection_established( - &random_peer, - &ConnectionId::new(1), - &ConnectedPoint::Dialer { - address: "/ip4/127.0.0.1".parse::().unwrap(), - role_override: Endpoint::Dialer, - }, - None, - 0, - ); + assert_eq!( + publishes.len(), + gs.config.mesh_n(), + "Should send a publish message to `mesh_n` fanout peers" + ); - // add the new peer to the fanout - let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap(); - fanout_peers.insert(random_peer); - } + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} + +#[test] +/// Test the gossipsub NetworkBehaviour peer connection logic. +fn test_inject_connected() { + let (gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .create_network(); + + // check that our subscriptions are sent to each of the peers + // collect all the SendEvents + let send_events: Vec<_> = gs + .events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + !m.subscriptions.is_empty() + } else { + false + } + } + _ => false, + }) + .collect(); - // subscribe to topic1 - gs.subscribe(&topics[1]).unwrap(); + // check that there are two subscriptions sent to each peer + for sevent in send_events.clone() { + if let NetworkBehaviourAction::NotifyHandler { event, .. } = sevent { + if let GossipsubHandlerIn::Message(ref m) = **event { + assert!( + m.subscriptions.len() == 2, + "There should be two subscriptions sent to each peer (1 for each topic)." + ); + } + }; + } + + // check that there are 20 send events created + assert!( + send_events.len() == 20, + "There should be a subscription event sent to each peer." + ); - // the three new peers should have been added, along with 3 more from the pool. + // should add the new peers to `peer_topics` with an empty vec as a gossipsub node + for peer in peers { + let known_topics = gs.peer_topics.get(&peer).unwrap(); assert!( - gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6, - "Should have added 6 nodes to the mesh" + known_topics == &topic_hashes.iter().cloned().collect(), + "The topics for each node should all topics" ); - let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap(); - for new_peer in new_peers { - assert!( - mesh_peers.contains(&new_peer), - "Fanout peer should be included in the mesh" - ); - } - - // there should now be 12 graft messages to be sent - let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); + } +} +#[test] +/// Test subscription handling +fn test_handle_received_subscriptions() { + // For every subscription: + // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. + // - Add peer to topics_peer. + // UNSUBSCRIBE - Remove topic from peer_topics for peer. 
+ // - Remove peer from topic_peers. + + let topics = vec!["topic1", "topic2", "topic3", "topic4"] + .iter() + .map(|&t| String::from(t)) + .collect(); + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topics) + .to_subscribe(false) + .create_network(); + + // The first peer sends 3 subscriptions and 1 unsubscription + let mut subscriptions = topic_hashes[..3] + .iter() + .map(|topic_hash| GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }) + .collect::>(); + + subscriptions.push(GossipsubSubscription { + action: GossipsubSubscriptionAction::Unsubscribe, + topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), + }); + + let unknown_peer = PeerId::random(); + // process the subscriptions + // first and second peers send subscriptions + gs.handle_received_subscriptions(&subscriptions, &peers[0]); + gs.handle_received_subscriptions(&subscriptions, &peers[1]); + // unknown peer sends the same subscriptions + gs.handle_received_subscriptions(&subscriptions, &unknown_peer); + + // verify the result + + let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); + assert!( + peer_topics == topic_hashes.iter().take(3).cloned().collect(), + "First peer should be subscribed to three topics" + ); + let peer_topics = gs.peer_topics.get(&peers[1]).unwrap().clone(); + assert!( + peer_topics == topic_hashes.iter().take(3).cloned().collect(), + "Second peer should be subscribed to three topics" + ); + + assert!( + gs.peer_topics.get(&unknown_peer).is_none(), + "Unknown peer should not have been added" + ); + + for topic_hash in topic_hashes[..3].iter() { + let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone(); assert!( - graft_messages.len() == 12, - "There should be 12 grafts messages sent to peers" + topic_peers == peers[..2].iter().cloned().collect(), + "Two peers should be added to the first three topics" ); } - /// Test local node publish to subscribed topic - #[test] - fn test_publish_without_flood_publishing() { - // node should: - // - Send publish message to all peers - // - Insert message into gs.mcache and gs.received - - //turn off flood publish to test old behaviour - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); + // Peer 0 unsubscribes from the first topic - let publish_topic = String::from("test_publish"); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![publish_topic.clone()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); + gs.handle_received_subscriptions( + &[GossipsubSubscription { + action: GossipsubSubscriptionAction::Unsubscribe, + topic_hash: topic_hashes[0].clone(), + }], + &peers[0], + ); + + let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); + assert!( + peer_topics == topic_hashes[1..3].iter().cloned().collect(), + "Peer should be subscribed to two topics" + ); + + let topic_peers = gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment + assert!( + topic_peers == peers[1..2].iter().cloned().collect(), + "Only the second peers should be in the first topic" + ); +} - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); +#[test] +/// Test Gossipsub.get_random_peers() function +fn test_get_random_peers() { + // generate a default GossipsubConfig + let gs_config = GossipsubConfigBuilder::default() + .validation_mode(ValidationMode::Anonymous) + .build() + 
.unwrap(); + // create a gossipsub struct + let mut gs: Gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, gs_config).unwrap(); + + // create a topic and fill it with some peers + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + for _ in 0..20 { + peers.push(PeerId::random()) + } - // all peers should be subscribed to the topic - assert_eq!( - gs.topic_peers.get(&topic_hashes[0]).map(|p| p.len()), - Some(20), - "Peers should be subscribed to the topic" - ); + gs.topic_peers + .insert(topic_hash.clone(), peers.iter().cloned().collect()); + + gs.connected_peers = peers + .iter() + .map(|p| { + ( + *p, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new(1)], + }, + ) + }) + .collect(); - // publish on topic - let publish_data = vec![0; 42]; - gs.publish(Topic::new(publish_topic), publish_data).unwrap(); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { + true + }); + assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); + let random_peers = get_random_peers( + &gs.topic_peers, + &gs.connected_peers, + &topic_hash, + 30, + |_| true, + ); + assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); + assert!( + random_peers == peers.iter().cloned().collect(), + "Expected no shuffling" + ); + let random_peers = get_random_peers( + &gs.topic_peers, + &gs.connected_peers, + &topic_hash, + 20, + |_| true, + ); + assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); + assert!( + random_peers == peers.iter().cloned().collect(), + "Expected no shuffling" + ); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 0, |_| { + true + }); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); + // test the filter + let random_peers = + get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { + false + }); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); + let random_peers = get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { + |peer| peers.contains(peer) + }); + assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); +} - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { - GossipsubHandlerIn::Message(ref message) => { - let event = proto_to_message(&message); - for s in &event.messages { - collected_publish.push(s.clone()); - } - collected_publish +/// Tests that the correct message is sent when a peer asks for a message in our cache. +#[test] +fn test_handle_iwant_msg_cached() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + let raw_message = RawGossipsubMessage { + source: Some(peers[11]), + data: vec![1, 2, 3, 4], + sequence_number: Some(1u64), + topic: TopicHash::from_raw("topic"), + signature: None, + key: None, + validated: true, + }; + + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + let msg_id = gs.config.message_id(message); + gs.mcache.put(&msg_id, raw_message); + + gs.handle_iwant(&peers[7], vec![msg_id.clone()]); + + // the messages we are sending + let sent_messages = gs + .events + .iter() + .fold(vec![], |mut collected_messages, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. 
} => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for c in &event.messages { + collected_messages.push(c.clone()) } - _ => collected_publish, - }, - _ => collected_publish, - }); + } + collected_messages + } + _ => collected_messages, + }); + + assert!( + sent_messages + .iter() + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .any(|msg| gs.config.message_id(&msg) == msg_id), + "Expected the cached message to be sent to an IWANT peer" + ); +} + +/// Tests that messages are sent correctly depending on the shifting of the message cache. +#[test] +fn test_handle_iwant_msg_cached_shifted() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + // perform 10 memshifts and check that it leaves the cache + for shift in 1..10 { + let raw_message = RawGossipsubMessage { + source: Some(peers[11]), + data: vec![1, 2, 3, 4], + sequence_number: Some(shift), + topic: TopicHash::from_raw("topic"), + signature: None, + key: None, + validated: true, + }; // Transform the inbound message let message = &gs .data_transform - .inbound_transform( - publishes - .first() - .expect("Should contain > 0 entries") - .clone(), - ) + .inbound_transform(raw_message.clone()) .unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); + gs.mcache.put(&msg_id, raw_message); + for _ in 0..shift { + gs.mcache.shift(); + } - let config: GossipsubConfig = GossipsubConfig::default(); - assert_eq!( - publishes.len(), - config.mesh_n_low(), - "Should send a publish message to all known peers" - ); + gs.handle_iwant(&peers[7], vec![msg_id.clone()]); - assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" - ); - } - - /// Test local node publish to unsubscribed topic - #[test] - fn test_fanout() { - // node should: - // - Populate fanout peers - // - Send publish message to fanout peers - // - Insert message into gs.mcache and gs.received - - //turn off flood publish to test fanout behaviour - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); - - let fanout_topic = String::from("test_fanout"); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![fanout_topic.clone()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); - // Unsubscribe from topic - assert!( - gs.unsubscribe(&Topic::new(fanout_topic.clone())).unwrap(), - "should be able to unsubscribe successfully from topic" - ); - - // Publish on unsubscribed topic - let publish_data = vec![0; 42]; - gs.publish(Topic::new(fanout_topic.clone()), publish_data) - .unwrap(); - - assert_eq!( - gs.fanout - .get(&TopicHash::from_raw(fanout_topic.clone())) - .unwrap() - .len(), - gs.config.mesh_n(), - "Fanout should contain `mesh_n` peers for fanout topic" - ); - - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => match **event { - GossipsubHandlerIn::Message(ref message) => { - let event = proto_to_message(&message); - for s in &event.messages { - collected_publish.push(s.clone()); - } - collected_publish - } - _ => collected_publish, - }, - _ => collected_publish, - }); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform( - publishes - .first() - .expect("Should contain > 0 entries") - .clone(), - ) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - - assert_eq!( - publishes.len(), - gs.config.mesh_n(), - "Should send a publish message to `mesh_n` fanout peers" - ); - - assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" - ); - } - - #[test] - /// Test the gossipsub NetworkBehaviour peer connection logic. - fn test_inject_connected() { - let (gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .create_network(); - - // check that our subscriptions are sent to each of the peers - // collect all the SendEvents - let send_events: Vec<_> = gs - .events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - !m.subscriptions.is_empty() - } else { - false - } - } - _ => false, - }) - .collect(); - - // check that there are two subscriptions sent to each peer - for sevent in send_events.clone() { - match sevent { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - assert!( - m.subscriptions.len() == 2, - "There should be two subscriptions sent to each peer (1 for each topic)." - ); - } + // is the message is being sent? + let message_exists = gs.events.iter().any(|e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + event + .messages + .iter() + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .any(|msg| gs.config.message_id(&msg) == msg_id) + } else { + false } - _ => {} - }; - } - - // check that there are 20 send events created - assert!( - send_events.len() == 20, - "There should be a subscription event sent to each peer." - ); - - // should add the new peers to `peer_topics` with an empty vec as a gossipsub node - for peer in peers { - let known_topics = gs.peer_topics.get(&peer).unwrap(); + } + _ => false, + }); + // default history_length is 5, expect no messages after shift > 5 + if shift < 5 { assert!( - known_topics == &topic_hashes.iter().cloned().collect(), - "The topics for each node should all topics" + message_exists, + "Expected the cached message to be sent to an IWANT peer before 5 shifts" ); - } - } - - #[test] - /// Test subscription handling - fn test_handle_received_subscriptions() { - // For every subscription: - // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. - // - Add peer to topics_peer. - // UNSUBSCRIBE - Remove topic from peer_topics for peer. - // - Remove peer from topic_peers. 
- - let topics = vec!["topic1", "topic2", "topic3", "topic4"] - .iter() - .map(|&t| String::from(t)) - .collect(); - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topics) - .to_subscribe(false) - .create_network(); - - // The first peer sends 3 subscriptions and 1 unsubscription - let mut subscriptions = topic_hashes[..3] - .iter() - .map(|topic_hash| GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topic_hash.clone(), - }) - .collect::>(); - - subscriptions.push(GossipsubSubscription { - action: GossipsubSubscriptionAction::Unsubscribe, - topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), - }); - - let unknown_peer = PeerId::random(); - // process the subscriptions - // first and second peers send subscriptions - gs.handle_received_subscriptions(&subscriptions, &peers[0]); - gs.handle_received_subscriptions(&subscriptions, &peers[1]); - // unknown peer sends the same subscriptions - gs.handle_received_subscriptions(&subscriptions, &unknown_peer); - - // verify the result - - let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); - assert!( - peer_topics == topic_hashes.iter().take(3).cloned().collect(), - "First peer should be subscribed to three topics" - ); - let peer_topics = gs.peer_topics.get(&peers[1]).unwrap().clone(); - assert!( - peer_topics == topic_hashes.iter().take(3).cloned().collect(), - "Second peer should be subscribed to three topics" - ); - - assert!( - gs.peer_topics.get(&unknown_peer).is_none(), - "Unknown peer should not have been added" - ); - - for topic_hash in topic_hashes[..3].iter() { - let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone(); + } else { assert!( - topic_peers == peers[..2].into_iter().cloned().collect(), - "Two peers should be added to the first three topics" + !message_exists, + "Expected the cached message to not be sent to an IWANT peer after 5 shifts" ); } - - // Peer 0 unsubscribes from the first topic - - gs.handle_received_subscriptions( - &vec![GossipsubSubscription { - action: GossipsubSubscriptionAction::Unsubscribe, - topic_hash: topic_hashes[0].clone(), - }], - &peers[0], - ); - - let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); - assert!( - peer_topics == topic_hashes[1..3].into_iter().cloned().collect(), - "Peer should be subscribed to two topics" - ); - - let topic_peers = gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment - assert!( - topic_peers == peers[1..2].into_iter().cloned().collect(), - "Only the second peers should be in the first topic" - ); } +} - #[test] - /// Test Gossipsub.get_random_peers() function - fn test_get_random_peers() { - // generate a default GossipsubConfig - let gs_config = GossipsubConfigBuilder::default() - .validation_mode(ValidationMode::Anonymous) - .build() - .unwrap(); - // create a gossipsub struct - let mut gs: Gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, gs_config).unwrap(); - - // create a topic and fill it with some peers - let topic_hash = Topic::new("Test").hash().clone(); - let mut peers = vec![]; - for _ in 0..20 { - peers.push(PeerId::random()) - } - - gs.topic_peers - .insert(topic_hash.clone(), peers.iter().cloned().collect()); - - gs.connected_peers = peers - .iter() - .map(|p| { - ( - p.clone(), - PeerConnections { - kind: PeerKind::Gossipsubv1_1, - connections: vec![ConnectionId::new(1)], - }, - ) - }) - .collect(); - - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { - 
true - }); - assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); - let random_peers = get_random_peers( - &gs.topic_peers, - &gs.connected_peers, - &topic_hash, - 30, - |_| true, - ); - assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); - assert!( - random_peers == peers.iter().cloned().collect(), - "Expected no shuffling" - ); - let random_peers = get_random_peers( - &gs.topic_peers, - &gs.connected_peers, - &topic_hash, - 20, - |_| true, - ); - assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); - assert!( - random_peers == peers.iter().cloned().collect(), - "Expected no shuffling" - ); - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 0, |_| { - true - }); - assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); - // test the filter - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { - false - }); - assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { - |peer| peers.contains(peer) - }); - assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); - } +#[test] +// tests that an event is not created when a peers asks for a message not in our cache +fn test_handle_iwant_msg_not_cached() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + let events_before = gs.events.len(); + gs.handle_iwant(&peers[7], vec![MessageId::new(b"unknown id")]); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ); +} - /// Tests that the correct message is sent when a peer asks for a message in our cache. 
- #[test] - fn test_handle_iwant_msg_cached() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); +#[test] +// tests that an event is created when a peer shares that it has a message we want +fn test_handle_ihave_subscribed_and_msg_not_cached() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_ihave( + &peers[7], + vec![(topic_hashes[0].clone(), vec![MessageId::new(b"unknown id")])], + ); + + // check that we sent an IWANT request for `unknown id` + let iwant_exists = match gs.control_pool.get(&peers[7]) { + Some(controls) => controls.iter().any(|c| match c { + GossipsubControlAction::IWant { message_ids } => message_ids + .iter() + .any(|m| *m == MessageId::new(b"unknown id")), + _ => false, + }), + _ => false, + }; - let raw_message = RawGossipsubMessage { - source: Some(peers[11].clone()), - data: vec![1, 2, 3, 4], - sequence_number: Some(1u64), - topic: TopicHash::from_raw("topic"), - signature: None, - key: None, - validated: true, - }; + assert!( + iwant_exists, + "Expected to send an IWANT control message for unkown message id" + ); +} - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); +#[test] +// tests that an event is not created when a peer shares that it has a message that +// we already have +fn test_handle_ihave_subscribed_and_msg_cached() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + let msg_id = MessageId::new(b"known id"); + + let events_before = gs.events.len(); + gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ) +} - let msg_id = gs.config.message_id(&message); - gs.mcache.put(&msg_id, raw_message); +#[test] +// test that an event is not created when a peer shares that it has a message in +// a topic that we are not subscribed to +fn test_handle_ihave_not_subscribed() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(vec![]) + .to_subscribe(true) + .create_network(); + + let events_before = gs.events.len(); + gs.handle_ihave( + &peers[7], + vec![( + TopicHash::from_raw(String::from("unsubscribed topic")), + vec![MessageId::new(b"irrelevant id")], + )], + ); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ) +} - gs.handle_iwant(&peers[7], vec![msg_id.clone()]); +#[test] +// tests that a peer is added to our mesh when we are both subscribed +// to the same topic +fn test_handle_graft_is_subscribed() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_graft(&peers[7], topic_hashes.clone()); + + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to have been added to mesh" + ); +} - // the messages we are sending - let sent_messages = gs - .events - .iter() - .fold(vec![], |mut collected_messages, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(&m); - for c in &event.messages { - collected_messages.push(c.clone()) - } - } - collected_messages - } - _ => collected_messages, - }); +#[test] +// tests that a peer is not added to our mesh when they are subscribed to +// a topic that we are not +fn test_handle_graft_is_not_subscribed() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_graft( + &peers[7], + vec![TopicHash::from_raw(String::from("unsubscribed topic"))], + ); + + assert!( + !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to have been added to mesh" + ); +} +#[test] +// tests multiple topics in a single graft message +fn test_handle_graft_multiple_topics() { + let topics: Vec = vec!["topic1", "topic2", "topic3", "topic4"] + .iter() + .map(|&t| String::from(t)) + .collect(); + + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topics) + .to_subscribe(true) + .create_network(); + + let mut their_topics = topic_hashes.clone(); + // their_topics = [topic1, topic2, topic3] + // our_topics = [topic1, topic2, topic4] + their_topics.pop(); + gs.leave(&their_topics[2]); + + gs.handle_graft(&peers[7], their_topics.clone()); + + for hash in topic_hashes.iter().take(2) { assert!( - sent_messages - .iter() - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .any(|msg| gs.config.message_id(&msg) == msg_id), - "Expected the cached message to be sent to an IWANT peer" + gs.mesh.get(hash).unwrap().contains(&peers[7]), + "Expected peer to be in the mesh for the first 2 topics" ); } - /// Tests that messages are sent correctly depending on the shifting of the message cache. - #[test] - fn test_handle_iwant_msg_cached_shifted() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); - - // perform 10 memshifts and check that it leaves the cache - for shift in 1..10 { - let raw_message = RawGossipsubMessage { - source: Some(peers[11].clone()), - data: vec![1, 2, 3, 4], - sequence_number: Some(shift), - topic: TopicHash::from_raw("topic"), - signature: None, - key: None, - validated: true, - }; - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - gs.mcache.put(&msg_id, raw_message); - for _ in 0..shift { - gs.mcache.shift(); - } + assert!( + gs.mesh.get(&topic_hashes[2]).is_none(), + "Expected the second topic to not be in the mesh" + ); +} - gs.handle_iwant(&peers[7], vec![msg_id.clone()]); +#[test] +// tests that a peer is removed from our mesh +fn test_handle_prune_peer_in_mesh() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + // insert peer into our mesh for 'topic1' + gs.mesh + .insert(topic_hashes[0].clone(), peers.iter().cloned().collect()); + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to be in mesh" + ); + + gs.handle_prune( + &peers[7], + topic_hashes + .iter() + .map(|h| (h.clone(), vec![], None)) + .collect(), + ); + assert!( + !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to be removed from mesh" + ); +} - // is the message is being sent? 
- let message_exists = gs.events.iter().any(|e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { +fn count_control_msgs( + gs: &Gossipsub, + mut filter: impl FnMut(&PeerId, &GossipsubControlAction) -> bool, +) -> usize { + gs.control_pool + .iter() + .map(|(peer_id, actions)| actions.iter().filter(|m| filter(peer_id, m)).count()) + .sum::() + + gs.events + .iter() + .map(|e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { if let GossipsubHandlerIn::Message(ref m) = **event { let event = proto_to_message(m); event - .messages + .control_msgs .iter() - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .any(|msg| gs.config.message_id(&msg) == msg_id) + .filter(|m| filter(peer_id, m)) + .count() } else { - false + 0 } } - _ => false, - }); - // default history_length is 5, expect no messages after shift > 5 - if shift < 5 { - assert!( - message_exists, - "Expected the cached message to be sent to an IWANT peer before 5 shifts" - ); - } else { - assert!( - !message_exists, - "Expected the cached message to not be sent to an IWANT peer after 5 shifts" - ); - } - } - } - - #[test] - // tests that an event is not created when a peers asks for a message not in our cache - fn test_handle_iwant_msg_not_cached() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); - - let events_before = gs.events.len(); - gs.handle_iwant(&peers[7], vec![MessageId::new(b"unknown id")]); - let events_after = gs.events.len(); - - assert_eq!( - events_before, events_after, - "Expected event count to stay the same" - ); - } - - #[test] - // tests that an event is created when a peer shares that it has a message we want - fn test_handle_ihave_subscribed_and_msg_not_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); + _ => 0, + }) + .sum::() +} - gs.handle_ihave( - &peers[7], - vec![(topic_hashes[0].clone(), vec![MessageId::new(b"unknown id")])], - ); +fn flush_events(gs: &mut Gossipsub) { + gs.control_pool.clear(); + gs.events.clear(); +} - // check that we sent an IWANT request for `unknown id` - let iwant_exists = match gs.control_pool.get(&peers[7]) { - Some(controls) => controls.iter().any(|c| match c { - GossipsubControlAction::IWant { message_ids } => message_ids - .iter() - .any(|m| *m == MessageId::new(b"unknown id")), - _ => false, - }), +#[test] +// tests that a peer added as explicit peer gets connected to +fn test_explicit_peer_gets_connected() { + let (mut gs, _, _) = inject_nodes1() + .peer_no(0) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + //create new peer + let peer = PeerId::random(); + + //add peer as explicit peer + gs.add_explicit_peer(&peer); + + let num_events = gs + .events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::Dial { opts, handler: _ } => opts.get_peer_id() == Some(peer), _ => false, - }; + }) + .count(); - assert!( - iwant_exists, - "Expected to send an IWANT control message for unkown message id" - ); - } - - #[test] - // tests that an event is not created when a peer shares that it has a message that - // we already have - fn test_handle_ihave_subscribed_and_msg_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - let msg_id = MessageId::new(b"known id"); - - let events_before = 
gs.events.len(); - gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]); - let events_after = gs.events.len(); + assert_eq!( + num_events, 1, + "There was no dial peer event for the explicit peer" + ); +} - assert_eq!( - events_before, events_after, - "Expected event count to stay the same" - ) - } +#[test] +fn test_explicit_peer_reconnects() { + let config = GossipsubConfigBuilder::default() + .check_explicit_peers_ticks(2) + .build() + .unwrap(); + let (mut gs, others, _) = inject_nodes1() + .peer_no(1) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(config) + .create_network(); - #[test] - // test that an event is not created when a peer shares that it has a message in - // a topic that we are not subscribed to - fn test_handle_ihave_not_subscribed() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(vec![]) - .to_subscribe(true) - .create_network(); - - let events_before = gs.events.len(); - gs.handle_ihave( - &peers[7], - vec![( - TopicHash::from_raw(String::from("unsubscribed topic")), - vec![MessageId::new(b"irrelevant id")], - )], - ); - let events_after = gs.events.len(); + let peer = others.get(0).unwrap(); - assert_eq!( - events_before, events_after, - "Expected event count to stay the same" - ) - } + //add peer as explicit peer + gs.add_explicit_peer(peer); - #[test] - // tests that a peer is added to our mesh when we are both subscribed - // to the same topic - fn test_handle_graft_is_subscribed() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); + flush_events(&mut gs); - gs.handle_graft(&peers[7], topic_hashes.clone()); + //disconnect peer + disconnect_peer(&mut gs, peer); - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to have been added to mesh" - ); - } + gs.heartbeat(); - #[test] - // tests that a peer is not added to our mesh when they are subscribed to - // a topic that we are not - fn test_handle_graft_is_not_subscribed() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - gs.handle_graft( - &peers[7], - vec![TopicHash::from_raw(String::from("unsubscribed topic"))], - ); + //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` + assert_eq!( + gs.events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::Dial { opts, handler: _ } => + opts.get_peer_id() == Some(*peer), + _ => false, + }) + .count(), + 0, + "There was a dial peer event before explicit_peer_ticks heartbeats" + ); - assert!( - !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to have been added to mesh" - ); - } + gs.heartbeat(); - #[test] - // tests multiple topics in a single graft message - fn test_handle_graft_multiple_topics() { - let topics: Vec = vec!["topic1", "topic2", "topic3", "topic4"] + //check that there is a reconnect after second heartbeat + assert!( + gs.events .iter() - .map(|&t| String::from(t)) - .collect(); + .filter(|e| match e { + NetworkBehaviourAction::Dial { opts, handler: _ } => + opts.get_peer_id() == Some(*peer), + _ => false, + }) + .count() + >= 1, + "There was no dial peer event for the explicit peer" + ); +} - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topics.clone()) - .to_subscribe(true) - .create_network(); +#[test] +fn test_handle_graft_explicit_peer() { + let (mut gs, peers, 
topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + let peer = peers.get(0).unwrap(); + + gs.handle_graft(peer, topic_hashes.clone()); + + //peer got not added to mesh + assert!(gs.mesh[&topic_hashes[0]].is_empty()); + assert!(gs.mesh[&topic_hashes[1]].is_empty()); + + //check prunes + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == peer + && match m { + GossipsubControlAction::Prune { topic_hash, .. } => + topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1], + _ => false, + }) + >= 2, + "Not enough prunes sent when grafting from explicit peer" + ); +} - let mut their_topics = topic_hashes.clone(); - // their_topics = [topic1, topic2, topic3] - // our_topics = [topic1, topic2, topic4] - their_topics.pop(); - gs.leave(&their_topics[2]); +#[test] +fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { + let (gs, peers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!( + gs.mesh[&topic_hashes[0]], + vec![peers[1]].into_iter().collect() + ); + + //assert that graft gets created to non-explicit peer + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && matches!(m, GossipsubControlAction::Graft { .. })) + >= 1, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && matches!(m, GossipsubControlAction::Graft { .. })), + 0, + "A graft message got created to an explicit peer" + ); +} - gs.handle_graft(&peers[7], their_topics.clone()); +#[test] +fn do_not_graft_explicit_peer() { + let (mut gs, others, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(vec![String::from("topic")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + gs.heartbeat(); + + //mesh stays empty + assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &others[0] + && matches!(m, GossipsubControlAction::Graft { .. })), + 0, + "A graft message got created to an explicit peer" + ); +} - for i in 0..2 { - assert!( - gs.mesh.get(&topic_hashes[i]).unwrap().contains(&peers[7]), - "Expected peer to be in the mesh for the first 2 topics" - ); - } +#[test] +fn do_forward_messages_to_explicit_peers() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawGossipsubMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(message.clone(), &local_id); - assert!( - gs.mesh.get(&topic_hashes[2]).is_none(), - "Expected the second topic to not be in the mesh" + assert_eq!( + gs.events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + peer_id == &peers[0] + && event + .messages + .iter() + .filter(|m| m.data == message.data) + .count() + > 0 + } else { + false + } + } + _ => false, + }) + .count(), + 1, + "The message did not get forwarded to the explicit peer" + ); +} + +#[test] +fn explicit_peers_not_added_to_mesh_on_subscribe() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(2) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + //create new topic, both peers subscribing to it but we do not subscribe to it + let topic = Topic::new(String::from("t")); + let topic_hash = topic.hash(); + for peer in peers.iter().take(2) { + gs.handle_received_subscriptions( + &[GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }], + peer, ); } - #[test] - // tests that a peer is removed from our mesh - fn test_handle_prune_peer_in_mesh() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - // insert peer into our mesh for 'topic1' - gs.mesh - .insert(topic_hashes[0].clone(), peers.iter().cloned().collect()); - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to be in mesh" - ); + //subscribe now to topic + gs.subscribe(&topic).unwrap(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + + //assert that graft gets created to non-explicit peer + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && matches!(m, GossipsubControlAction::Graft { .. })) + > 0, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && matches!(m, GossipsubControlAction::Graft { .. })), + 0, + "A graft message got created to an explicit peer" + ); +} - gs.handle_prune( - &peers[7], - topic_hashes - .iter() - .map(|h| (h.clone(), vec![], None)) - .collect(), +#[test] +fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(2) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + //create new topic, both peers subscribing to it but we do not subscribe to it + let topic = Topic::new(String::from("t")); + let topic_hash = topic.hash(); + for peer in peers.iter().take(2) { + gs.handle_received_subscriptions( + &[GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }], + peer, ); - assert!( - !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to be removed from mesh" - ); - } - - fn count_control_msgs( - gs: &Gossipsub, - mut filter: impl FnMut(&PeerId, &GossipsubControlAction) -> bool, - ) -> usize { - gs.control_pool - .iter() - .map(|(peer_id, actions)| actions.iter().filter(|m| filter(peer_id, m)).count()) - .sum::() - + gs.events - .iter() - .map(|e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - event - .control_msgs - .iter() - .filter(|m| filter(peer_id, m)) - .count() - } else { - 0 - } - } - _ => 0, - }) - .sum::() - } - - fn flush_events(gs: &mut Gossipsub) { - gs.control_pool.clear(); - gs.events.clear(); - } - - #[test] - // tests that a peer added as explicit peer gets connected to - fn test_explicit_peer_gets_connected() { - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); - - //create new peer - let peer = PeerId::random(); - - //add peer as explicit peer - gs.add_explicit_peer(&peer); - - let dial_events: Vec<_> = gs - .events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => { - opts.get_peer_id() == Some(peer) - } - _ => false, - }) - .collect(); - - assert_eq!( - dial_events.len(), - 1, - "There was no dial peer event for the explicit peer" - ); - } - - #[test] - fn test_explicit_peer_reconnects() { - let config = GossipsubConfigBuilder::default() - .check_explicit_peers_ticks(2) - .build() - .unwrap(); - let (mut gs, others, _) = inject_nodes1() - .peer_no(1) - .topics(Vec::new()) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let peer = others.get(0).unwrap(); - - //add peer as explicit peer - gs.add_explicit_peer(peer); - - flush_events(&mut gs); - - //disconnect peer - disconnect_peer(&mut gs, peer); - - gs.heartbeat(); - - //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` - assert_eq!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => - opts.get_peer_id() == Some(*peer), - _ => false, - }) - .count(), - 0, - "There was a dial peer event before explicit_peer_ticks heartbeats" - ); - - gs.heartbeat(); - - //check that there is a reconnect after second heartbeat - assert!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => - opts.get_peer_id() == Some(*peer), - _ => false, - }) - .count() - >= 1, - "There was no dial peer event for the explicit peer" - ); - } - - #[test] - fn test_handle_graft_explicit_peer() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(1) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - let peer = peers.get(0).unwrap(); - - gs.handle_graft(peer, topic_hashes.clone()); - - //peer got not added to mesh - assert!(gs.mesh[&topic_hashes[0]].is_empty()); - assert!(gs.mesh[&topic_hashes[1]].is_empty()); - - //check prunes - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == peer - && match m { - GossipsubControlAction::Prune { topic_hash, .. 
} => - topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1], - _ => false, - }) - >= 2, - "Not enough prunes sent when grafting from explicit peer" - ); - } - - #[test] - fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { - let (gs, peers, topic_hashes) = inject_nodes1() - .peer_no(2) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!( - gs.mesh[&topic_hashes[0]], - vec![peers[1].clone()].into_iter().collect() - ); - - //assert that graft gets created to non-explicit peer - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) - >= 1, - "No graft message got created to non-explicit peer" - ); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn do_not_graft_explicit_peer() { - let (mut gs, others, topic_hashes) = inject_nodes1() - .peer_no(1) - .topics(vec![String::from("topic")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - gs.heartbeat(); - - //mesh stays empty - assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &others[0] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn do_forward_messages_to_explicit_peers() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(2) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - let local_id = PeerId::random(); - - let message = RawGossipsubMessage { - source: Some(peers[1].clone()), - data: vec![12], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(message.clone(), &local_id); - - assert_eq!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - peer_id == &peers[0] - && event - .messages - .iter() - .filter(|m| m.data == message.data) - .count() - > 0 - } else { - false - } - } - _ => false, - }) - .count(), - 1, - "The message did not get forwarded to the explicit peer" - ); - } - - #[test] - fn explicit_peers_not_added_to_mesh_on_subscribe() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(2) - .topics(Vec::new()) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - //create new topic, both peers subscribing to it but we do not subscribe to it - let topic = Topic::new(String::from("t")); - let topic_hash = topic.hash(); - for i in 0..2 { - gs.handle_received_subscriptions( - &vec![GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topic_hash.clone(), - }], - &peers[i], - ); - } - - //subscribe now to topic - gs.subscribe(&topic).unwrap(); - - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!( - gs.mesh[&topic_hash], - vec![peers[1].clone()].into_iter().collect() - ); - - //assert that graft gets created to non-explicit peer - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) - > 0, - "No graft message got created to non-explicit peer" - ); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(2) - .topics(Vec::new()) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - //create new topic, both peers subscribing to it but we do not subscribe to it - let topic = Topic::new(String::from("t")); - let topic_hash = topic.hash(); - for i in 0..2 { - gs.handle_received_subscriptions( - &vec![GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topic_hash.clone(), - }], - &peers[i], - ); - } - - //we send a message for this topic => this will initialize the fanout - gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); - - //subscribe now to topic - gs.subscribe(&topic).unwrap(); - - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!( - gs.mesh[&topic_hash], - vec![peers[1].clone()].into_iter().collect() - ); - - //assert that graft gets created to non-explicit peer - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) - >= 1, - "No graft message got created to non-explicit peer" - ); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Graft { .. 
} => true, - _ => false, - }), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn no_gossip_gets_sent_to_explicit_peers() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(2) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - let local_id = PeerId::random(); - - let message = RawGossipsubMessage { - source: Some(peers[1].clone()), - data: vec![], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - - //forward the message - gs.handle_received_message(message.clone(), &local_id); - - //simulate multiple gossip calls (for randomness) - for _ in 0..3 { - gs.emit_gossip(); - } - - //assert that no gossip gets sent to explicit peer - assert_eq!( - gs.control_pool - .get(&peers[0]) - .unwrap_or(&Vec::new()) - .iter() - .filter(|m| match m { - GossipsubControlAction::IHave { .. } => true, - _ => false, - }) - .count(), - 0, - "Gossip got emitted to explicit peer" - ); - } - - // Tests the mesh maintenance addition - #[test] - fn test_mesh_addition() { - let config: GossipsubConfig = GossipsubConfig::default(); - - // Adds mesh_low peers and PRUNE 2 giving us a deficit. - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n() + 1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - let to_remove_peers = config.mesh_n() + 1 - config.mesh_n_low() - 1; - - for index in 0..to_remove_peers { - gs.handle_prune( - &peers[index], - topics.iter().map(|h| (h.clone(), vec![], None)).collect(), - ); - } - - // Verify the pruned peers are removed from the mesh. - assert_eq!( - gs.mesh.get(&topics[0]).unwrap().len(), - config.mesh_n_low() - 1 - ); - - // run a heartbeat - gs.heartbeat(); - - // Peers should be added to reach mesh_n - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); - } - - // Tests the mesh maintenance subtraction - #[test] - fn test_mesh_subtraction() { - let config = GossipsubConfig::default(); - - // Adds mesh_low peers and PRUNE 2 giving us a deficit. 
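// The mesh-maintenance tests above and below assert what a heartbeat does to an
// under- or over-sized mesh: grow it back to `mesh_n` once it drops below `mesh_n_low`,
// trim it back to `mesh_n` once it exceeds `mesh_n_high`, and leave it alone otherwise.
// A minimal, std-only sketch of that rule (hypothetical helper, not the crate's API):

fn mesh_delta(current: usize, mesh_n_low: usize, mesh_n: usize, mesh_n_high: usize) -> isize {
    if current < mesh_n_low {
        (mesh_n - current) as isize // deficit: peers to add on this heartbeat
    } else if current > mesh_n_high {
        -((current - mesh_n) as isize) // surplus: peers to remove on this heartbeat
    } else {
        0 // within bounds: mesh is untouched
    }
}

#[test]
fn mesh_delta_matches_maintenance_rules() {
    // assuming the default parameters mesh_n_low = 5, mesh_n = 6, mesh_n_high = 12
    assert_eq!(mesh_delta(4, 5, 6, 12), 2); // test_mesh_addition: grow back to mesh_n
    assert_eq!(mesh_delta(22, 5, 6, 12), -16); // test_mesh_subtraction: trim down to mesh_n
    assert_eq!(mesh_delta(8, 5, 6, 12), 0);
}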
- let n = config.mesh_n_high() + 10; - //make all outbound connections so that we allow grafting to all - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(n) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .outbound(n) - .create_network(); - - // graft all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // run a heartbeat - gs.heartbeat(); - - // Peers should be removed to reach mesh_n - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); - } - - #[test] - fn test_connect_to_px_peers_on_handle_prune() { - let config: GossipsubConfig = GossipsubConfig::default(); - - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //handle prune from single peer with px peers - - let mut px = Vec::new(); - //propose more px peers than config.prune_peers() - for _ in 0..config.prune_peers() + 5 { - px.push(PeerInfo { - peer_id: Some(PeerId::random()), - }); - } - - gs.handle_prune( - &peers[0], - vec![( - topics[0].clone(), - px.clone(), - Some(config.prune_backoff().as_secs()), - )], - ); - - //Check DialPeer events for px peers - let dials: Vec<_> = gs - .events - .iter() - .filter_map(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => opts.get_peer_id(), - _ => None, - }) - .collect(); - - // Exactly config.prune_peers() many random peers should be dialled - assert_eq!(dials.len(), config.prune_peers()); - - let dials_set: HashSet<_> = dials.into_iter().collect(); - - // No duplicates - assert_eq!(dials_set.len(), config.prune_peers()); - - //all dial peers must be in px - assert!(dials_set.is_subset( - &px.iter() - .map(|i| i.peer_id.as_ref().unwrap().clone()) - .collect::>() - )); - } - - #[test] - fn test_send_px_and_backoff_in_prune() { - let config: GossipsubConfig = GossipsubConfig::default(); - - //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.prune_peers() + 1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //send prune to peer - gs.send_graft_prune( - HashMap::new(), - vec![(peers[0].clone(), vec![topics[0].clone()])] - .into_iter() - .collect(), - HashSet::new(), - ); - - //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Prune { - topic_hash, - peers, - backoff, - } => - topic_hash == &topics[0] && - peers.len() == config.prune_peers() && - //all peers are different - peers.iter().collect::>().len() == - config.prune_peers() && - backoff.unwrap() == config.prune_backoff().as_secs(), - _ => false, - }), - 1 - ); - } - - #[test] - fn test_prune_backoffed_peer_on_graft() { - let config: GossipsubConfig = GossipsubConfig::default(); - - //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.prune_peers() + 1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //remove peer from mesh and send prune to peer => this adds a backoff for this peer - gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); - gs.send_graft_prune( - HashMap::new(), - vec![(peers[0].clone(), vec![topics[0].clone()])] - .into_iter() - .collect(), - HashSet::new(), - ); - - //ignore all messages until now - gs.events.clear(); - - //handle graft - gs.handle_graft(&peers[0], vec![topics[0].clone()]); - - //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id 
== &peers[0] - && match m { - GossipsubControlAction::Prune { - topic_hash, - peers, - backoff, - } => - topic_hash == &topics[0] && - //no px in this case - peers.is_empty() && - backoff.unwrap() == config.prune_backoff().as_secs(), - _ => false, - }), - 1 - ); - } - - #[test] - fn test_do_not_graft_within_backoff_period() { - let config = GossipsubConfigBuilder::default() - .backoff_slack(1) - .heartbeat_interval(Duration::from_millis(100)) - .build() - .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - //handle prune from peer with backoff of one second - gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); - - //forget all events until now - flush_events(&mut gs); - - //call heartbeat - gs.heartbeat(); - - //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). - for _ in 0..10 { - sleep(Duration::from_millis(100)); - gs.heartbeat(); - } - - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), - 0, - "Graft message created too early within backoff period" - ); - - //Heartbeat one more time this should graft now - sleep(Duration::from_millis(100)); - gs.heartbeat(); - - //check that graft got created - assert!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) > 0, - "No graft message was created after backoff period" - ); - } - - #[test] - fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { - //set default backoff period to 1 second - let config = GossipsubConfigBuilder::default() - .prune_backoff(Duration::from_millis(90)) - .backoff_slack(1) - .heartbeat_interval(Duration::from_millis(100)) - .build() - .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - //handle prune from peer without a specified backoff - gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); - - //forget all events until now - flush_events(&mut gs); - - //call heartbeat - gs.heartbeat(); - - //Apply one more heartbeat - sleep(Duration::from_millis(100)); - gs.heartbeat(); - - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), - 0, - "Graft message created too early within backoff period" - ); - - //Heartbeat one more time this should graft now - sleep(Duration::from_millis(100)); - gs.heartbeat(); - - //check that graft got created - assert!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. 
} => true, - _ => false, - }) > 0, - "No graft message was created after backoff period" - ); - } - - #[test] - fn test_unsubscribe_backoff() { - const HEARTBEAT_INTERVAL: Duration = Duration::from_millis(100); - let config = GossipsubConfigBuilder::default() - .backoff_slack(1) - // ensure a prune_backoff > unsubscribe_backoff - .prune_backoff(Duration::from_secs(5)) - .unsubscribe_backoff(1) - .heartbeat_interval(HEARTBEAT_INTERVAL) - .build() - .unwrap(); - - let topic = String::from("test"); - // only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, _, topics) = inject_nodes1() - .peer_no(1) - .topics(vec![topic.clone()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let _ = gs.unsubscribe(&Topic::new(topic.clone())); - - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Prune { backoff, .. } => backoff == &Some(1), - _ => false, - }), - 1, - "Peer should be pruned with `unsubscribe_backoff`." - ); - - let _ = gs.subscribe(&Topic::new(topics[0].to_string())); - - // forget all events until now - flush_events(&mut gs); - - // call heartbeat - gs.heartbeat(); - - // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). - for _ in 0..10 { - sleep(HEARTBEAT_INTERVAL); - gs.heartbeat(); - } - - // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), - 0, - "Graft message created too early within backoff period" - ); - - // Heartbeat one more time this should graft now - sleep(HEARTBEAT_INTERVAL); - gs.heartbeat(); - - // check that graft got created - assert!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) > 0, - "No graft message was created after backoff period" - ); - } - - #[test] - fn test_flood_publish() { - let config: GossipsubConfig = GossipsubConfig::default(); - - let topic = "test"; - // Adds more peers than mesh can hold to test flood publishing - let (mut gs, _, _) = inject_nodes1() - .peer_no(config.mesh_n_high() + 10) - .topics(vec![topic.into()]) - .to_subscribe(true) - .create_network(); - - //publish message - let publish_data = vec![0; 42]; - gs.publish(Topic::new(topic), publish_data).unwrap(); - - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } - } - collected_publish - } - _ => collected_publish, - }); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform( - publishes - .first() - .expect("Should contain > 0 entries") - .clone(), - ) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - - let config: GossipsubConfig = GossipsubConfig::default(); - assert_eq!( - publishes.len(), - config.mesh_n_high() + 10, - "Should send a publish message to all known peers" - ); - - assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" - ); - } - - #[test] - fn test_gossip_to_at_least_gossip_lazy_peers() { - let config: GossipsubConfig = GossipsubConfig::default(); - - //add more peers than in mesh to test gossipping - //by default only mesh_n_low peers will get added to mesh - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) - .topics(vec!["topic".into()]) - .to_subscribe(true) - .create_network(); - - //receive message - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); - - //emit gossip - gs.emit_gossip(); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - - //check that exactly config.gossip_lazy() many gossip messages were sent. - assert_eq!( - count_control_msgs(&gs, |_, action| match action { - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), - _ => false, - }), - config.gossip_lazy() - ); - } - - #[test] - fn test_gossip_to_at_most_gossip_factor_peers() { - let config: GossipsubConfig = GossipsubConfig::default(); - - //add a lot of peers - let m = - config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(m) - .topics(vec!["topic".into()]) - .to_subscribe(true) - .create_network(); - - //receive message - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); - - //emit gossip - gs.emit_gossip(); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - //check that exactly config.gossip_lazy() many gossip messages were sent. 
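// The two gossip-emission tests around here pin down how many non-mesh peers receive an
// IHAVE per heartbeat: at least `gossip_lazy`, but at most `gossip_factor` times the
// number of candidate peers. A simplified, std-only model of that selection size,
// assuming every candidate is eligible (hypothetical helper, not the crate's API):

fn ihave_target_count(candidates: usize, gossip_lazy: usize, gossip_factor: f64) -> usize {
    let factor_based = (candidates as f64 * gossip_factor) as usize;
    gossip_lazy.max(factor_based).min(candidates)
}

#[test]
fn ihave_target_count_examples() {
    // assuming the spec defaults gossip_lazy = 6 and gossip_factor = 0.25
    assert_eq!(ihave_target_count(7, 6, 0.25), 6); // few candidates: the gossip_lazy floor wins
    assert_eq!(ihave_target_count(48, 6, 0.25), 12); // many candidates: the factor cap takes over
}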
- assert_eq!( - count_control_msgs(&gs, |_, action| match action { - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), - _ => false, - }), - ((m - config.mesh_n_low()) as f64 * config.gossip_factor()) as usize - ); - } - - #[test] - fn test_accept_only_outbound_peer_grafts_when_mesh_full() { - let config: GossipsubConfig = GossipsubConfig::default(); - - //enough peers to fill the mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - // graft all the peers => this will fill the mesh - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - //assert current mesh size - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - - //create an outbound and an inbound peer - let inbound = add_peer(&mut gs, &topics, false, false); - let outbound = add_peer(&mut gs, &topics, true, false); - - //send grafts - gs.handle_graft(&inbound, vec![topics[0].clone()]); - gs.handle_graft(&outbound, vec![topics[0].clone()]); - - //assert mesh size - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); - - //inbound is not in mesh - assert!(!gs.mesh[&topics[0]].contains(&inbound)); - - //outbound is in mesh - assert!(gs.mesh[&topics[0]].contains(&outbound)); - } - - #[test] - fn test_do_not_remove_too_many_outbound_peers() { - //use an extreme case to catch errors with high probability - let m = 50; - let n = 2 * m; - let config = GossipsubConfigBuilder::default() - .mesh_n_high(n) - .mesh_n(n) - .mesh_n_low(n) - .mesh_outbound_min(m) - .build() - .unwrap(); - - //fill the mesh with inbound connections - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(n) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .create_network(); - - // graft all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - //create m outbound connections and graft (we will accept the graft) - let mut outbound = HashSet::new(); - for _ in 0..m { - let peer = add_peer(&mut gs, &topics, true, false); - outbound.insert(peer.clone()); - gs.handle_graft(&peer, topics.clone()); - } - - //mesh is overly full - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); - - // run a heartbeat - gs.heartbeat(); - - // Peers should be removed to reach n - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); - - //all outbound peers are still in the mesh - assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); - } - - #[test] - fn test_add_outbound_peers_if_min_is_not_satisfied() { - let config: GossipsubConfig = GossipsubConfig::default(); - - // Fill full mesh with inbound peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - // graft all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - //create config.mesh_outbound_min() many outbound connections without grafting - for _ in 0..config.mesh_outbound_min() { - add_peer(&mut gs, &topics, true, false); - } - - // Nothing changed in the mesh yet - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - - // run a heartbeat - gs.heartbeat(); - - // The outbound peers got additionally added - assert_eq!( - gs.mesh[&topics[0]].len(), - config.mesh_n_high() + config.mesh_outbound_min() - ); - } - - //TODO add a test that ensures that new outbound 
connections are recognized as such. - // This is at the moment done in behaviour with relying on the fact that the call to - // `inject_connection_established` for the first connection is done before `inject_connected` - // gets called. For all further connections `inject_connection_established` should get called - // after `inject_connected`. - - #[test] - fn test_prune_negative_scored_peers() { - let config = GossipsubConfig::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - //add penalty to peer - gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - - //execute heartbeat - gs.heartbeat(); - - //peer should not be in mesh anymore - assert!(gs.mesh[&topics[0]].is_empty()); - - //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Prune { - topic_hash, - peers, - backoff, - } => - topic_hash == &topics[0] && - //no px in this case - peers.is_empty() && - backoff.unwrap() == config.prune_backoff().as_secs(), - _ => false, - }), - 1 - ); - } - - #[test] - fn test_dont_graft_to_negative_scored_peers() { - let config = GossipsubConfig::default(); - //init full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - //add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //reduce score of p1 to negative - gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); - - //handle prunes of all other peers - for p in peers { - gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); - } - - //heartbeat - gs.heartbeat(); - - //assert that mesh only contains p2 - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); - assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); - } - - ///Note that in this test also without a penalty the px would be ignored because of the - /// acceptPXThreshold, but the spec still explicitely states the rule that px from negative - /// peers should get ignored, therefore we test it here. - #[test] - fn test_ignore_px_from_negative_scored_peer() { - let config = GossipsubConfig::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - //penalize peer - gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - - //handle prune from single peer with px peers - let px = vec![PeerInfo { - peer_id: Some(PeerId::random()), - }]; - - gs.handle_prune( - &peers[0], - vec![( - topics[0].clone(), - px.clone(), - Some(config.prune_backoff().as_secs()), - )], - ); - - //assert no dials - assert_eq!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { .. 
} => true, - _ => false, - }) - .count(), - 0 - ); - } - - #[test] - fn test_only_send_nonnegative_scoring_peers_in_px() { - let config = GossipsubConfigBuilder::default() - .prune_peers(16) - .do_px() - .build() - .unwrap(); - - // Build mesh with three peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(3) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - // Penalize first peer - gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - - // Prune second peer - gs.send_graft_prune( - HashMap::new(), - vec![(peers[1].clone(), vec![topics[0].clone()])] - .into_iter() - .collect(), - HashSet::new(), - ); - - // Check that px in prune message only contains third peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Prune { - topic_hash, - peers: px, - .. - } => - topic_hash == &topics[0] - && px.len() == 1 - && px[0].peer_id.as_ref().unwrap() == &peers[2], - _ => false, - }), - 1 - ); - } - - #[test] - fn test_do_not_gossip_to_peers_below_gossip_threshold() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; - - // Build full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // Graft all the peer - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // Add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - // Reduce score of p1 below peer_score_thresholds.gossip_threshold - // note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. 
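// As the comment above notes, behaviour penalties enter the peer score squared: two
// penalties yield 4 * behaviour_penalty_weight (a negative weight), which pushes p1 below
// gossip_threshold = 3 * weight, while p2, with a single penalty, stays above it.
// A minimal sketch of that score term, assuming a zero penalty threshold and a
// hypothetical weight of -10.0 (not the crate's actual default):

fn behaviour_penalty_term(counter: u32, weight: f64, threshold: f64) -> f64 {
    let excess = (f64::from(counter) - threshold).max(0.0);
    weight * excess * excess
}

#[test]
fn squared_penalty_examples() {
    let weight = -10.0;
    assert_eq!(behaviour_penalty_term(1, weight, 0.0), -10.0); // 1^2 * weight: above 3 * weight
    assert_eq!(behaviour_penalty_term(2, weight, 0.0), -40.0); // 2^2 * weight: below 3 * weight
}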
- gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - - // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - - // Receive message - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - - // Emit gossip - gs.emit_gossip(); - - // Check that exactly one gossip messages got sent and it got sent to p2 - assert_eq!( - count_control_msgs(&gs, |peer, action| match action { - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => { - if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { - assert_eq!(peer, &p2); - true - } else { - false - } - } - _ => false, - }), - 1 - ); - } - - #[test] - fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; - - // Build full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // Graft all the peer - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // Add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - // Reduce score of p1 below peer_score_thresholds.gossip_threshold - // note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. - gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - - // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - - // Receive message - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - - gs.handle_iwant(&p1, vec![msg_id.clone()]); - gs.handle_iwant(&p2, vec![msg_id.clone()]); - - // the messages we are sending - let sent_messages = gs - .events - .iter() - .fold(vec![], |mut collected_messages, e| match e { - NetworkBehaviourAction::NotifyHandler { event, peer_id, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push((peer_id.clone(), c.clone())) - } - } - collected_messages - } - _ => collected_messages, - }); - - //the message got sent to p2 - assert!(sent_messages - .iter() - .map(|(peer_id, msg)| ( - peer_id, - gs.data_transform.inbound_transform(msg.clone()).unwrap() - )) - .any(|(peer_id, msg)| peer_id == &p2 && &gs.config.message_id(&msg) == &msg_id)); - //the message got not sent to p1 - assert!(sent_messages - .iter() - .map(|(peer_id, msg)| ( - peer_id, - gs.data_transform.inbound_transform(msg.clone()).unwrap() - )) - .all(|(peer_id, msg)| !(peer_id == &p1 && &gs.config.message_id(&msg) == &msg_id))); - } - - #[test] - fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; - - //build full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // graft all the peer - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - //add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //reduce score of p1 below peer_score_thresholds.gossip_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. 
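// The IHAVE/IWANT threshold tests reduce to a single gate: gossip control traffic is only
// exchanged with peers scoring at or above `gossip_threshold` (a non-positive value), in
// both directions, so we neither answer their IWANTs nor follow up on their IHAVEs
// otherwise. A sketch of that gate with hypothetical names:

fn may_exchange_gossip(score: f64, gossip_threshold: f64) -> bool {
    score >= gossip_threshold
}

#[test]
fn gossip_gate_examples() {
    let weight = -10.0; // assumed behaviour penalty weight
    let gossip_threshold = 3.0 * weight; // -30.0, mirroring the tests around here
    assert!(may_exchange_gossip(1.0 * weight, gossip_threshold)); // one penalty: still served
    assert!(!may_exchange_gossip(4.0 * weight, gossip_threshold)); // two penalties: ignored
}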
- gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - - //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - - //message that other peers have - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(&message); - - gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); - gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); - - // check that we sent exactly one IWANT request to p2 - assert_eq!( - count_control_msgs(&gs, |peer, c| match c { - GossipsubControlAction::IWant { message_ids } => - if message_ids.iter().any(|m| m == &msg_id) { - assert_eq!(peer, &p2); - true - } else { - false - }, - _ => false, - }), - 1 - ); - } - - #[test] - fn test_do_not_publish_to_peer_below_publish_threshold() { - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); - let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.publish_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; - - //build mesh with no peers and no subscribed topics - let (mut gs, _, _) = inject_nodes1() - .gs_config(config.clone()) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - //create a new topic for which we are not subscribed - let topic = Topic::new("test"); - let topics = vec![topic.hash()]; - - //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. - gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - - //a heartbeat will remove the peers from the mesh - gs.heartbeat(); - - // publish on topic - let publish_data = vec![0; 42]; - gs.publish(topic, publish_data).unwrap(); - - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, peer_id, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((peer_id.clone(), s.clone())); - } - } - collected_publish - } - _ => collected_publish, - }); - - //assert only published to p2 - assert_eq!(publishes.len(), 1); - assert_eq!(publishes[0].0, p2); - } - - #[test] - fn test_do_not_flood_publish_to_peer_below_publish_threshold() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.publish_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; - - //build mesh with no peers - let (mut gs, _, topics) = inject_nodes1() - .topics(vec!["test".into()]) - .gs_config(config.clone()) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. - gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - - //a heartbeat will remove the peers from the mesh - gs.heartbeat(); - - // publish on topic - let publish_data = vec![0; 42]; - gs.publish(Topic::new("test"), publish_data).unwrap(); - - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((peer_id.clone(), s.clone())); - } - } - collected_publish - } - _ => collected_publish, - }); - - //assert only published to p2 - assert_eq!(publishes.len(), 1); - assert!(publishes[0].0 == p2); - } - - #[test] - fn test_ignore_rpc_from_peers_below_graylist_threshold() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.publish_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.graylist_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; - - //build mesh with no peers - let (mut gs, _, topics) = inject_nodes1() - .topics(vec!["test".into()]) - .gs_config(config.clone()) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //reduce score of p1 below peer_score_thresholds.graylist_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. 
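// The publish and graylist tests exercise the score-threshold ladder (all thresholds are
// non-positive, with graylist_threshold the most negative). A compact, std-only model of
// which interactions a given score still permits, using hypothetical names; note that in
// this test the subscription from a graylisted peer is still handled even though the rest
// of its RPC is dropped:

struct Thresholds {
    gossip: f64,
    publish: f64,
    graylist: f64,
}

/// Returns (exchange gossip, receive our publishes, have their RPC content processed).
fn allowed(score: f64, t: &Thresholds) -> (bool, bool, bool) {
    (score >= t.gossip, score >= t.publish, score >= t.graylist)
}

#[test]
fn threshold_ladder_examples() {
    let w = -10.0; // assumed behaviour penalty weight
    let t = Thresholds { gossip: 0.5 * w, publish: 0.5 * w, graylist: 3.0 * w };
    assert_eq!(allowed(1.0 * w, &t), (false, false, true)); // one penalty: below gossip/publish only
    assert_eq!(allowed(4.0 * w, &t), (false, false, false)); // two penalties: RPC content dropped
}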
- gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - - //reduce score of p2 below publish_threshold but not below graylist_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - - let raw_message1 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4], - sequence_number: Some(1u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - - let raw_message2 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4, 5], - sequence_number: Some(2u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - - let raw_message3 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4, 5, 6], - sequence_number: Some(3u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - - let raw_message4 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4, 5, 6, 7], - sequence_number: Some(4u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - - // Transform the inbound message - let message2 = &gs - .data_transform - .inbound_transform(raw_message2.clone()) - .unwrap(); + } - // Transform the inbound message - let message4 = &gs - .data_transform - .inbound_transform(raw_message4.clone()) - .unwrap(); + //we send a message for this topic => this will initialize the fanout + gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); + + //subscribe now to topic + gs.subscribe(&topic).unwrap(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + + //assert that graft gets created to non-explicit peer + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && matches!(m, GossipsubControlAction::Graft { .. })) + >= 1, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && matches!(m, GossipsubControlAction::Graft { .. 
})), + 0, + "A graft message got created to an explicit peer" + ); +} - let subscription = GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topics[0].clone(), - }; +#[test] +fn no_gossip_gets_sent_to_explicit_peers() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawGossipsubMessage { + source: Some(peers[1]), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; - let control_action = GossipsubControlAction::IHave { - topic_hash: topics[0].clone(), - message_ids: vec![config.message_id(&message2)], - }; + //forward the message + gs.handle_received_message(message, &local_id); - //clear events - gs.events.clear(); - - //receive from p1 - gs.inject_event( - p1.clone(), - ConnectionId::new(0), - HandlerEvent::Message { - rpc: GossipsubRpc { - messages: vec![raw_message1], - subscriptions: vec![subscription.clone()], - control_msgs: vec![control_action], - }, - invalid_messages: Vec::new(), - }, - ); + //simulate multiple gossip calls (for randomness) + for _ in 0..3 { + gs.emit_gossip(); + } - //only the subscription event gets processed, the rest is dropped - assert_eq!(gs.events.len(), 1); - assert!(match &gs.events[0] { - NetworkBehaviourAction::GenerateEvent(event) => match event { - GossipsubEvent::Subscribed { .. } => true, - _ => false, - }, - _ => false, - }); + //assert that no gossip gets sent to explicit peer + assert_eq!( + gs.control_pool + .get(&peers[0]) + .unwrap_or(&Vec::new()) + .iter() + .filter(|m| matches!(m, GossipsubControlAction::IHave { .. })) + .count(), + 0, + "Gossip got emitted to explicit peer" + ); +} - let control_action = GossipsubControlAction::IHave { - topic_hash: topics[0].clone(), - message_ids: vec![config.message_id(&message4)], - }; +// Tests the mesh maintenance addition +#[test] +fn test_mesh_addition() { + let config: GossipsubConfig = GossipsubConfig::default(); - //receive from p2 - gs.inject_event( - p2.clone(), - ConnectionId::new(0), - HandlerEvent::Message { - rpc: GossipsubRpc { - messages: vec![raw_message3], - subscriptions: vec![subscription.clone()], - control_msgs: vec![control_action], - }, - invalid_messages: Vec::new(), - }, - ); + // Adds mesh_low peers and PRUNE 2 giving us a deficit. 
+ let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n() + 1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); - //events got processed - assert!(gs.events.len() > 1); - } + let to_remove_peers = config.mesh_n() + 1 - config.mesh_n_low() - 1; - #[test] - fn test_ignore_px_from_peers_below_accept_px_threshold() { - let config = GossipsubConfigBuilder::default() - .prune_peers(16) - .build() - .unwrap(); - let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.accept_px_threshold = peer_score_params.app_specific_weight; - - // Build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // Decrease score of first peer to less than accept_px_threshold - gs.set_application_score(&peers[0], 0.99); - - // Increase score of second peer to accept_px_threshold - gs.set_application_score(&peers[1], 1.0); - - // Handle prune from peer peers[0] with px peers - let px = vec![PeerInfo { - peer_id: Some(PeerId::random()), - }]; + for peer in peers.iter().take(to_remove_peers) { gs.handle_prune( - &peers[0], - vec![( - topics[0].clone(), - px.clone(), - Some(config.prune_backoff().as_secs()), - )], + peer, + topics.iter().map(|h| (h.clone(), vec![], None)).collect(), ); + } - // Assert no dials - assert_eq!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { .. } => true, - _ => false, - }) - .count(), - 0 - ); + // Verify the pruned peers are removed from the mesh. + assert_eq!( + gs.mesh.get(&topics[0]).unwrap().len(), + config.mesh_n_low() - 1 + ); - //handle prune from peer peers[1] with px peers - let px = vec![PeerInfo { - peer_id: Some(PeerId::random()), - }]; - gs.handle_prune( - &peers[1], - vec![( - topics[0].clone(), - px.clone(), - Some(config.prune_backoff().as_secs()), - )], - ); + // run a heartbeat + gs.heartbeat(); - //assert there are dials now - assert!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { .. } => true, - _ => false, - }) - .count() - > 0 - ); + // Peers should be added to reach mesh_n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +} + +// Tests the mesh maintenance subtraction +#[test] +fn test_mesh_subtraction() { + let config = GossipsubConfig::default(); + + // Adds mesh_low peers and PRUNE 2 giving us a deficit. 
+ let n = config.mesh_n_high() + 10; + //make all outbound connections so that we allow grafting to all + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(n) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .outbound(n) + .create_network(); + + // graft all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_keep_best_scoring_peers_on_oversubscription() { - let config = GossipsubConfigBuilder::default() - .mesh_n_low(15) - .mesh_n(30) - .mesh_n_high(60) - .retain_scores(29) - .build() - .unwrap(); + // run a heartbeat + gs.heartbeat(); - //build mesh with more peers than mesh can hold - let n = config.mesh_n_high() + 1; - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(n) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(n) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - // graft all, will be accepted since the are outbound - for peer in &peers { - gs.handle_graft(peer, topics.clone()); - } + // Peers should be removed to reach mesh_n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +} - //assign scores to peers equalling their index +#[test] +fn test_connect_to_px_peers_on_handle_prune() { + let config: GossipsubConfig = GossipsubConfig::default(); - //set random positive scores - for (index, peer) in peers.iter().enumerate() { - gs.set_application_score(peer, index as f64); - } + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); - assert_eq!(gs.mesh[&topics[0]].len(), n); + //handle prune from single peer with px peers - //heartbeat to prune some peers - gs.heartbeat(); + let mut px = Vec::new(); + //propose more px peers than config.prune_peers() + for _ in 0..config.prune_peers() + 5 { + px.push(PeerInfo { + peer_id: Some(PeerId::random()), + }); + } - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n()); + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px.clone(), + Some(config.prune_backoff().as_secs()), + )], + ); + + //Check DialPeer events for px peers + let dials: Vec<_> = gs + .events + .iter() + .filter_map(|e| match e { + NetworkBehaviourAction::Dial { opts, handler: _ } => opts.get_peer_id(), + _ => None, + }) + .collect(); + + // Exactly config.prune_peers() many random peers should be dialled + assert_eq!(dials.len(), config.prune_peers()); + + let dials_set: HashSet<_> = dials.into_iter().collect(); + + // No duplicates + assert_eq!(dials_set.len(), config.prune_peers()); + + //all dial peers must be in px + assert!(dials_set.is_subset( + &px.iter() + .map(|i| *i.peer_id.as_ref().unwrap()) + .collect::>() + )); +} - //mesh contains retain_scores best peers - assert!(gs.mesh[&topics[0]].is_superset( - &peers[(n - config.retain_scores())..] 
- .iter()
- .cloned()
- .collect()
- ));
- }
+#[test]
+fn test_send_px_and_backoff_in_prune() {
+ let config: GossipsubConfig = GossipsubConfig::default();
+
+ //build mesh with enough peers for px
+ let (mut gs, peers, topics) = inject_nodes1()
+ .peer_no(config.prune_peers() + 1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .create_network();
+
+ //send prune to peer
+ gs.send_graft_prune(
+ HashMap::new(),
+ vec![(peers[0], vec![topics[0].clone()])]
+ .into_iter()
+ .collect(),
+ HashSet::new(),
+ );
+
+ //check prune message
+ assert_eq!(
+ count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0]
+ && match m {
+ GossipsubControlAction::Prune {
+ topic_hash,
+ peers,
+ backoff,
+ } =>
+ topic_hash == &topics[0] &&
+ peers.len() == config.prune_peers() &&
+ //all peers are different
+ peers.iter().collect::<HashSet<_>>().len() ==
+ config.prune_peers() &&
+ backoff.unwrap() == config.prune_backoff().as_secs(),
+ _ => false,
+ }),
+ 1
+ );
+}
- #[test]
- fn test_scoring_p1() {
- let config = GossipsubConfig::default();
- let mut peer_score_params = PeerScoreParams::default();
- let topic = Topic::new("test");
- let topic_hash = topic.hash();
- let mut topic_params = TopicScoreParams::default();
- topic_params.time_in_mesh_weight = 2.0;
- topic_params.time_in_mesh_quantum = Duration::from_millis(50);
- topic_params.time_in_mesh_cap = 10.0;
- topic_params.topic_weight = 0.7;
- peer_score_params
- .topics
- .insert(topic_hash.clone(), topic_params.clone());
- let peer_score_thresholds = PeerScoreThresholds::default();
-
- //build mesh with one peer
- let (mut gs, peers, _) = inject_nodes1()
- .peer_no(1)
- .topics(vec!["test".into()])
- .to_subscribe(true)
- .gs_config(config.clone())
- .explicit(0)
- .outbound(0)
- .scoring(Some((peer_score_params, peer_score_thresholds)))
- .create_network();
-
- //sleep for 2 times the mesh_quantum
- sleep(topic_params.time_in_mesh_quantum * 2);
- //refresh scores
- gs.peer_score.as_mut().unwrap().0.refresh_scores();
- assert!(
- gs.peer_score.as_ref().unwrap().0.score(&peers[0])
- >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
- "score should be at least 2 * time_in_mesh_weight * topic_weight"
- );
- assert!(
- gs.peer_score.as_ref().unwrap().0.score(&peers[0])
- < 3.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
- "score should be less than 3 * time_in_mesh_weight * topic_weight"
- );
+#[test]
+fn test_prune_backoffed_peer_on_graft() {
+ let config: GossipsubConfig = GossipsubConfig::default();
+
+ //build mesh with enough peers for px
+ let (mut gs, peers, topics) = inject_nodes1()
+ .peer_no(config.prune_peers() + 1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .create_network();
+
+ //remove peer from mesh and send prune to peer => this adds a backoff for this peer
+ gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]);
+ gs.send_graft_prune(
+ HashMap::new(),
+ vec![(peers[0], vec![topics[0].clone()])]
+ .into_iter()
+ .collect(),
+ HashSet::new(),
+ );
- //sleep again for 2 times the mesh_quantum
- sleep(topic_params.time_in_mesh_quantum * 2);
- //refresh scores
- gs.peer_score.as_mut().unwrap().0.refresh_scores();
- assert!(
- gs.peer_score.as_ref().unwrap().0.score(&peers[0])
- >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
- "score should be at least 4 * time_in_mesh_weight * topic_weight"
- );
+ //ignore all messages until now
+ gs.events.clear();
- //sleep for enough periods to reach maximum
- sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap
- 3.0) as u32);
- //refresh scores
- gs.peer_score.as_mut().unwrap().0.refresh_scores();
- assert_eq!(
- gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
- topic_params.time_in_mesh_cap
- * topic_params.time_in_mesh_weight
- * topic_params.topic_weight,
- "score should be exactly time_in_mesh_cap * time_in_mesh_weight * topic_weight"
- );
- }
+ //handle graft
+ gs.handle_graft(&peers[0], vec![topics[0].clone()]);
- fn random_message(seq: &mut u64, topics: &Vec<TopicHash>) -> RawGossipsubMessage {
- let mut rng = rand::thread_rng();
- *seq += 1;
- RawGossipsubMessage {
- source: Some(PeerId::random()),
- data: (0..rng.gen_range(10, 30))
- .into_iter()
- .map(|_| rng.gen())
- .collect(),
- sequence_number: Some(*seq),
- topic: topics[rng.gen_range(0, topics.len())].clone(),
- signature: None,
- key: None,
- validated: true,
- }
+ //check prune message
+ assert_eq!(
+ count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0]
+ && match m {
+ GossipsubControlAction::Prune {
+ topic_hash,
+ peers,
+ backoff,
+ } =>
+ topic_hash == &topics[0] &&
+ //no px in this case
+ peers.is_empty() &&
+ backoff.unwrap() == config.prune_backoff().as_secs(),
+ _ => false,
+ }),
+ 1
+ );
+}
+
+#[test]
+fn test_do_not_graft_within_backoff_period() {
+ let config = GossipsubConfigBuilder::default()
+ .backoff_slack(1)
+ .heartbeat_interval(Duration::from_millis(100))
+ .build()
+ .unwrap();
+ //only one peer => mesh too small and will try to regraft as early as possible
+ let (mut gs, peers, topics) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .create_network();
+
+ //handle prune from peer with backoff of one second
+ gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]);
+
+ //forget all events until now
+ flush_events(&mut gs);
+
+ //call heartbeat
+ gs.heartbeat();
+
+ //Sleep for one second and apply 10 regular heartbeats (interval = 100ms).
+ for _ in 0..10 {
+ sleep(Duration::from_millis(100));
+ gs.heartbeat();
 }
- #[test]
- fn test_scoring_p2() {
- let config = GossipsubConfig::default();
- let mut peer_score_params = PeerScoreParams::default();
- let topic = Topic::new("test");
- let topic_hash = topic.hash();
- let mut topic_params = TopicScoreParams::default();
- topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh
- topic_params.first_message_deliveries_weight = 2.0;
- topic_params.first_message_deliveries_cap = 10.0;
- topic_params.first_message_deliveries_decay = 0.9;
- topic_params.topic_weight = 0.7;
- peer_score_params
- .topics
- .insert(topic_hash.clone(), topic_params.clone());
- let peer_score_thresholds = PeerScoreThresholds::default();
-
- //build mesh with one peer
- let (mut gs, peers, topics) = inject_nodes1()
- .peer_no(2)
- .topics(vec!["test".into()])
- .to_subscribe(true)
- .gs_config(config.clone())
- .explicit(0)
- .outbound(0)
- .scoring(Some((peer_score_params, peer_score_thresholds)))
- .create_network();
-
- let mut seq = 0;
- let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| {
- gs.handle_received_message(msg, &peers[index]);
- };
+ //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
+ // is needed).
+ assert_eq!(
+ count_control_msgs(&gs, |_, m| matches!(
+ m,
+ GossipsubControlAction::Graft { ..
} + )), + 0, + "Graft message created too early within backoff period" + ); + + //Heartbeat one more time this should graft now + sleep(Duration::from_millis(100)); + gs.heartbeat(); + + //check that graft got created + assert!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, + "No graft message was created after backoff period" + ); +} - let m1 = random_message(&mut seq, &topics); - //peer 0 delivers message first - deliver_message(&mut gs, 0, m1.clone()); - //peer 1 delivers message second - deliver_message(&mut gs, 1, m1.clone()); +#[test] +fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { + //set default backoff period to 1 second + let config = GossipsubConfigBuilder::default() + .prune_backoff(Duration::from_millis(90)) + .backoff_slack(1) + .heartbeat_interval(Duration::from_millis(100)) + .build() + .unwrap(); + //only one peer => mesh too small and will try to regraft as early as possible + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + //handle prune from peer without a specified backoff + gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); + + //forget all events until now + flush_events(&mut gs); + + //call heartbeat + gs.heartbeat(); + + //Apply one more heartbeat + sleep(Duration::from_millis(100)); + gs.heartbeat(); + + //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // is needed). + assert_eq!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )), + 0, + "Graft message created too early within backoff period" + ); + + //Heartbeat one more time this should graft now + sleep(Duration::from_millis(100)); + gs.heartbeat(); + + //check that graft got created + assert!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. 
} + )) > 0, + "No graft message was created after backoff period" + ); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 1.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, - "score should be exactly first_message_deliveries_weight * topic_weight" - ); +#[test] +fn test_unsubscribe_backoff() { + const HEARTBEAT_INTERVAL: Duration = Duration::from_millis(100); + let config = GossipsubConfigBuilder::default() + .backoff_slack(1) + // ensure a prune_backoff > unsubscribe_backoff + .prune_backoff(Duration::from_secs(5)) + .unsubscribe_backoff(1) + .heartbeat_interval(HEARTBEAT_INTERVAL) + .build() + .unwrap(); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 0.0, - "there should be no score for second message deliveries * topic_weight" - ); + let topic = String::from("test"); + // only one peer => mesh too small and will try to regraft as early as possible + let (mut gs, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec![topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); - //peer 2 delivers two new messages - deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); - deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 2.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, - "score should be exactly 2 * first_message_deliveries_weight * topic_weight" - ); + let _ = gs.unsubscribe(&Topic::new(topic)); - //test decaying - gs.peer_score.as_mut().unwrap().0.refresh_scores(); + assert_eq!( + count_control_msgs(&gs, |_, m| match m { + GossipsubControlAction::Prune { backoff, .. } => backoff == &Some(1), + _ => false, + }), + 1, + "Peer should be pruned with `unsubscribe_backoff`." + ); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 1.0 * topic_params.first_message_deliveries_decay - * topic_params.first_message_deliveries_weight - * topic_params.topic_weight, - "score should be exactly first_message_deliveries_decay * \ - first_message_deliveries_weight * topic_weight" - ); + let _ = gs.subscribe(&Topic::new(topics[0].to_string())); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 2.0 * topic_params.first_message_deliveries_decay - * topic_params.first_message_deliveries_weight - * topic_params.topic_weight, - "score should be exactly 2 * first_message_deliveries_decay * \ - first_message_deliveries_weight * topic_weight" - ); + // forget all events until now + flush_events(&mut gs); - //test cap - for _ in 0..topic_params.first_message_deliveries_cap as u64 { - deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); - } + // call heartbeat + gs.heartbeat(); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - topic_params.first_message_deliveries_cap - * topic_params.first_message_deliveries_weight - * topic_params.topic_weight, - "score should be exactly first_message_deliveries_cap * \ - first_message_deliveries_weight * topic_weight" - ); + // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). 
+ for _ in 0..10 { + sleep(HEARTBEAT_INTERVAL); + gs.heartbeat(); } - #[test] - fn test_scoring_p3() { - let config = GossipsubConfig::default(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = -2.0; - topic_params.mesh_message_deliveries_decay = 0.9; - topic_params.mesh_message_deliveries_cap = 10.0; - topic_params.mesh_message_deliveries_threshold = 5.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(100); - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; - - let mut expected_message_deliveries = 0.0; + // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // is needed). + assert_eq!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )), + 0, + "Graft message created too early within backoff period" + ); + + // Heartbeat one more time this should graft now + sleep(HEARTBEAT_INTERVAL); + gs.heartbeat(); + + // check that graft got created + assert!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, + "No graft message was created after backoff period" + ); +} - //messages used to test window - let m1 = random_message(&mut seq, &topics); - let m2 = random_message(&mut seq, &topics); +#[test] +fn test_flood_publish() { + let config: GossipsubConfig = GossipsubConfig::default(); + + let topic = "test"; + // Adds more peers than mesh can hold to test flood publishing + let (mut gs, _, _) = inject_nodes1() + .peer_no(config.mesh_n_high() + 10) + .topics(vec![topic.into()]) + .to_subscribe(true) + .create_network(); + + //publish message + let publish_data = vec![0; 42]; + gs.publish(Topic::new(topic), publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. 
} => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push(s.clone()); + } + } + collected_publish + } + _ => collected_publish, + }); - //peer 1 delivers m1 - deliver_message(&mut gs, 1, m1.clone()); + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); - //peer 0 delivers two message - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - expected_message_deliveries += 2.0; + let msg_id = gs.config.message_id(message); - sleep(Duration::from_millis(60)); + let config: GossipsubConfig = GossipsubConfig::default(); + assert_eq!( + publishes.len(), + config.mesh_n_high() + 10, + "Should send a publish message to all known peers" + ); - //peer 1 delivers m2 - deliver_message(&mut gs, 1, m2.clone()); + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} - sleep(Duration::from_millis(70)); - //peer 0 delivers m1 and m2 only m2 gets counted - deliver_message(&mut gs, 0, m1); - deliver_message(&mut gs, 0, m2); - expected_message_deliveries += 1.0; +#[test] +fn test_gossip_to_at_least_gossip_lazy_peers() { + let config: GossipsubConfig = GossipsubConfig::default(); + + //add more peers than in mesh to test gossipping + //by default only mesh_n_low peers will get added to mesh + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) + .topics(vec!["topic".into()]) + .to_subscribe(true) + .create_network(); + + //receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); - sleep(Duration::from_millis(900)); + //emit gossip + gs.emit_gossip(); - //message deliveries penalties get activated, peer 0 has only delivered 3 messages and - // therefore gets a penalty - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 - ); + let msg_id = gs.config.message_id(message); - // peer 0 delivers a lot of messages => message_deliveries should be capped at 10 - for _ in 0..20 { - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - } + //check that exactly config.gossip_lazy() many gossip messages were sent. 
+ assert_eq!( + count_control_msgs(&gs, |_, action| match action { + GossipsubControlAction::IHave { + topic_hash, + message_ids, + } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), + _ => false, + }), + config.gossip_lazy() + ); +} - expected_message_deliveries = 10.0; +#[test] +fn test_gossip_to_at_most_gossip_factor_peers() { + let config: GossipsubConfig = GossipsubConfig::default(); + + //add a lot of peers + let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(m) + .topics(vec!["topic".into()]) + .to_subscribe(true) + .create_network(); + + //receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + //emit gossip + gs.emit_gossip(); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + //check that exactly config.gossip_lazy() many gossip messages were sent. + assert_eq!( + count_control_msgs(&gs, |_, action| match action { + GossipsubControlAction::IHave { + topic_hash, + message_ids, + } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), + _ => false, + }), + ((m - config.mesh_n_low()) as f64 * config.gossip_factor()) as usize + ); +} - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +#[test] +fn test_accept_only_outbound_peer_grafts_when_mesh_full() { + let config: GossipsubConfig = GossipsubConfig::default(); - //apply 10 decays - for _ in 0..10 { - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay - } + //enough peers to fill the mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 - ); + // graft all the peers => this will fill the mesh + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_scoring_p3b() { - let config = GossipsubConfigBuilder::default() - .prune_backoff(Duration::from_millis(100)) - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = -2.0; - topic_params.mesh_message_deliveries_decay = 0.9; - topic_params.mesh_message_deliveries_cap = 10.0; - topic_params.mesh_message_deliveries_threshold = 5.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(100); - topic_params.mesh_failure_penalty_weight = -3.0; - topic_params.mesh_failure_penalty_decay = 0.95; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build 
mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + //assert current mesh size + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - let mut expected_message_deliveries = 0.0; + //create an outbound and an inbound peer + let inbound = add_peer(&mut gs, &topics, false, false); + let outbound = add_peer(&mut gs, &topics, true, false); - //add some positive score - gs.peer_score - .as_mut() - .unwrap() - .0 - .set_application_score(&peers[0], 100.0); + //send grafts + gs.handle_graft(&inbound, vec![topics[0].clone()]); + gs.handle_graft(&outbound, vec![topics[0].clone()]); - //peer 0 delivers two message - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - expected_message_deliveries += 2.0; + //assert mesh size + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); - sleep(Duration::from_millis(1050)); + //inbound is not in mesh + assert!(!gs.mesh[&topics[0]].contains(&inbound)); - //activation kicks in - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + //outbound is in mesh + assert!(gs.mesh[&topics[0]].contains(&outbound)); +} - //prune peer - gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); +#[test] +fn test_do_not_remove_too_many_outbound_peers() { + //use an extreme case to catch errors with high probability + let m = 50; + let n = 2 * m; + let config = GossipsubConfigBuilder::default() + .mesh_n_high(n) + .mesh_n(n) + .mesh_n_low(n) + .mesh_outbound_min(m) + .build() + .unwrap(); - //wait backoff - sleep(Duration::from_millis(130)); + //fill the mesh with inbound connections + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(n) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + // graft all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } - //regraft peer - gs.handle_graft(&peers[0], topics.clone()); + //create m outbound connections and graft (we will accept the graft) + let mut outbound = HashSet::new(); + for _ in 0..m { + let peer = add_peer(&mut gs, &topics, true, false); + outbound.insert(peer); + gs.handle_graft(&peer, topics.clone()); + } - //the score should now consider p3b - let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 100.0 + expected_b3 * -3.0 * 0.7 - ); + //mesh is overly full + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); - //we can also add a new p3 to the score + // run a heartbeat + gs.heartbeat(); - //peer 0 delivers one message - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - expected_message_deliveries += 1.0; + // Peers should be removed to reach n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); - sleep(Duration::from_millis(1050)); - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay - expected_b3 *= 0.95; + //all outbound peers are still in the mesh + assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); +} - assert_eq!( - 
gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 100.0 - + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 - ); +#[test] +fn test_add_outbound_peers_if_min_is_not_satisfied() { + let config: GossipsubConfig = GossipsubConfig::default(); + + // Fill full mesh with inbound peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + // graft all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_scoring_p4_valid_message() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + //create config.mesh_outbound_min() many outbound connections without grafting + for _ in 0..config.mesh_outbound_min() { + add_peer(&mut gs, &topics, true, false); + } - //peer 0 delivers valid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + // Nothing changed in the mesh yet + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + // run a heartbeat + gs.heartbeat(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + // The outbound peers got additionally added + assert_eq!( + gs.mesh[&topics[0]].len(), + config.mesh_n_high() + config.mesh_outbound_min() + ); +} - //message m1 gets validated - gs.report_message_validation_result( - &config.message_id(&message1), - &peers[0], - MessageAcceptance::Accept, - ) +//TODO add a test that ensures that new outbound connections are recognized as such. +// This is at the moment done in behaviour with relying on the fact that the call to +// `inject_connection_established` for the first connection is done before `inject_connected` +// gets called. For all further connections `inject_connection_established` should get called +// after `inject_connected`. 
+ +#[test] +fn test_prune_negative_scored_peers() { + let config = GossipsubConfig::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + //add penalty to peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + //execute heartbeat + gs.heartbeat(); + + //peer should not be in mesh anymore + assert!(gs.mesh[&topics[0]].is_empty()); + + //check prune message + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && match m { + GossipsubControlAction::Prune { + topic_hash, + peers, + backoff, + } => + topic_hash == &topics[0] && + //no px in this case + peers.is_empty() && + backoff.unwrap() == config.prune_backoff().as_secs(), + _ => false, + }), + 1 + ); +} + +#[test] +fn test_dont_graft_to_negative_scored_peers() { + let config = GossipsubConfig::default(); + //init full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + //add two additional peers that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 to negative + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); + + //handle prunes of all other peers + for p in peers { + gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); + } + + //heartbeat + gs.heartbeat(); + + //assert that mesh only contains p2 + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); + assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); +} + +///Note that in this test also without a penalty the px would be ignored because of the +/// acceptPXThreshold, but the spec still explicitely states the rule that px from negative +/// peers should get ignored, therefore we test it here. +#[test] +fn test_ignore_px_from_negative_scored_peer() { + let config = GossipsubConfig::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + //penalize peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + //handle prune from single peer with px peers + let px = vec![PeerInfo { + peer_id: Some(PeerId::random()), + }]; + + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px, + Some(config.prune_backoff().as_secs()), + )], + ); + + //assert no dials + assert_eq!( + gs.events + .iter() + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. 
})) + .count(), + 0 + ); +} + +#[test] +fn test_only_send_nonnegative_scoring_peers_in_px() { + let config = GossipsubConfigBuilder::default() + .prune_peers(16) + .do_px() + .build() .unwrap(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + // Build mesh with three peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(3) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + // Penalize first peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + // Prune second peer + gs.send_graft_prune( + HashMap::new(), + vec![(peers[1], vec![topics[0].clone()])] + .into_iter() + .collect(), + HashSet::new(), + ); + + // Check that px in prune message only contains third peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && match m { + GossipsubControlAction::Prune { + topic_hash, + peers: px, + .. + } => + topic_hash == &topics[0] + && px.len() == 1 + && px[0].peer_id.as_ref().unwrap() == &peers[2], + _ => false, + }), + 1 + ); +} + +#[test] +fn test_do_not_gossip_to_peers_below_gossip_threshold() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + + // Build full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_scoring_p4_invalid_signature() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - - //peer 0 delivers message with invalid signature - let m = random_message(&mut seq, &topics); - - gs.inject_event( - peers[0].clone(), - ConnectionId::new(0), - HandlerEvent::Message { - rpc: GossipsubRpc { - messages: vec![], - subscriptions: vec![], - control_msgs: vec![], - }, - invalid_messages: vec![(m, ValidationError::InvalidSignature)], - }, - ); + // Add two additional peers 
that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + // Reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + // Receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + + // Emit gossip + gs.emit_gossip(); + + // Check that exactly one gossip messages got sent and it got sent to p2 + assert_eq!( + count_control_msgs(&gs, |peer, action| match action { + GossipsubControlAction::IHave { + topic_hash, + message_ids, + } => { + if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false + } + } + _ => false, + }), + 1 + ); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); +#[test] +fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + + // Build full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_scoring_p4_message_from_self() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = 
|gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + // Add two additional peers that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + // Reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + // Receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + + gs.handle_iwant(&p1, vec![msg_id.clone()]); + gs.handle_iwant(&p2, vec![msg_id.clone()]); + + // the messages we are sending + let sent_messages = gs + .events + .iter() + .fold(vec![], |mut collected_messages, e| match e { + NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for c in &event.messages { + collected_messages.push((*peer_id, c.clone())) + } + } + collected_messages + } + _ => collected_messages, + }); - //peer 0 delivers invalid message from self - let mut m = random_message(&mut seq, &topics); - m.source = Some(gs.publish_config.get_own_id().unwrap().clone()); + //the message got sent to p2 + assert!(sent_messages + .iter() + .map(|(peer_id, msg)| ( + peer_id, + gs.data_transform.inbound_transform(msg.clone()).unwrap() + )) + .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); + //the message got not sent to p1 + assert!(sent_messages + .iter() + .map(|(peer_id, msg)| ( + peer_id, + gs.data_transform.inbound_transform(msg.clone()).unwrap() + )) + .all(|(peer_id, msg)| !(peer_id == &p1 && gs.config.message_id(&msg) == msg_id))); +} - deliver_message(&mut gs, 0, m.clone()); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); +#[test] +fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + //build full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_scoring_p4_ignored_message() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 
0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + //add two additional peers that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.gossip_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //message that other peers have + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; - //peer 0 delivers ignored message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + let msg_id = gs.config.message_id(message); - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); + gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); - //message m1 gets ignored - gs.report_message_validation_result( - &config.message_id(&message1), - &peers[0], - MessageAcceptance::Ignore, - ) + // check that we sent exactly one IWANT request to p2 + assert_eq!( + count_control_msgs(&gs, |peer, c| match c { + GossipsubControlAction::IWant { message_ids } => + if message_ids.iter().any(|m| m == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false + }, + _ => false, + }), + 1 + ); +} + +#[test] +fn test_do_not_publish_to_peer_below_publish_threshold() { + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() .unwrap(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - } + //build mesh with no peers and no subscribed topics + let (mut gs, _, _) = 
inject_nodes1() + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //create a new topic for which we are not subscribed + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two additional peers that will be added to the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.publish_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //a heartbeat will remove the peers from the mesh + gs.heartbeat(); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(topic, publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push((*peer_id, s.clone())); + } + } + collected_publish + } + _ => collected_publish, + }); - #[test] - fn test_scoring_p4_application_invalidated_message() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + //assert only published to p2 + assert_eq!(publishes.len(), 1); + assert_eq!(publishes[0].0, p2); +} + +#[test] +fn test_do_not_flood_publish_to_peer_below_publish_threshold() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + //build mesh with no peers + let (mut gs, _, topics) = inject_nodes1() + .topics(vec!["test".into()]) + .gs_config(config) + .scoring(Some((peer_score_params, 
peer_score_thresholds))) + .create_network(); + + //add two additional peers that will be added to the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.publish_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //a heartbeat will remove the peers from the mesh + gs.heartbeat(); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push((*peer_id, s.clone())); + } + } + collected_publish + } + _ => collected_publish, + }); - //peer 0 delivers invalid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + //assert only published to p2 + assert_eq!(publishes.len(), 1); + assert!(publishes[0].0 == p2); +} - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +#[test] +fn test_ignore_rpc_from_peers_below_graylist_threshold() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + graylist_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + //build mesh with no peers + let (mut gs, _, topics) = inject_nodes1() + .topics(vec!["test".into()]) + .gs_config(config.clone()) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //add two additional peers that will be added to the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.graylist_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. 
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below publish_threshold but not below graylist_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + let raw_message1 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4], + sequence_number: Some(1u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; - //message m1 gets rejected - gs.report_message_validation_result( - &config.message_id(&message1), - &peers[0], - MessageAcceptance::Reject, - ) - .unwrap(); + let raw_message2 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5], + sequence_number: Some(2u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + let raw_message3 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5, 6], + sequence_number: Some(3u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + let raw_message4 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5, 6, 7], + sequence_number: Some(4u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(raw_message2).unwrap(); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); - } + // Transform the inbound message + let message4 = &gs.data_transform.inbound_transform(raw_message4).unwrap(); - #[test] - fn test_scoring_p4_application_invalid_message_from_two_peers() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + let subscription = GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topics[0].clone(), + }; - //peer 0 delivers invalid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + let control_action = GossipsubControlAction::IHave { + topic_hash: topics[0].clone(), + message_ids: vec![config.message_id(message2)], + }; - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + 
//clear events + gs.events.clear(); + + //receive from p1 + gs.inject_event( + p1, + ConnectionId::new(0), + HandlerEvent::Message { + rpc: GossipsubRpc { + messages: vec![raw_message1], + subscriptions: vec![subscription.clone()], + control_msgs: vec![control_action], + }, + invalid_messages: Vec::new(), + }, + ); + + //only the subscription event gets processed, the rest is dropped + assert_eq!(gs.events.len(), 1); + assert!(matches!( + gs.events[0], + NetworkBehaviourAction::GenerateEvent(GossipsubEvent::Subscribed { .. }) + )); + + let control_action = GossipsubControlAction::IHave { + topic_hash: topics[0].clone(), + message_ids: vec![config.message_id(message4)], + }; - //peer 1 delivers same message - deliver_message(&mut gs, 1, m1.clone()); + //receive from p2 + gs.inject_event( + p2, + ConnectionId::new(0), + HandlerEvent::Message { + rpc: GossipsubRpc { + messages: vec![raw_message3], + subscriptions: vec![subscription], + control_msgs: vec![control_action], + }, + invalid_messages: Vec::new(), + }, + ); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); + //events got processed + assert!(gs.events.len() > 1); +} - //message m1 gets rejected - gs.report_message_validation_result( - &config.message_id(&message1), - &peers[0], - MessageAcceptance::Reject, - ) +#[test] +fn test_ignore_px_from_peers_below_accept_px_threshold() { + let config = GossipsubConfigBuilder::default() + .prune_peers(16) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + accept_px_threshold: peer_score_params.app_specific_weight, + ..PeerScoreThresholds::default() + }; + // Build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Decrease score of first peer to less than accept_px_threshold + gs.set_application_score(&peers[0], 0.99); + + // Increase score of second peer to accept_px_threshold + gs.set_application_score(&peers[1], 1.0); + + // Handle prune from peer peers[0] with px peers + let px = vec![PeerInfo { + peer_id: Some(PeerId::random()), + }]; + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px, + Some(config.prune_backoff().as_secs()), + )], + ); + + // Assert no dials + assert_eq!( + gs.events + .iter() + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. })) + .count(), + 0 + ); + + //handle prune from peer peers[1] with px peers + let px = vec![PeerInfo { + peer_id: Some(PeerId::random()), + }]; + gs.handle_prune( + &peers[1], + vec![( + topics[0].clone(), + px, + Some(config.prune_backoff().as_secs()), + )], + ); + + //assert there are dials now + assert!( + gs.events + .iter() + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. 
})) + .count() + > 0 + ); +} + +#[test] +fn test_keep_best_scoring_peers_on_oversubscription() { + let config = GossipsubConfigBuilder::default() + .mesh_n_low(15) + .mesh_n(30) + .mesh_n_high(60) + .retain_scores(29) + .build() .unwrap(); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - -2.0 * 0.7 - ); + //build mesh with more peers than mesh can hold + let n = config.mesh_n_high() + 1; + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(n) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(n) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + // graft all, will be accepted since the are outbound + for peer in &peers { + gs.handle_graft(peer, topics.clone()); } - #[test] - fn test_scoring_p4_three_application_invalid_messages() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + //assign scores to peers equalling their index - //peer 0 delivers two invalid message - let m1 = random_message(&mut seq, &topics); - let m2 = random_message(&mut seq, &topics); - let m3 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); - deliver_message(&mut gs, 0, m2.clone()); - deliver_message(&mut gs, 0, m3.clone()); + //set random positive scores + for (index, peer) in peers.iter().enumerate() { + gs.set_application_score(peer, index as f64); + } - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + assert_eq!(gs.mesh[&topics[0]].len(), n); - // Transform the inbound message - let message2 = &gs.data_transform.inbound_transform(m2.clone()).unwrap(); - // Transform the inbound message - let message3 = &gs.data_transform.inbound_transform(m3.clone()).unwrap(); + //heartbeat to prune some peers + gs.heartbeat(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n()); - //messages gets rejected - gs.report_message_validation_result( - &config.message_id(&message1), - &peers[0], - MessageAcceptance::Reject, 
- )
- .unwrap();
- gs.report_message_validation_result(
- &config.message_id(&message2),
- &peers[0],
- MessageAcceptance::Reject,
- )
- .unwrap();
- gs.report_message_validation_result(
- &config.message_id(&message3),
- &peers[0],
- MessageAcceptance::Reject,
- )
- .unwrap();
+ //mesh contains retain_scores best peers
+ assert!(gs.mesh[&topics[0]].is_superset(
+ &peers[(n - config.retain_scores())..]
+ .iter()
+ .cloned()
+ .collect()
+ ));
+}

- //number of invalid messages gets squared
- assert_eq!(
- gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
- 9.0 * -2.0 * 0.7
- );
+#[test]
+fn test_scoring_p1() {
+ let config = GossipsubConfig::default();
+ let mut peer_score_params = PeerScoreParams::default();
+ let topic = Topic::new("test");
+ let topic_hash = topic.hash();
+ let topic_params = TopicScoreParams {
+ time_in_mesh_weight: 2.0,
+ time_in_mesh_quantum: Duration::from_millis(50),
+ time_in_mesh_cap: 10.0,
+ topic_weight: 0.7,
+ ..TopicScoreParams::default()
+ };
+ peer_score_params
+ .topics
+ .insert(topic_hash, topic_params.clone());
+ let peer_score_thresholds = PeerScoreThresholds::default();
+
+ //build mesh with one peer
+ let (mut gs, peers, _) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+ //sleep for 2 times the mesh_quantum
+ sleep(topic_params.time_in_mesh_quantum * 2);
+ //refresh scores
+ gs.peer_score.as_mut().unwrap().0.refresh_scores();
+ assert!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0])
+ >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
+ "score should be at least 2 * time_in_mesh_weight * topic_weight"
+ );
+ assert!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0])
+ < 3.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
+ "score should be less than 3 * time_in_mesh_weight * topic_weight"
+ );
+
+ //sleep again for 2 times the mesh_quantum
+ sleep(topic_params.time_in_mesh_quantum * 2);
+ //refresh scores
+ gs.peer_score.as_mut().unwrap().0.refresh_scores();
+ assert!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0])
+ >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
+ "score should be at least 4 * time_in_mesh_weight * topic_weight"
+ );
+
+ //sleep for enough periods to reach maximum
+ sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32);
+ //refresh scores
+ gs.peer_score.as_mut().unwrap().0.refresh_scores();
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+ topic_params.time_in_mesh_cap
+ * topic_params.time_in_mesh_weight
+ * topic_params.topic_weight,
+ "score should be exactly time_in_mesh_cap * time_in_mesh_weight * topic_weight"
+ );
+}
+
+fn random_message(seq: &mut u64, topics: &Vec<TopicHash>) -> RawGossipsubMessage {
+ let mut rng = rand::thread_rng();
+ *seq += 1;
+ RawGossipsubMessage {
+ source: Some(PeerId::random()),
+ data: (0..rng.gen_range(10..30))
+ .into_iter()
+ .map(|_| rng.gen())
+ .collect(),
+ sequence_number: Some(*seq),
+ topic: topics[rng.gen_range(0..topics.len())].clone(),
+ signature: None,
+ key: None,
+ validated: true,
 }
+}
- #[test]
- fn test_scoring_p4_decay() {
- let config = GossipsubConfigBuilder::default()
- .validate_messages()
- .build()
- .unwrap();
- let mut peer_score_params = PeerScoreParams::default();
- let topic = Topic::new("test");
- let topic_hash = topic.hash();
- let mut topic_params =
TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; +#[test] +fn test_scoring_p2() { + let config = GossipsubConfig::default(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 2.0, + first_message_deliveries_cap: 10.0, + first_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..TopicScoreParams::default() + }; + peer_score_params + .topics + .insert(topic_hash, topic_params.clone()); + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //peer 0 delivers invalid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + let m1 = random_message(&mut seq, &topics); + //peer 0 delivers message first + deliver_message(&mut gs, 0, m1.clone()); + //peer 1 delivers message second + deliver_message(&mut gs, 1, m1); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 1.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, + "score should be exactly first_message_deliveries_weight * topic_weight" + ); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 0.0, + "there should be no score for second message deliveries * topic_weight" + ); + + //peer 2 delivers two new messages + deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 2.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, + "score should be exactly 2 * first_message_deliveries_weight * topic_weight" + ); + + //test decaying + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 1.0 * topic_params.first_message_deliveries_decay + * topic_params.first_message_deliveries_weight + * 
topic_params.topic_weight, + "score should be exactly first_message_deliveries_decay * \ + first_message_deliveries_weight * topic_weight" + ); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 2.0 * topic_params.first_message_deliveries_decay + * topic_params.first_message_deliveries_weight + * topic_params.topic_weight, + "score should be exactly 2 * first_message_deliveries_decay * \ + first_message_deliveries_weight * topic_weight" + ); + + //test cap + for _ in 0..topic_params.first_message_deliveries_cap as u64 { + deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); + } - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - - //message m1 gets rejected - gs.report_message_validation_result( - &config.message_id(&message1), - &peers[0], - MessageAcceptance::Reject, - ) - .unwrap(); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + topic_params.first_message_deliveries_cap + * topic_params.first_message_deliveries_weight + * topic_params.topic_weight, + "score should be exactly first_message_deliveries_cap * \ + first_message_deliveries_weight * topic_weight" + ); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); +#[test] +fn test_scoring_p3() { + let config = GossipsubConfig::default(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: -2.0, + mesh_message_deliveries_decay: 0.9, + mesh_message_deliveries_cap: 10.0, + mesh_message_deliveries_threshold: 5.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(100), + topic_weight: 0.7, + ..TopicScoreParams::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //we decay - gs.peer_score.as_mut().unwrap().0.refresh_scores(); + let mut expected_message_deliveries = 0.0; - // the number of invalids gets decayed to 0.9 and then squared in the score - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 0.9 * 0.9 * -2.0 * 0.7 - ); - } + //messages used to test window + let m1 = random_message(&mut seq, &topics); + let m2 = random_message(&mut seq, &topics); - #[test] - fn test_scoring_p5() { - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.app_specific_weight = 2.0; + //peer 1 delivers m1 + deliver_message(&mut gs, 1, m1.clone()); - //build mesh with one peer - let (mut gs, peers, _) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - 
.create_network(); + //peer 0 delivers two message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 2.0; - gs.set_application_score(&peers[0], 1.1); + sleep(Duration::from_millis(60)); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 1.1 * 2.0 - ); - } + //peer 1 delivers m2 + deliver_message(&mut gs, 1, m2.clone()); - #[test] - fn test_scoring_p6() { - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.ip_colocation_factor_threshold = 5.0; - peer_score_params.ip_colocation_factor_weight = -2.0; - - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(vec![]) - .to_subscribe(false) - .gs_config(GossipsubConfig::default()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - .create_network(); - - //create 5 peers with the same ip - let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); - let peers = vec![ - add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, true, addr.clone()), - ]; - - //create 4 other peers with other ip - let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); - let others = vec![ - add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), - ]; - - //no penalties yet - for peer in peers.iter().chain(others.iter()) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); - } + sleep(Duration::from_millis(70)); + //peer 0 delivers m1 and m2 only m2 gets counted + deliver_message(&mut gs, 0, m1); + deliver_message(&mut gs, 0, m2); + expected_message_deliveries += 1.0; - //add additional connection for 3 others with addr - for i in 0..3 { - gs.inject_connection_established( - &others[i], - &ConnectionId::new(0), - &ConnectedPoint::Dialer { - address: addr.clone(), - role_override: Endpoint::Dialer, - }, - None, - 0, - ); - } + sleep(Duration::from_millis(900)); - //penalties apply squared - for peer in peers.iter().chain(others.iter().take(3)) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); - } - //fourth other peer still no penalty - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); - - //add additional connection for 3 of the peers to addr2 - for i in 0..3 { - gs.inject_connection_established( - &peers[i], - &ConnectionId::new(0), - &ConnectedPoint::Dialer { - address: addr2.clone(), - role_override: Endpoint::Dialer, - }, - None, - 1, - ); - } + //message deliveries penalties get activated, peer 0 has only delivered 3 messages and + // therefore gets a penalty + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay - //double penalties for the first three of each - for peer in peers.iter().take(3).chain(others.iter().take(3)) { - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(peer), - (9.0 + 4.0) * -2.0 - ); - } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 + ); - //single penalties for the rest - 
for peer in peers.iter().skip(3) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); - } - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&others[3]), - 4.0 * -2.0 - ); + // peer 0 delivers a lot of messages => message_deliveries should be capped at 10 + for _ in 0..20 { + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + } - //two times same ip doesn't count twice - gs.inject_connection_established( - &peers[0], - &ConnectionId::new(0), - &ConnectedPoint::Dialer { - address: addr.clone(), - role_override: Endpoint::Dialer, - }, - None, - 2, - ); + expected_message_deliveries = 10.0; - //nothing changed - //double penalties for the first three of each - for peer in peers.iter().take(3).chain(others.iter().take(3)) { - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(peer), - (9.0 + 4.0) * -2.0 - ); - } + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //single penalties for the rest - for peer in peers.iter().skip(3) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); - } - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&others[3]), - 4.0 * -2.0 - ); + //apply 10 decays + for _ in 0..10 { + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay } - #[test] - fn test_scoring_p7_grafts_before_backoff() { - let config = GossipsubConfigBuilder::default() - .prune_backoff(Duration::from_millis(200)) - .graft_flood_threshold(Duration::from_millis(100)) - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.behaviour_penalty_weight = -2.0; - peer_score_params.behaviour_penalty_decay = 0.9; - - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - .create_network(); - - //remove peers from mesh and send prune to them => this adds a backoff for the peers - for i in 0..2 { - gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[i]); - gs.send_graft_prune( - HashMap::new(), - vec![(peers[i].clone(), vec![topics[0].clone()])] - .into_iter() - .collect(), - HashSet::new(), - ); - } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 + ); +} - //wait 50 millisecs - sleep(Duration::from_millis(50)); +#[test] +fn test_scoring_p3b() { + let config = GossipsubConfigBuilder::default() + .prune_backoff(Duration::from_millis(100)) + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: -2.0, + mesh_message_deliveries_decay: 0.9, + mesh_message_deliveries_cap: 10.0, + mesh_message_deliveries_threshold: 5.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(100), + mesh_failure_penalty_weight: -3.0, + mesh_failure_penalty_decay: 0.95, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = 
inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //first peer tries to graft - gs.handle_graft(&peers[0], vec![topics[0].clone()]); + let mut expected_message_deliveries = 0.0; - //double behaviour penalty for first peer (squared) - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 4.0 * -2.0 - ); + //add some positive score + gs.peer_score + .as_mut() + .unwrap() + .0 + .set_application_score(&peers[0], 100.0); - //wait 100 millisecs - sleep(Duration::from_millis(100)); + //peer 0 delivers two message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 2.0; - //second peer tries to graft - gs.handle_graft(&peers[1], vec![topics[0].clone()]); + sleep(Duration::from_millis(1050)); - //single behaviour penalty for second peer - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 1.0 * -2.0 - ); + //activation kicks in + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay - //test decay - gs.peer_score.as_mut().unwrap().0.refresh_scores(); + //prune peer + gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 4.0 * 0.9 * 0.9 * -2.0 - ); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 1.0 * 0.9 * 0.9 * -2.0 - ); - } + //wait backoff + sleep(Duration::from_millis(130)); - #[test] - fn test_opportunistic_grafting() { - let config = GossipsubConfigBuilder::default() - .mesh_n_low(3) - .mesh_n(5) - .mesh_n_high(7) - .mesh_outbound_min(0) //deactivate outbound handling - .opportunistic_graft_ticks(2) - .opportunistic_graft_peers(2) - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.app_specific_weight = 1.0; - let mut thresholds = PeerScoreThresholds::default(); - thresholds.opportunistic_graft_threshold = 2.0; - - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(5) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, thresholds))) - .create_network(); - - //fill mesh with 5 peers - for peer in &peers { - gs.handle_graft(peer, topics.clone()); - } + //regraft peer + gs.handle_graft(&peers[0], topics.clone()); - //add additional 5 peers - let others: Vec<_> = (0..5) - .into_iter() - .map(|_| add_peer(&mut gs, &topics, false, false)) - .collect(); + //the score should now consider p3b + let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 100.0 + expected_b3 * -3.0 * 0.7 + ); - //currently mesh equals peers - assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); + //we can also add a new p3 to the score - //give others high scores (but the first two have not high enough scores) - for i in 0..5 { - gs.set_application_score(&peers[i], 0.0 + i as f64); - } + //peer 0 delivers one message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 1.0; - //set scores for peers in the mesh - for i in 0..5 { 
- gs.set_application_score(&others[i], 0.0 + i as f64); - } + sleep(Duration::from_millis(1050)); + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay + expected_b3 *= 0.95; - //this gives a median of exactly 2.0 => should not apply opportunistic grafting - gs.heartbeat(); - gs.heartbeat(); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 100.0 + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 + ); +} - assert_eq!( - gs.mesh[&topics[0]].len(), - 5, - "should not apply opportunistic grafting" - ); +#[test] +fn test_scoring_p4_valid_message() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //reduce middle score to 1.0 giving a median of 1.0 - gs.set_application_score(&peers[2], 1.0); + //peer 0 delivers valid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); - //opportunistic grafting after two heartbeats + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - gs.heartbeat(); - assert_eq!( - gs.mesh[&topics[0]].len(), - 5, - "should not apply opportunistic grafting after first tick" - ); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - gs.heartbeat(); + //message m1 gets validated + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Accept, + ) + .unwrap(); - assert_eq!( - gs.mesh[&topics[0]].len(), - 7, - "opportunistic grafting should have added 2 peers" - ); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +} - assert!( - gs.mesh[&topics[0]].is_superset(&peers.iter().cloned().collect()), - "old peers are still part of the mesh" - ); +#[test] +fn test_scoring_p4_invalid_signature() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, 
//deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + + //peer 0 delivers message with invalid signature + let m = random_message(&mut seq, &topics); + + gs.inject_event( + peers[0], + ConnectionId::new(0), + HandlerEvent::Message { + rpc: GossipsubRpc { + messages: vec![], + subscriptions: vec![], + control_msgs: vec![], + }, + invalid_messages: vec![(m, ValidationError::InvalidSignature)], + }, + ); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} - assert!( - gs.mesh[&topics[0]].is_disjoint(&others.iter().cloned().take(2).collect()), - "peers below or equal to median should not be added in opportunistic grafting" - ); - } +#[test] +fn test_scoring_p4_message_from_self() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - #[test] - fn test_ignore_graft_from_unknown_topic() { - //build gossipsub without subscribing to any topics - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(vec![]) - .to_subscribe(false) - .create_network(); + //peer 0 delivers invalid message from self + let mut m = random_message(&mut seq, &topics); + m.source = Some(*gs.publish_config.get_own_id().unwrap()); - //handle an incoming graft for some topic - gs.handle_graft(&PeerId::random(), vec![Topic::new("test").hash()]); + deliver_message(&mut gs, 0, m); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} - //assert that no prune got created - assert_eq!( - count_control_msgs(&gs, |_, a| match a { - GossipsubControlAction::Prune { .. 
} => true, - _ => false, - }), - 0, - "we should not prune after graft in unknown topic" - ); - } +#[test] +fn test_scoring_p4_ignored_message() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - #[test] - fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { - let config = GossipsubConfig::default(); - //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(false) - .create_network(); + //peer 0 delivers ignored message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); - //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //receive a message - let mut seq = 0; - let m1 = random_message(&mut seq, &topics); + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + //message m1 gets ignored + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Ignore, + ) + .unwrap(); - let id = config.message_id(&message1); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +} - gs.handle_received_message(m1.clone(), &PeerId::random()); +#[test] +fn test_scoring_p4_application_invalidated_message() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = 
PeerScoreThresholds::default();
+
+ //build mesh with two peers
+ let (mut gs, peers, topics) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config.clone())
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+ let mut seq = 0;
+ let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| {
+ gs.handle_received_message(msg, &peers[index]);
+ };
- //clear events
- gs.events.clear();
+ //peer 0 delivers invalid message
+ let m1 = random_message(&mut seq, &topics);
+ deliver_message(&mut gs, 0, m1.clone());
- //the first gossip_retransimission many iwants return the valid message, all others are
- // ignored.
- for _ in 0..(2 * config.gossip_retransimission() + 10) {
- gs.handle_iwant(&peer, vec![id.clone()]);
- }
+ assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- assert_eq!(
- gs.events
- .iter()
- .map(|e| match e {
- NetworkBehaviourAction::NotifyHandler { event, .. } => {
- if let GossipsubHandlerIn::Message(ref m) = **event {
- let event = proto_to_message(m);
- event.messages.len()
- } else {
- 0
- }
- }
- _ => 0,
- })
- .sum::<usize>(),
- config.gossip_retransimission() as usize,
- "not more then gossip_retransmission many messages get sent back"
- );
- }
+ // Transform the inbound message
+ let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
- #[test]
- fn test_ignore_too_many_ihaves() {
- let config = GossipsubConfigBuilder::default()
- .max_ihave_messages(10)
- .build()
- .unwrap();
- //build gossipsub with full mesh
- let (mut gs, _, topics) = inject_nodes1()
- .peer_no(config.mesh_n_high())
- .topics(vec!["test".into()])
- .to_subscribe(false)
- .gs_config(config.clone())
- .create_network();
-
- //add another peer not in the mesh
- let peer = add_peer(&mut gs, &topics, false, false);
-
- //peer has 20 messages
- let mut seq = 0;
- let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect();
-
- //peer sends us one ihave for each message in order
- for raw_message in &messages {
- // Transform the inbound message
- let message = &gs
- .data_transform
- .inbound_transform(raw_message.clone())
- .unwrap();
+ //message m1 gets rejected
+ gs.report_message_validation_result(
+ &config.message_id(message1),
+ &peers[0],
+ MessageAcceptance::Reject,
+ )
+ .unwrap();
- gs.handle_ihave(
- &peer,
- vec![(topics[0].clone(), vec![config.message_id(&message)])],
- );
- }
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+ -2.0 * 0.7
+ );
+}
- let first_ten: HashSet<_> = messages
- .iter()
- .take(10)
- .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap())
- .map(|m| config.message_id(&m))
- .collect();
+#[test]
+fn test_scoring_p4_application_invalid_message_from_two_peers() {
+ let config = GossipsubConfigBuilder::default()
+ .validate_messages()
+ .build()
+ .unwrap();
+ let mut peer_score_params = PeerScoreParams::default();
+ let topic = Topic::new("test");
+ let topic_hash = topic.hash();
+ let topic_params = TopicScoreParams {
+ time_in_mesh_weight: 0.0, //deactivate time in mesh
+ first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+ mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
+ mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ invalid_message_deliveries_weight: -2.0,
+ invalid_message_deliveries_decay: 0.9,
+ topic_weight: 0.7,
+ ..Default::default()
+ };
+
peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //we send iwant only for the first 10 messages - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - assert_eq!( - message_ids.len(), - 1, - "each iwant should have one message \ - corresponding to one ihave" - ); - - assert!(first_ten.contains(&message_ids[0])); - - true - }, - _ => false, - }), - 10, - "exactly the first ten ihaves should be processed and one iwant for each created" - ); + //peer 0 delivers invalid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + + //peer 1 delivers same message + deliver_message(&mut gs, 1, m1); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); + + //message m1 gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + -2.0 * 0.7 + ); +} - //after a heartbeat everything is forgotten - gs.heartbeat(); - for raw_message in messages[10..].iter() { - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); +#[test] +fn test_scoring_p4_three_application_invalid_messages() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - gs.handle_ihave( - &peer, - vec![(topics[0].clone(), 
vec![config.message_id(&message)])], - ); - } + //peer 0 delivers two invalid message + let m1 = random_message(&mut seq, &topics); + let m2 = random_message(&mut seq, &topics); + let m3 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + deliver_message(&mut gs, 0, m2.clone()); + deliver_message(&mut gs, 0, m3.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(m2).unwrap(); + // Transform the inbound message + let message3 = &gs.data_transform.inbound_transform(m3).unwrap(); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + //messages gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + gs.report_message_validation_result( + &config.message_id(message2), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + gs.report_message_validation_result( + &config.message_id(message3), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + //number of invalid messages gets squared + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 9.0 * -2.0 * 0.7 + ); +} - //we sent iwant for all 20 messages - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - assert_eq!( - message_ids.len(), - 1, - "each iwant should have one message \ - corresponding to one ihave" - ); - true - }, - _ => false, - }), - 20, - "all 20 should get sent" - ); - } +#[test] +fn test_scoring_p4_decay() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - #[test] - fn test_ignore_too_many_messages_in_ihave() { - let config = GossipsubConfigBuilder::default() - .max_ihave_messages(10) - .max_ihave_length(10) - .build() - .unwrap(); - //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config.clone()) - .create_network(); - - //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); - - //peer has 20 messages - let mut seq = 0; - let message_ids: Vec<_> = 
(0..20) - .map(|_| random_message(&mut seq, &topics)) - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .map(|msg| config.message_id(&msg)) - .collect(); + //peer 0 delivers invalid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + //message m1 gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); + + //we decay + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + // the number of invalids gets decayed to 0.9 and then squared in the score + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 0.9 * 0.9 * -2.0 * 0.7 + ); +} - //peer sends us three ihaves - gs.handle_ihave( - &peer, - vec![( - topics[0].clone(), - message_ids[0..8].iter().cloned().collect(), - )], - ); - gs.handle_ihave( - &peer, - vec![( - topics[0].clone(), - message_ids[0..12].iter().cloned().collect(), - )], - ); - gs.handle_ihave( - &peer, - vec![( - topics[0].clone(), - message_ids[0..20].iter().cloned().collect(), - )], - ); +#[test] +fn test_scoring_p5() { + let peer_score_params = PeerScoreParams { + app_specific_weight: 2.0, + ..PeerScoreParams::default() + }; - let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); + //build mesh with one peer + let (mut gs, peers, _) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + gs.set_application_score(&peers[0], 1.1); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 1.1 * 2.0 + ); +} - //we send iwant only for the first 10 messages - let mut sum = 0; - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - assert!(first_twelve.is_superset(&message_ids.iter().collect())); - sum += message_ids.len(); - true - }, - _ => false, - }), - 2, - "the third ihave should get ignored and no iwant sent" +#[test] +fn test_scoring_p6() { + let peer_score_params = PeerScoreParams { + ip_colocation_factor_threshold: 5.0, + ip_colocation_factor_weight: -2.0, + ..Default::default() + }; + + let (mut gs, _, _) = inject_nodes1() + .peer_no(0) + .topics(vec![]) + .to_subscribe(false) + .gs_config(GossipsubConfig::default()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + //create 5 peers with the same ip + let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); + let peers = vec![ + add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), + add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), + add_peer_with_addr(&mut gs, &vec![], true, true, addr.clone()), + ]; + + //create 4 other peers with other ip + let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); + let others = vec![ + add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), + add_peer_with_addr(&mut gs, &vec![], false, false, 
addr2.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), + ]; + + //no penalties yet + for peer in peers.iter().chain(others.iter()) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); + } + + //add additional connection for 3 others with addr + for id in others.iter().take(3) { + gs.inject_connection_established( + id, + &ConnectionId::new(0), + &ConnectedPoint::Dialer { + address: addr.clone(), + role_override: Endpoint::Dialer, + }, + None, + 0, ); + } - assert_eq!(sum, 10, "exactly the first ten ihaves should be processed"); + //penalties apply squared + for peer in peers.iter().chain(others.iter().take(3)) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + //fourth other peer still no penalty + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); - //after a heartbeat everything is forgotten - gs.heartbeat(); - gs.handle_ihave( - &peer, - vec![( - topics[0].clone(), - message_ids[10..20].iter().cloned().collect(), - )], + //add additional connection for 3 of the peers to addr2 + for peer in peers.iter().take(3) { + gs.inject_connection_established( + peer, + &ConnectionId::new(0), + &ConnectedPoint::Dialer { + address: addr2.clone(), + role_override: Endpoint::Dialer, + }, + None, + 1, ); + } - //we sent 20 iwant messages - let mut sum = 0; + //double penalties for the first three of each + for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - sum += message_ids.len(); - true - }, - _ => false, - }), - 3 + gs.peer_score.as_ref().unwrap().0.score(peer), + (9.0 + 4.0) * -2.0 ); - assert_eq!(sum, 20, "exactly 20 iwants should get sent"); } - #[test] - fn test_limit_number_of_message_ids_inside_ihave() { - let config = GossipsubConfigBuilder::default() - .max_ihave_messages(10) - .max_ihave_length(100) - .build() - .unwrap(); - //build gossipsub with full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config.clone()) - .create_network(); - - //graft to all peers to really fill the mesh with all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - //add two other peers not in the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //receive 200 messages from another peer - let mut seq = 0; - for _ in 0..200 { - gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); - } - - //emit gossip - gs.emit_gossip(); - - // both peers should have gotten 100 random ihave messages, to asser the randomness, we - // assert that both have not gotten the same set of messages, but have an intersection - // (which is the case with very high probability, the probabiltity of failure is < 10^-58). 
- - let mut ihaves1 = HashSet::new(); - let mut ihaves2 = HashSet::new(); - + //single penalties for the rest + for peer in peers.iter().skip(3) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&others[3]), + 4.0 * -2.0 + ); + + //two times same ip doesn't count twice + gs.inject_connection_established( + &peers[0], + &ConnectionId::new(0), + &ConnectedPoint::Dialer { + address: addr, + role_override: Endpoint::Dialer, + }, + None, + 2, + ); + + //nothing changed + //double penalties for the first three of each + for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IHave { message_ids, .. } => { - if p == &p1 { - ihaves1 = message_ids.iter().cloned().collect(); - true - } else if p == &p2 { - ihaves2 = message_ids.iter().cloned().collect(); - true - } else { - false - } - } - _ => false, - }), - 2, - "should have emitted one ihave to p1 and one to p2" + gs.peer_score.as_ref().unwrap().0.score(peer), + (9.0 + 4.0) * -2.0 ); + } - assert_eq!( - ihaves1.len(), - 100, - "should have sent 100 message ids in ihave to p1" - ); - assert_eq!( - ihaves2.len(), - 100, - "should have sent 100 message ids in ihave to p2" - ); - assert!( - ihaves1 != ihaves2, - "should have sent different random messages to p1 and p2 \ - (this may fail with a probability < 10^-58" - ); - assert!( - ihaves1.intersection(&ihaves2).into_iter().count() > 0, - "should have sent random messages with some common messages to p1 and p2 \ - (this may fail with a probability < 10^-58" + //single penalties for the rest + for peer in peers.iter().skip(3) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&others[3]), + 4.0 * -2.0 + ); +} + +#[test] +fn test_scoring_p7_grafts_before_backoff() { + let config = GossipsubConfigBuilder::default() + .prune_backoff(Duration::from_millis(200)) + .graft_flood_threshold(Duration::from_millis(100)) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -2.0, + behaviour_penalty_decay: 0.9, + ..Default::default() + }; + + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + //remove peers from mesh and send prune to them => this adds a backoff for the peers + for peer in peers.iter().take(2) { + gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); + gs.send_graft_prune( + HashMap::new(), + HashMap::from([(*peer, vec![topics[0].clone()])]), + HashSet::new(), ); } - #[test] - fn test_iwant_penalties() { - let _ = env_logger::try_init(); + //wait 50 millisecs + sleep(Duration::from_millis(50)); + + //first peer tries to graft + gs.handle_graft(&peers[0], vec![topics[0].clone()]); + + //double behaviour penalty for first peer (squared) + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 4.0 * -2.0 + ); + + //wait 100 millisecs + sleep(Duration::from_millis(100)); + + //second peer tries to graft + gs.handle_graft(&peers[1], vec![topics[0].clone()]); + + //single behaviour penalty for second peer + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 1.0 * -2.0 + ); + + //test decay + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + assert_eq!( + 
gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 4.0 * 0.9 * 0.9 * -2.0 + ); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 1.0 * 0.9 * 0.9 * -2.0 + ); +} - let config = GossipsubConfigBuilder::default() - .iwant_followup_time(Duration::from_secs(4)) - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.behaviour_penalty_weight = -1.0; - - // fill the mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - .create_network(); - - // graft to all peers to really fill the mesh with all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } +#[test] +fn test_opportunistic_grafting() { + let config = GossipsubConfigBuilder::default() + .mesh_n_low(3) + .mesh_n(5) + .mesh_n_high(7) + .mesh_outbound_min(0) //deactivate outbound handling + .opportunistic_graft_ticks(2) + .opportunistic_graft_peers(2) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + app_specific_weight: 1.0, + ..Default::default() + }; + let thresholds = PeerScoreThresholds { + opportunistic_graft_threshold: 2.0, + ..Default::default() + }; - // add 100 more peers - let other_peers: Vec<_> = (0..100) - .map(|_| add_peer(&mut gs, &topics, false, false)) - .collect(); + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(5) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, thresholds))) + .create_network(); + + //fill mesh with 5 peers + for peer in &peers { + gs.handle_graft(peer, topics.clone()); + } - // each peer sends us an ihave containing each two message ids - let mut first_messages = Vec::new(); - let mut second_messages = Vec::new(); - let mut seq = 0; - for peer in &other_peers { - let msg1 = random_message(&mut seq, &topics); - let msg2 = random_message(&mut seq, &topics); - - // Decompress the raw message and calculate the message id. 
- // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(msg1.clone()).unwrap(); - - // Transform the inbound message - let message2 = &gs.data_transform.inbound_transform(msg2.clone()).unwrap(); - - first_messages.push(msg1.clone()); - second_messages.push(msg2.clone()); - gs.handle_ihave( - peer, - vec![( - topics[0].clone(), - vec![config.message_id(&message1), config.message_id(&message2)], - )], - ); - } + //add additional 5 peers + let others: Vec<_> = (0..5) + .into_iter() + .map(|_| add_peer(&mut gs, &topics, false, false)) + .collect(); - // the peers send us all the first message ids in time - for (index, peer) in other_peers.iter().enumerate() { - gs.handle_received_message(first_messages[index].clone(), &peer); - } + //currently mesh equals peers + assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); - // now we do a heartbeat no penalization should have been applied yet - gs.heartbeat(); + //give others high scores (but the first two have not high enough scores) + for (i, peer) in peers.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); + } - for peer in &other_peers { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); - } + //set scores for peers in the mesh + for (i, peer) in others.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); + } - // receive the first twenty of the other peers then send their response - for (index, peer) in other_peers.iter().enumerate().take(20) { - gs.handle_received_message(second_messages[index].clone(), &peer); - } + //this gives a median of exactly 2.0 => should not apply opportunistic grafting + gs.heartbeat(); + gs.heartbeat(); + + assert_eq!( + gs.mesh[&topics[0]].len(), + 5, + "should not apply opportunistic grafting" + ); + + //reduce middle score to 1.0 giving a median of 1.0 + gs.set_application_score(&peers[2], 1.0); + + //opportunistic grafting after two heartbeats + + gs.heartbeat(); + assert_eq!( + gs.mesh[&topics[0]].len(), + 5, + "should not apply opportunistic grafting after first tick" + ); + + gs.heartbeat(); + + assert_eq!( + gs.mesh[&topics[0]].len(), + 7, + "opportunistic grafting should have added 2 peers" + ); + + assert!( + gs.mesh[&topics[0]].is_superset(&peers.iter().cloned().collect()), + "old peers are still part of the mesh" + ); + + assert!( + gs.mesh[&topics[0]].is_disjoint(&others.iter().cloned().take(2).collect()), + "peers below or equal to median should not be added in opportunistic grafting" + ); +} - // sleep for the promise duration - sleep(Duration::from_secs(4)); +#[test] +fn test_ignore_graft_from_unknown_topic() { + //build gossipsub without subscribing to any topics + let (mut gs, _, _) = inject_nodes1() + .peer_no(0) + .topics(vec![]) + .to_subscribe(false) + .create_network(); + + //handle an incoming graft for some topic + gs.handle_graft(&PeerId::random(), vec![Topic::new("test").hash()]); + + //assert that no prune got created + assert_eq!( + count_control_msgs(&gs, |_, a| matches!( + a, + GossipsubControlAction::Prune { .. 
} + )), + 0, + "we should not prune after graft in unknown topic" + ); +} - // now we do a heartbeat to apply penalization - gs.heartbeat(); +#[test] +fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { + let config = GossipsubConfig::default(); + //build gossipsub with full mesh + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); - // now we get the second messages from the last 80 peers. - for (index, peer) in other_peers.iter().enumerate() { - if index > 19 { - gs.handle_received_message(second_messages[index].clone(), &peer); - } - } + //add another peer not in the mesh + let peer = add_peer(&mut gs, &topics, false, false); - // no further penalizations should get applied - gs.heartbeat(); + //receive a message + let mut seq = 0; + let m1 = random_message(&mut seq, &topics); - // Only the last 80 peers should be penalized for not responding in time - let mut not_penalized = 0; - let mut single_penalized = 0; - let mut double_penalized = 0; - - for (i, peer) in other_peers.iter().enumerate() { - let score = gs.peer_score.as_ref().unwrap().0.score(peer); - if score == 0.0 { - not_penalized += 1; - } else if score == -1.0 { - assert!(i > 9); - single_penalized += 1; - } else if score == -4.0 { - assert!(i > 9); - double_penalized += 1 - } else { - println!("{}", peer); - println!("{}", score); - assert!(false, "Invalid score of peer") - } - } + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - assert_eq!(not_penalized, 20); - assert_eq!(single_penalized, 80); - assert_eq!(double_penalized, 0); - } + let id = config.message_id(message1); - #[test] - fn test_publish_to_floodsub_peers_without_flood_publish() { - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); - let (mut gs, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_low() - 1) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config) - .create_network(); - - //add two floodsub peer, one explicit, one implicit - let p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + gs.handle_received_message(m1, &PeerId::random()); - //p1 and p2 are not in the mesh - assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); + //clear events + gs.events.clear(); - //publish a message - let publish_data = vec![0; 42]; - gs.publish(Topic::new("test"), publish_data).unwrap(); + //the first gossip_retransimission many iwants return the valid message, all others are + // ignored. + for _ in 0..(2 * config.gossip_retransimission() + 10) { + gs.handle_iwant(&peer, vec![id.clone()]); + } - // Collect publish messages to floodsub peers - let publishes = gs - .events + assert_eq!( + gs.events .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { - if peer_id == &p1 || peer_id == &p2 { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } - } + .map(|e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. 
} => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + event.messages.len() + } else { + 0 } - collected_publish } - _ => collected_publish, - }); + _ => 0, + }) + .sum::(), + config.gossip_retransimission() as usize, + "not more then gossip_retransmission many messages get sent back" + ); +} - assert_eq!( - publishes.len(), - 2, - "Should send a publish message to all floodsub peers" +#[test] +fn test_ignore_too_many_ihaves() { + let config = GossipsubConfigBuilder::default() + .max_ihave_messages(10) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .create_network(); + + //add another peer not in the mesh + let peer = add_peer(&mut gs, &topics, false, false); + + //peer has 20 messages + let mut seq = 0; + let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); + + //peer sends us one ihave for each message in order + for raw_message in &messages { + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), vec![config.message_id(message)])], ); } - #[test] - fn test_do_not_use_floodsub_in_fanout() { - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() + let first_ten: HashSet<_> = messages + .iter() + .take(10) + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .map(|m| config.message_id(&m)) + .collect(); + + //we send iwant only for the first 10 messages + assert_eq!( + count_control_msgs(&gs, |p, action| p == &peer + && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))), + 10, + "exactly the first ten ihaves should be processed and one iwant for each created" + ); + + //after a heartbeat everything is forgotten + gs.heartbeat(); + for raw_message in messages[10..].iter() { + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) .unwrap(); - let (mut gs, _, _) = inject_nodes1() - .peer_no(config.mesh_n_low() - 1) - .topics(Vec::new()) - .to_subscribe(false) - .gs_config(config) - .create_network(); - - let topic = Topic::new("test"); - let topics = vec![topic.hash()]; - - //add two floodsub peer, one explicit, one implicit - let p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - //publish a message - let publish_data = vec![0; 42]; - gs.publish(Topic::new("test"), publish_data).unwrap(); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), vec![config.message_id(message)])], + ); + } - // Collect publish messages to floodsub peers - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { - if peer_id == &p1 || peer_id == &p2 { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } - } - } - collected_publish - } - _ => collected_publish, - }); + //we sent iwant for all 20 messages + assert_eq!( + count_control_msgs(&gs, |p, action| p == &peer + && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1)), + 20, + "all 20 should get sent" + ); +} - assert_eq!( - publishes.len(), - 2, - "Should send a publish message to all floodsub peers" - ); +#[test] +fn test_ignore_too_many_messages_in_ihave() { + let config = GossipsubConfigBuilder::default() + .max_ihave_messages(10) + .max_ihave_length(10) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .create_network(); + + //add another peer not in the mesh + let peer = add_peer(&mut gs, &topics, false, false); + + //peer has 20 messages + let mut seq = 0; + let message_ids: Vec<_> = (0..20) + .map(|_| random_message(&mut seq, &topics)) + .map(|msg| gs.data_transform.inbound_transform(msg).unwrap()) + .map(|msg| config.message_id(&msg)) + .collect(); + + //peer sends us three ihaves + gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[0..12].to_vec())], + ); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[0..20].to_vec())], + ); + + let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); + + //we send iwant only for the first 10 messages + let mut sum = 0; + assert_eq!( + count_control_msgs(&gs, |p, action| match action { + GossipsubControlAction::IWant { message_ids } => + p == &peer && { + assert!(first_twelve.is_superset(&message_ids.iter().collect())); + sum += message_ids.len(); + true + }, + _ => false, + }), + 2, + "the third ihave should get ignored and no iwant sent" + ); + + assert_eq!(sum, 10, "exactly the first ten ihaves should be processed"); + + //after a heartbeat everything is forgotten + gs.heartbeat(); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[10..20].to_vec())], + ); + + //we sent 20 iwant messages + let mut sum = 0; + assert_eq!( + count_control_msgs(&gs, |p, action| match action { + GossipsubControlAction::IWant { message_ids } => + p == &peer && { + sum += message_ids.len(); + true + }, + _ => false, + }), + 3 + ); + assert_eq!(sum, 20, "exactly 20 iwants should get sent"); +} - assert!( - !gs.fanout[&topics[0]].contains(&p1) && !gs.fanout[&topics[0]].contains(&p2), - "Floodsub peers are not allowed in fanout" - ); +#[test] +fn test_limit_number_of_message_ids_inside_ihave() { + let config = GossipsubConfigBuilder::default() + .max_ihave_messages(10) + .max_ihave_length(100) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + //graft to all peers to really fill the mesh with all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_dont_add_floodsub_peers_to_mesh_on_join() { - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(Vec::new()) - .to_subscribe(false) - .create_network(); - - let topic = 
Topic::new("test"); - let topics = vec![topic.hash()]; - - //add two floodsub peer, one explicit, one implicit - let _p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - - gs.join(&topics[0]); + //add two other peers not in the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); - assert!( - gs.mesh[&topics[0]].is_empty(), - "Floodsub peers should not get added to mesh" - ); + //receive 200 messages from another peer + let mut seq = 0; + for _ in 0..200 { + gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); } - #[test] - fn test_dont_send_px_to_old_gossipsub_peers() { - let (mut gs, _, topics) = inject_nodes1() - .peer_no(0) - .topics(vec!["test".into()]) - .to_subscribe(false) - .create_network(); - - //add an old gossipsub peer - let p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Gossipsub), - ); + //emit gossip + gs.emit_gossip(); + + // both peers should have gotten 100 random ihave messages, to asser the randomness, we + // assert that both have not gotten the same set of messages, but have an intersection + // (which is the case with very high probability, the probabiltity of failure is < 10^-58). + + let mut ihaves1 = HashSet::new(); + let mut ihaves2 = HashSet::new(); + + assert_eq!( + count_control_msgs(&gs, |p, action| match action { + GossipsubControlAction::IHave { message_ids, .. } => { + if p == &p1 { + ihaves1 = message_ids.iter().cloned().collect(); + true + } else if p == &p2 { + ihaves2 = message_ids.iter().cloned().collect(); + true + } else { + false + } + } + _ => false, + }), + 2, + "should have emitted one ihave to p1 and one to p2" + ); + + assert_eq!( + ihaves1.len(), + 100, + "should have sent 100 message ids in ihave to p1" + ); + assert_eq!( + ihaves2.len(), + 100, + "should have sent 100 message ids in ihave to p2" + ); + assert!( + ihaves1 != ihaves2, + "should have sent different random messages to p1 and p2 \ + (this may fail with a probability < 10^-58" + ); + assert!( + ihaves1.intersection(&ihaves2).into_iter().count() > 0, + "should have sent random messages with some common messages to p1 and p2 \ + (this may fail with a probability < 10^-58" + ); +} - //prune the peer - gs.send_graft_prune( - HashMap::new(), - vec![(p1.clone(), topics.clone())].into_iter().collect(), - HashSet::new(), - ); +#[test] +fn test_iwant_penalties() { + let _ = env_logger::try_init(); - //check that prune does not contain px - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Prune { peers: px, .. 
} => !px.is_empty(), - _ => false, - }), - 0, - "Should not send px to floodsub peers" - ); + let config = GossipsubConfigBuilder::default() + .iwant_followup_time(Duration::from_secs(4)) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -1.0, + ..Default::default() + }; + + // fill the mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + // graft to all peers to really fill the mesh with all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_dont_send_floodsub_peers_in_px() { - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //add two floodsub peers - let _p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + // add 100 more peers + let other_peers: Vec<_> = (0..100) + .map(|_| add_peer(&mut gs, &topics, false, false)) + .collect(); - //prune only mesh node - gs.send_graft_prune( - HashMap::new(), - vec![(peers[0].clone(), topics.clone())] - .into_iter() - .collect(), - HashSet::new(), - ); + // each peer sends us an ihave containing each two message ids + let mut first_messages = Vec::new(); + let mut second_messages = Vec::new(); + let mut seq = 0; + for peer in &other_peers { + let msg1 = random_message(&mut seq, &topics); + let msg2 = random_message(&mut seq, &topics); - //check that px in prune message is empty - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Prune { peers: px, .. } => !px.is_empty(), - _ => false, - }), - 0, - "Should not include floodsub peers in px" + // Decompress the raw message and calculate the message id. 
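// Illustrative sketch (hypothetical helper, not from the patch): `test_iwant_penalties`,
// whose setup appears just above, relies on the same quadratic behaviour penalty for peers
// that leave IWANT promises unanswered past `iwant_followup_time`. With
// `behaviour_penalty_weight = -1.0`, `n` broken promises are expected to score `-1.0 * n^2`,
// which is where the 0.0 / -1.0 / -4.0 buckets asserted further down come from.
fn expected_iwant_penalty(broken_promises: u32, weight: f64) -> f64 {
    weight * f64::from(broken_promises).powi(2)
}

#[test]
fn iwant_penalty_buckets_sketch() {
    assert_eq!(expected_iwant_penalty(0, -1.0), 0.0);
    assert_eq!(expected_iwant_penalty(1, -1.0), -1.0);
    assert_eq!(expected_iwant_penalty(2, -1.0), -4.0);
}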
+ // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(msg1.clone()).unwrap(); + + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(msg2.clone()).unwrap(); + + first_messages.push(msg1.clone()); + second_messages.push(msg2.clone()); + gs.handle_ihave( + peer, + vec![( + topics[0].clone(), + vec![config.message_id(message1), config.message_id(message2)], + )], ); } - #[test] - fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { - let (mut gs, _, topics) = inject_nodes1() - .peer_no(0) - .topics(vec!["test".into()]) - .to_subscribe(false) - .create_network(); - - //add two floodsub peer, one explicit, one implicit - let _p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - true, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None); + // the peers send us all the first message ids in time + for (index, peer) in other_peers.iter().enumerate() { + gs.handle_received_message(first_messages[index].clone(), peer); + } - gs.heartbeat(); + // now we do a heartbeat no penalization should have been applied yet + gs.heartbeat(); - assert!( - gs.mesh[&topics[0]].is_empty(), - "Floodsub peers should not get added to mesh" - ); + for peer in &other_peers { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); } - // Some very basic test of public api methods. - #[test] - fn test_public_api() { - let (gs, peers, topic_hashes) = inject_nodes1() - .peer_no(4) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - let peers = peers.into_iter().collect::>(); + // receive the first twenty of the other peers then send their response + for (index, peer) in other_peers.iter().enumerate().take(20) { + gs.handle_received_message(second_messages[index].clone(), peer); + } - assert_eq!( - gs.topics().cloned().collect::>(), - topic_hashes, - "Expected topics to match registered topic." - ); + // sleep for the promise duration + sleep(Duration::from_secs(4)); - assert_eq!( - gs.mesh_peers(&TopicHash::from_raw("topic1")) - .cloned() - .collect::>(), - peers, - "Expected peers for a registered topic to contain all peers." - ); + // now we do a heartbeat to apply penalization + gs.heartbeat(); - assert_eq!( - gs.all_mesh_peers().cloned().collect::>(), - peers, - "Expected all_peers to contain all peers." - ); + // now we get the second messages from the last 80 peers. 
+ for (index, peer) in other_peers.iter().enumerate() { + if index > 19 { + gs.handle_received_message(second_messages[index].clone(), peer); + } } - #[test] - fn test_msg_id_fn_only_called_once_with_fast_message_ids() { - struct Pointers { - slow_counter: u32, - fast_counter: u32, + // no further penalizations should get applied + gs.heartbeat(); + + // Only the last 80 peers should be penalized for not responding in time + let mut not_penalized = 0; + let mut single_penalized = 0; + let mut double_penalized = 0; + + for (i, peer) in other_peers.iter().enumerate() { + let score = gs.peer_score.as_ref().unwrap().0.score(peer); + if score == 0.0 { + not_penalized += 1; + } else if score == -1.0 { + assert!(i > 9); + single_penalized += 1; + } else if score == -4.0 { + assert!(i > 9); + double_penalized += 1 + } else { + println!("{}", peer); + println!("{}", score); + panic!("Invalid score of peer"); } + } - let mut counters = Pointers { - slow_counter: 0, - fast_counter: 0, - }; + assert_eq!(not_penalized, 20); + assert_eq!(single_penalized, 80); + assert_eq!(double_penalized, 0); +} - let counters_pointer: *mut Pointers = &mut counters; +#[test] +fn test_publish_to_floodsub_peers_without_flood_publish() { + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_low() - 1) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + //add two floodsub peer, one explicit, one implicit + let p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + //p1 and p2 are not in the mesh + assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); + + //publish a message + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect publish messages to floodsub peers + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { + if peer_id == &p1 || peer_id == &p2 { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push(s.clone()); + } + } + } + collected_publish + } + _ => collected_publish, + }); - let counters_address = counters_pointer as u64; + assert_eq!( + publishes.len(), + 2, + "Should send a publish message to all floodsub peers" + ); +} - macro_rules! 
get_counters_pointer { - ($m: expr) => {{ - let mut address_bytes: [u8; 8] = Default::default(); - address_bytes.copy_from_slice($m.as_slice()); - let address = u64::from_be_bytes(address_bytes); - address as *mut Pointers - }}; - } +#[test] +fn test_do_not_use_floodsub_in_fanout() { + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let (mut gs, _, _) = inject_nodes1() + .peer_no(config.mesh_n_low() - 1) + .topics(Vec::new()) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two floodsub peer, one explicit, one implicit + let p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + //publish a message + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect publish messages to floodsub peers + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { + if peer_id == &p1 || peer_id == &p2 { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push(s.clone()); + } + } + } + collected_publish + } + _ => collected_publish, + }); - macro_rules! get_counters_and_hash { - ($m: expr) => {{ - let mut hasher = DefaultHasher::new(); - $m.hash(&mut hasher); - let id = hasher.finish().to_be_bytes().into(); - (id, get_counters_pointer!($m)) - }}; - } + assert_eq!( + publishes.len(), + 2, + "Should send a publish message to all floodsub peers" + ); - let message_id_fn = |m: &GossipsubMessage| -> MessageId { - let (mut id, mut counters_pointer): (MessageId, *mut Pointers) = - get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).slow_counter += 1; - } - id.0.reverse(); - id - }; - let fast_message_id_fn = |m: &RawGossipsubMessage| -> FastMessageId { - let (id, mut counters_pointer) = get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).fast_counter += 1; - } - id - }; - let config = GossipsubConfigBuilder::default() - .message_id_fn(message_id_fn) - .fast_message_id_fn(fast_message_id_fn) - .build() - .unwrap(); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(0) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let message = RawGossipsubMessage { - source: None, - data: counters_address.to_be_bytes().to_vec(), - sequence_number: None, - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; + assert!( + !gs.fanout[&topics[0]].contains(&p1) && !gs.fanout[&topics[0]].contains(&p2), + "Floodsub peers are not allowed in fanout" + ); +} - for _ in 0..5 { - gs.handle_received_message(message.clone(), &PeerId::random()); - } +#[test] +fn test_dont_add_floodsub_peers_to_mesh_on_join() { + let (mut gs, _, _) = inject_nodes1() + .peer_no(0) + .topics(Vec::new()) + .to_subscribe(false) + .create_network(); + + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two floodsub peer, one explicit, one implicit + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, 
Multiaddr::empty(), None); + + gs.join(&topics[0]); + + assert!( + gs.mesh[&topics[0]].is_empty(), + "Floodsub peers should not get added to mesh" + ); +} + +#[test] +fn test_dont_send_px_to_old_gossipsub_peers() { + let (mut gs, _, topics) = inject_nodes1() + .peer_no(0) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); + + //add an old gossipsub peer + let p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Gossipsub), + ); + + //prune the peer + gs.send_graft_prune( + HashMap::new(), + vec![(p1, topics.clone())].into_iter().collect(), + HashSet::new(), + ); + + //check that prune does not contain px + assert_eq!( + count_control_msgs(&gs, |_, m| match m { + GossipsubControlAction::Prune { peers: px, .. } => !px.is_empty(), + _ => false, + }), + 0, + "Should not send px to floodsub peers" + ); +} + +#[test] +fn test_dont_send_floodsub_peers_in_px() { + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + //add two floodsub peers + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + //prune only mesh node + gs.send_graft_prune( + HashMap::new(), + vec![(peers[0], topics.clone())].into_iter().collect(), + HashSet::new(), + ); + + //check that px in prune message is empty + assert_eq!( + count_control_msgs(&gs, |_, m| match m { + GossipsubControlAction::Prune { peers: px, .. } => !px.is_empty(), + _ => false, + }), + 0, + "Should not include floodsub peers in px" + ); +} + +#[test] +fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { + let (mut gs, _, topics) = inject_nodes1() + .peer_no(0) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); + + //add two floodsub peer, one explicit, one implicit + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + true, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None); + + gs.heartbeat(); + + assert!( + gs.mesh[&topics[0]].is_empty(), + "Floodsub peers should not get added to mesh" + ); +} + +// Some very basic test of public api methods. +#[test] +fn test_public_api() { + let (gs, peers, topic_hashes) = inject_nodes1() + .peer_no(4) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + let peers = peers.into_iter().collect::>(); + + assert_eq!( + gs.topics().cloned().collect::>(), + topic_hashes, + "Expected topics to match registered topic." + ); + + assert_eq!( + gs.mesh_peers(&TopicHash::from_raw("topic1")) + .cloned() + .collect::>(), + peers, + "Expected peers for a registered topic to contain all peers." + ); + + assert_eq!( + gs.all_mesh_peers().cloned().collect::>(), + peers, + "Expected all_peers to contain all peers." + ); +} + +#[test] +fn test_msg_id_fn_only_called_once_with_fast_message_ids() { + struct Pointers { + slow_counter: u32, + fast_counter: u32, + } + + let mut counters = Pointers { + slow_counter: 0, + fast_counter: 0, + }; + + let counters_pointer: *mut Pointers = &mut counters; + + let counters_address = counters_pointer as u64; - assert_eq!(counters.fast_counter, 5); - assert_eq!(counters.slow_counter, 1); + macro_rules! 
get_counters_pointer { + ($m: expr) => {{ + let mut address_bytes: [u8; 8] = Default::default(); + address_bytes.copy_from_slice($m.as_slice()); + let address = u64::from_be_bytes(address_bytes); + address as *mut Pointers + }}; } - #[test] - fn test_subscribe_to_invalid_topic() { - let t1 = Topic::new("t1"); - let t2 = Topic::new("t2"); - let (mut gs, _, _) = inject_nodes::() - .subscription_filter(WhitelistSubscriptionFilter( - vec![t1.hash()].into_iter().collect(), - )) - .to_subscribe(false) - .create_network(); - - assert!(gs.subscribe(&t1).is_ok()); - assert!(gs.subscribe(&t2).is_err()); + macro_rules! get_counters_and_hash { + ($m: expr) => {{ + let mut hasher = DefaultHasher::new(); + $m.hash(&mut hasher); + let id = hasher.finish().to_be_bytes().into(); + (id, get_counters_pointer!($m)) + }}; } - #[test] - fn test_subscribe_and_graft_with_negative_score() { - //simulate a communication between two gossipsub instances - let (mut gs1, _, topic_hashes) = inject_nodes1() - .topics(vec!["test".into()]) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); + let message_id_fn = |m: &GossipsubMessage| -> MessageId { + let (mut id, mut counters_pointer): (MessageId, *mut Pointers) = + get_counters_and_hash!(&m.data); + unsafe { + (*counters_pointer).slow_counter += 1; + } + id.0.reverse(); + id + }; + let fast_message_id_fn = |m: &RawGossipsubMessage| -> FastMessageId { + let (id, mut counters_pointer) = get_counters_and_hash!(&m.data); + unsafe { + (*counters_pointer).fast_counter += 1; + } + id + }; + let config = GossipsubConfigBuilder::default() + .message_id_fn(message_id_fn) + .fast_message_id_fn(fast_message_id_fn) + .build() + .unwrap(); + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(0) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + let message = RawGossipsubMessage { + source: None, + data: counters_address.to_be_bytes().to_vec(), + sequence_number: None, + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + + for _ in 0..5 { + gs.handle_received_message(message.clone(), &PeerId::random()); + } - let (mut gs2, _, _) = inject_nodes1().create_network(); + assert_eq!(counters.fast_counter, 5); + assert_eq!(counters.slow_counter, 1); +} - let connection_id = ConnectionId::new(0); +#[test] +fn test_subscribe_to_invalid_topic() { + let t1 = Topic::new("t1"); + let t2 = Topic::new("t2"); + let (mut gs, _, _) = inject_nodes::() + .subscription_filter(WhitelistSubscriptionFilter( + vec![t1.hash()].into_iter().collect(), + )) + .to_subscribe(false) + .create_network(); + + assert!(gs.subscribe(&t1).is_ok()); + assert!(gs.subscribe(&t2).is_err()); +} - let topic = Topic::new("test"); +#[test] +fn test_subscribe_and_graft_with_negative_score() { + //simulate a communication between two gossipsub instances + let (mut gs1, _, topic_hashes) = inject_nodes1() + .topics(vec!["test".into()]) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); - let p2 = add_peer(&mut gs1, &Vec::new(), true, false); - let p1 = add_peer(&mut gs2, &topic_hashes, false, false); + let (mut gs2, _, _) = inject_nodes1().create_network(); - //add penalty to peer p2 - gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + let connection_id = ConnectionId::new(0); - let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2); + let topic = Topic::new("test"); - //subscribe to topic in gs2 - 
gs2.subscribe(&topic).unwrap(); + let p2 = add_peer(&mut gs1, &Vec::new(), true, false); + let p1 = add_peer(&mut gs2, &topic_hashes, false, false); - let forward_messages_to_p1 = |gs1: &mut Gossipsub<_, _>, gs2: &mut Gossipsub<_, _>| { - //collect messages to p1 - let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { - if &peer_id == &p1 { - if let GossipsubHandlerIn::Message(m) = Arc::try_unwrap(event).unwrap() { - Some(m) - } else { - None - } + //add penalty to peer p2 + gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2); + + //subscribe to topic in gs2 + gs2.subscribe(&topic).unwrap(); + + let forward_messages_to_p1 = |gs1: &mut Gossipsub<_, _>, gs2: &mut Gossipsub<_, _>| { + //collect messages to p1 + let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { + if peer_id == p1 { + if let GossipsubHandlerIn::Message(m) = Arc::try_unwrap(event).unwrap() { + Some(m) } else { None } + } else { + None } - _ => None, - }); - for message in messages_to_p1 { - gs1.inject_event( - p2.clone(), - connection_id, - HandlerEvent::Message { - rpc: proto_to_message(&message), - invalid_messages: vec![], - }, - ); } - }; - - //forward the subscribe message - forward_messages_to_p1(&mut gs1, &mut gs2); - - //heartbeats on both - gs1.heartbeat(); - gs2.heartbeat(); - - //forward messages again - forward_messages_to_p1(&mut gs1, &mut gs2); - - //nobody got penalized - assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); - } + _ => None, + }); + for message in messages_to_p1 { + gs1.inject_event( + p2, + connection_id, + HandlerEvent::Message { + rpc: proto_to_message(&message), + invalid_messages: vec![], + }, + ); + } + }; - #[test] - /// Test nodes that send grafts without subscriptions. - fn test_graft_without_subscribe() { - // The node should: - // - Create an empty vector in mesh[topic] - // - Send subscription request to all peers - // - run JOIN(topic) - - let topic = String::from("test_subscribe"); - let subscribe_topic = vec![topic.clone()]; - let subscribe_topic_hash = vec![Topic::new(topic.clone()).hash()]; - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(1) - .topics(subscribe_topic) - .to_subscribe(false) - .create_network(); + //forward the subscribe message + forward_messages_to_p1(&mut gs1, &mut gs2); - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); + //heartbeats on both + gs1.heartbeat(); + gs2.heartbeat(); - // The node sends a graft for the subscribe topic. - gs.handle_graft(&peers[0], subscribe_topic_hash); + //forward messages again + forward_messages_to_p1(&mut gs1, &mut gs2); - // The node disconnects - disconnect_peer(&mut gs, &peers[0]); + //nobody got penalized + assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); +} - // We unsubscribe from the topic. - let _ = gs.unsubscribe(&Topic::new(topic)); - } +#[test] +/// Test nodes that send grafts without subscriptions. 
+fn test_graft_without_subscribe() { + // The node should: + // - Create an empty vector in mesh[topic] + // - Send subscription request to all peers + // - run JOIN(topic) + + let topic = String::from("test_subscribe"); + let subscribe_topic = vec![topic.clone()]; + let subscribe_topic_hash = vec![Topic::new(topic.clone()).hash()]; + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(subscribe_topic) + .to_subscribe(false) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // The node sends a graft for the subscribe topic. + gs.handle_graft(&peers[0], subscribe_topic_hash); + + // The node disconnects + disconnect_peer(&mut gs, &peers[0]); + + // We unsubscribe from the topic. + let _ = gs.unsubscribe(&Topic::new(topic)); } diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index a0a8d5e46a1..34956fe614c 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -267,7 +267,7 @@ impl GossipsubConfig { /// connected/trusted nodes. The default is false. /// /// Note: Peer exchange is not implemented today, see - /// https://github.com/libp2p/rust-libp2p/issues/2398. + /// . pub fn do_px(&self) -> bool { self.do_px } @@ -334,7 +334,7 @@ impl GossipsubConfig { /// Number of heartbeat ticks that specifcy the interval in which opportunistic grafting is /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a - /// threshold (see https://godoc.org/github.com/libp2p/go-libp2p-pubsub#PeerScoreThresholds). + /// threshold (see ). /// The default is 60. pub fn opportunistic_graft_ticks(&self) -> u64 { self.opportunistic_graft_ticks @@ -650,7 +650,7 @@ impl GossipsubConfigBuilder { /// connected/trusted nodes. The default is false. /// /// Note: Peer exchange is not implemented today, see - /// https://github.com/libp2p/rust-libp2p/issues/2398. + /// . pub fn do_px(&mut self) -> &mut Self { self.config.do_px = true; self @@ -725,7 +725,7 @@ impl GossipsubConfigBuilder { /// Number of heartbeat ticks that specifcy the interval in which opportunistic grafting is /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a - /// threshold (see https://godoc.org/github.com/libp2p/go-libp2p-pubsub#PeerScoreThresholds). + /// threshold (see ). /// The default is 60. pub fn opportunistic_graft_ticks(&mut self, opportunistic_graft_ticks: u64) -> &mut Self { self.config.opportunistic_graft_ticks = opportunistic_graft_ticks; diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index 4022a23185d..4f00f82ba36 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -24,7 +24,7 @@ //! # Overview //! //! *Note: The gossipsub protocol specifications -//! (https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) provide an outline for the +//! () provide an outline for the //! routing protocol. They should be consulted for further detail.* //! //! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. It provides bounded @@ -99,8 +99,8 @@ //! // This is test transport (memory). //! let transport = MemoryTransport::default() //! 
.upgrade(libp2p_core::upgrade::Version::V1) -//! .authenticate(libp2p_noise::NoiseAuthenticated::xx(&local_key).unwrap()) -//! .multiplex(libp2p_mplex::MplexConfig::new()) +//! .authenticate(libp2p::noise::NoiseAuthenticated::xx(&local_key).unwrap()) +//! .multiplex(libp2p::mplex::MplexConfig::new()) //! .boxed(); //! //! // Create a Gossipsub topic diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index 6d1c0465a76..ef838c82a8d 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -270,23 +270,16 @@ mod tests { fn test_put_get_one() { let mut mc = new_cache(10, 15); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); let (id, m) = gen_testm(10, topic1_hash); mc.put(&id, m.clone()); - assert!(mc.history[0].len() == 1); + assert_eq!(mc.history[0].len(), 1); let fetched = mc.get(&id); - assert_eq!(fetched.is_none(), false); - assert_eq!(fetched.is_some(), true); - - // Make sure it is the same fetched message - match fetched { - Some(x) => assert_eq!(*x, m), - _ => assert!(false), - } + assert_eq!(fetched.unwrap(), &m); } #[test] @@ -294,15 +287,15 @@ mod tests { fn test_get_wrong() { let mut mc = new_cache(10, 15); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); let (id, m) = gen_testm(10, topic1_hash); - mc.put(&id, m.clone()); + mc.put(&id, m); // Try to get an incorrect ID let wrong_id = MessageId::new(b"wrongid"); let fetched = mc.get(&wrong_id); - assert_eq!(fetched.is_none(), true); + assert!(fetched.is_none()); } #[test] @@ -313,7 +306,7 @@ mod tests { // Try to get an incorrect ID let wrong_string = MessageId::new(b"imempty"); let fetched = mc.get(&wrong_string); - assert_eq!(fetched.is_none(), true); + assert!(fetched.is_none()); } #[test] @@ -321,7 +314,7 @@ mod tests { fn test_shift() { let mut mc = new_cache(1, 5); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); // Build the message for i in 0..10 { @@ -332,7 +325,7 @@ mod tests { mc.shift(); // Ensure the shift occurred - assert!(mc.history[0].len() == 0); + assert!(mc.history[0].is_empty()); assert!(mc.history[1].len() == 10); // Make sure no messages deleted @@ -344,7 +337,7 @@ mod tests { fn test_empty_shift() { let mut mc = new_cache(1, 5); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); // Build the message for i in 0..10 { @@ -355,14 +348,14 @@ mod tests { mc.shift(); // Ensure the shift occurred - assert!(mc.history[0].len() == 0); + assert!(mc.history[0].is_empty()); assert!(mc.history[1].len() == 10); mc.shift(); assert!(mc.history[2].len() == 10); - assert!(mc.history[1].len() == 0); - assert!(mc.history[0].len() == 0); + assert!(mc.history[1].is_empty()); + assert!(mc.history[0].is_empty()); } #[test] @@ -370,7 +363,7 @@ mod tests { fn test_remove_last_from_shift() { let mut mc = new_cache(4, 5); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); // Build the message for i in 0..10 { diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 4ede29b7c51..c457ffe0f70 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -29,7 +29,7 @@ fn within_variance(value: f64, expected: f64, variance: f64) -> bool { if expected >= 0.0 { return value > expected * (1.0 - variance) 
&& value < expected * (1.0 + variance); } - return value > expected * (1.0 + variance) && value < expected * (1.0 - variance); + value > expected * (1.0 + variance) && value < expected * (1.0 - variance) } // generates a random gossipsub message with sequence number i @@ -45,7 +45,7 @@ fn make_test_message(seq: u64) -> (MessageId, RawGossipsubMessage) { }; let message = GossipsubMessage { - source: raw_message.source.clone(), + source: raw_message.source, data: raw_message.data.clone(), sequence_number: raw_message.sequence_number, topic: raw_message.topic.clone(), @@ -62,7 +62,7 @@ fn default_message_id() -> fn(&GossipsubMessage) -> MessageId { let mut source_string = if let Some(peer_id) = message.source.as_ref() { peer_id.to_base58() } else { - PeerId::from_bytes(&vec![0, 1, 0]) + PeerId::from_bytes(&[0, 1, 0]) .expect("Valid peer id") .to_base58() }; @@ -76,14 +76,18 @@ fn test_score_time_in_mesh() { // Create parameters with reasonable default values let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.topic_score_cap = 1000.0; + let mut params = PeerScoreParams { + topic_score_cap: 1000.0, + ..Default::default() + }; - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 0.5; - topic_params.time_in_mesh_weight = 1.0; - topic_params.time_in_mesh_quantum = Duration::from_millis(1); - topic_params.time_in_mesh_cap = 3600.0; + let topic_params = TopicScoreParams { + topic_weight: 0.5, + time_in_mesh_weight: 1.0, + time_in_mesh_quantum: Duration::from_millis(1), + time_in_mesh_cap: 3600.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); @@ -91,7 +95,7 @@ fn test_score_time_in_mesh() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); let score = peer_score.score(&peer_id); assert!( @@ -125,11 +129,13 @@ fn test_score_time_in_mesh_cap() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 0.5; - topic_params.time_in_mesh_weight = 1.0; - topic_params.time_in_mesh_quantum = Duration::from_millis(1); - topic_params.time_in_mesh_cap = 10.0; + let topic_params = TopicScoreParams { + topic_weight: 0.5, + time_in_mesh_weight: 1.0, + time_in_mesh_quantum: Duration::from_millis(1), + time_in_mesh_cap: 10.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); @@ -137,7 +143,7 @@ fn test_score_time_in_mesh_cap() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); let score = peer_score.score(&peer_id); assert!( @@ -173,12 +179,14 @@ fn test_score_first_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.first_message_deliveries_weight = 1.0; - topic_params.first_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_cap = 2000.0; - topic_params.time_in_mesh_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 1.0, + first_message_deliveries_cap: 2000.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); @@ -186,7 +194,7 
@@ fn test_score_first_message_deliveries() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); peer_score.graft(&peer_id, topic); // deliver a bunch of messages from the peer @@ -217,12 +225,14 @@ fn test_score_first_message_deliveries_cap() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.first_message_deliveries_weight = 1.0; - topic_params.first_message_deliveries_decay = 1.0; // test without decay - topic_params.first_message_deliveries_cap = 50.0; - topic_params.time_in_mesh_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 1.0, // test without decay + first_message_deliveries_cap: 50.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); @@ -230,7 +240,7 @@ fn test_score_first_message_deliveries_cap() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); peer_score.graft(&peer_id, topic); // deliver a bunch of messages from the peer @@ -261,17 +271,19 @@ fn test_score_first_message_deliveries_decay() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.first_message_deliveries_weight = 1.0; - topic_params.first_message_deliveries_decay = 0.9; // decay 10% per decay interval - topic_params.first_message_deliveries_cap = 2000.0; - topic_params.time_in_mesh_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 0.9, // decay 10% per decay interval + first_message_deliveries_cap: 2000.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let peer_id = PeerId::random(); let mut peer_score = PeerScore::new(params); - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); peer_score.graft(&peer_id, topic); // deliver a bunch of messages from the peer @@ -317,17 +329,19 @@ fn test_score_mesh_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = -1.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + 
..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); @@ -341,11 +355,11 @@ fn test_score_mesh_message_deliveries() { let peer_id_b = PeerId::random(); let peer_id_c = PeerId::random(); - let peers = vec![peer_id_a.clone(), peer_id_b.clone(), peer_id_c.clone()]; + let peers = vec![peer_id_a, peer_id_b, peer_id_c]; for peer_id in &peers { - peer_score.add_peer(peer_id.clone()); - peer_score.graft(&peer_id, topic.clone()); + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, topic.clone()); } // assert that nobody has been penalized yet for not delivering messages before activation time @@ -419,25 +433,27 @@ fn test_score_mesh_message_deliveries_decay() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = -1.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(0); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 0.9; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.mesh_failure_penalty_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 0.9, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + mesh_failure_penalty_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); // deliver a bunch of messages from peer A let messages = 100; @@ -480,24 +496,26 @@ fn test_score_mesh_failure_penalty() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - // the mesh failure penalty is applied when a peer is pruned while their - // mesh deliveries are under the threshold. 
- // for this test, we set the mesh delivery threshold, but set - // mesh_message_deliveries to zero, so the only affect on the score - // is from the mesh failure penalty - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(0); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.mesh_failure_penalty_weight = -1.0; - topic_params.mesh_failure_penalty_decay = 1.0; + let topic_params = TopicScoreParams { + // the mesh failure penalty is applied when a peer is pruned while their + // mesh deliveries are under the threshold. + // for this test, we set the mesh delivery threshold, but set + // mesh_message_deliveries to zero, so the only affect on the score + // is from the mesh failure penalty + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + mesh_failure_penalty_weight: -1.0, + mesh_failure_penalty_decay: 1.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); @@ -505,11 +523,11 @@ fn test_score_mesh_failure_penalty() { let peer_id_a = PeerId::random(); let peer_id_b = PeerId::random(); - let peers = vec![peer_id_a.clone(), peer_id_b.clone()]; + let peers = vec![peer_id_a, peer_id_b]; for peer_id in &peers { - peer_score.add_peer(peer_id.clone()); - peer_score.graft(&peer_id, topic.clone()); + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, topic.clone()); } // deliver a bunch of messages from peer A @@ -562,27 +580,28 @@ fn test_score_invalid_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - - topic_params.invalid_message_deliveries_weight = -1.0; - topic_params.invalid_message_deliveries_decay = 1.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + 
invalid_message_deliveries_weight: -1.0, + invalid_message_deliveries_decay: 1.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); // reject a bunch of messages from peer A let messages = 100; @@ -608,27 +627,28 @@ fn test_score_invalid_message_deliveris_decay() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - - topic_params.invalid_message_deliveries_weight = -1.0; - topic_params.invalid_message_deliveries_decay = 0.9; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + invalid_message_deliveries_weight: -1.0, + invalid_message_deliveries_decay: 0.9, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); // reject a bunch of messages from peer A let messages = 100; @@ -667,26 +687,28 @@ fn test_score_reject_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = -1.0; - topic_params.invalid_message_deliveries_decay = 1.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: -1.0, + invalid_message_deliveries_decay: 1.0, + ..Default::default() + }; - params.topics.insert(topic_hash, topic_params.clone()); + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); let peer_id_b = PeerId::random(); - let peers = vec![peer_id_a.clone(), peer_id_b.clone()]; + let peers = vec![peer_id_a, peer_id_b]; for peer_id in 
&peers { - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(*peer_id); } let (id, msg) = make_test_message(1); @@ -777,25 +799,29 @@ fn test_application_score() { let app_specific_weight = 0.5; let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.app_specific_weight = app_specific_weight; - - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = 0.0; - topic_params.invalid_message_deliveries_decay = 1.0; + let mut params = PeerScoreParams { + app_specific_weight, + ..Default::default() + }; - params.topics.insert(topic_hash, topic_params.clone()); + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: 0.0, + invalid_message_deliveries_decay: 1.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); let messages = 100; for i in -100..messages { @@ -815,20 +841,24 @@ fn test_score_ip_colocation() { let ip_colocation_factor_threshold = 1.0; let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.ip_colocation_factor_weight = ip_colocation_factor_weight; - params.ip_colocation_factor_threshold = ip_colocation_factor_threshold; - - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = 0.0; + let mut params = PeerScoreParams { + ip_colocation_factor_weight, + ip_colocation_factor_threshold, + ..Default::default() + }; - params.topics.insert(topic_hash, topic_params.clone()); + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); @@ -836,15 +866,10 @@ fn test_score_ip_colocation() { let peer_id_c = PeerId::random(); let peer_id_d = PeerId::random(); - let peers = vec![ - peer_id_a.clone(), - peer_id_b.clone(), - peer_id_c.clone(), - peer_id_d.clone(), - ]; + let peers = vec![peer_id_a, peer_id_b, peer_id_c, peer_id_d]; for peer_id in &peers { - peer_score.add_peer(peer_id.clone()); - peer_score.graft(&peer_id, topic.clone()); + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, 
topic.clone()); } // peerA should have no penalty, but B, C, and D should be penalized for sharing an IP @@ -880,20 +905,24 @@ fn test_score_behaviour_penality() { let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.behaviour_penalty_decay = behaviour_penalty_decay; - params.behaviour_penalty_weight = behaviour_penalty_weight; - - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = 0.0; + let mut params = PeerScoreParams { + behaviour_penalty_decay, + behaviour_penalty_weight, + ..Default::default() + }; - params.topics.insert(topic_hash, topic_params.clone()); + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); @@ -905,7 +934,7 @@ fn test_score_behaviour_penality() { assert_eq!(score_a, 0.0, "Peer A should be unaffected"); // add the peer and test penalties - peer_score.add_peer(peer_id_a.clone()); + peer_score.add_peer(peer_id_a); assert_eq!(score_a, 0.0, "Peer A should be unaffected"); peer_score.add_penalty(&peer_id_a, 1); @@ -931,23 +960,27 @@ fn test_score_retention() { let app_specific_weight = 1.0; let app_score_value = -1000.0; let retain_score = Duration::from_secs(1); - let mut params = PeerScoreParams::default(); - params.app_specific_weight = app_specific_weight; - params.retain_score = retain_score; + let mut params = PeerScoreParams { + app_specific_weight, + retain_score, + ..Default::default() + }; - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 0.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(0); - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 0.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; - params.topics.insert(topic_hash, topic_params.clone()); + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); peer_score.set_application_score(&peer_id_a, app_score_value); diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 0eb5f4ee56b..c6aa2bdd56b 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -572,24 +572,20 @@ mod tests { use crate::IdentTopic as Topic; use libp2p_core::identity::Keypair; use quickcheck::*; - use rand::Rng; #[derive(Clone, Debug)] struct Message(RawGossipsubMessage); impl 
Arbitrary for Message { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let keypair = TestKeypair::arbitrary(g); // generate an arbitrary GossipsubMessage using the behaviour signing functionality let config = GossipsubConfig::default(); - let gs: Gossipsub = Gossipsub::new( - crate::MessageAuthenticity::Signed(keypair.0.clone()), - config, - ) - .unwrap(); - let data = (0..g.gen_range(10, 10024)) - .map(|_| g.gen()) + let gs: Gossipsub = + Gossipsub::new(crate::MessageAuthenticity::Signed(keypair.0), config).unwrap(); + let data = (0..g.gen_range(10..10024u32)) + .map(|_| u8::arbitrary(g)) .collect::>(); let topic_id = TopicId::arbitrary(g).0; Message(gs.build_raw_message(topic_id, data).unwrap()) @@ -600,11 +596,10 @@ mod tests { struct TopicId(TopicHash); impl Arbitrary for TopicId { - fn arbitrary(g: &mut G) -> Self { - let topic_string: String = (0..g.gen_range(20, 1024)) - .map(|_| g.gen::()) - .collect::() - .into(); + fn arbitrary(g: &mut Gen) -> Self { + let topic_string: String = (0..g.gen_range(20..1024u32)) + .map(|_| char::arbitrary(g)) + .collect::(); TopicId(Topic::new(topic_string).into()) } } @@ -614,8 +609,8 @@ mod tests { impl Arbitrary for TestKeypair { #[cfg(feature = "rsa")] - fn arbitrary(g: &mut G) -> Self { - let keypair = if g.gen() { + fn arbitrary(g: &mut Gen) -> Self { + let keypair = if bool::arbitrary(g) { // Small enough to be inlined. Keypair::generate_ed25519() } else { @@ -627,7 +622,7 @@ mod tests { } #[cfg(not(feature = "rsa"))] - fn arbitrary(_g: &mut G) -> Self { + fn arbitrary(_g: &mut Gen) -> Self { // Small enough to be inlined. TestKeypair(Keypair::generate_ed25519()) } @@ -655,7 +650,7 @@ mod tests { let mut codec = GossipsubCodec::new(codec::UviBytes::default(), ValidationMode::Strict); let mut buf = BytesMut::new(); - codec.encode(rpc.clone().into_protobuf(), &mut buf).unwrap(); + codec.encode(rpc.into_protobuf(), &mut buf).unwrap(); let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap(); // mark as validated as its a published message match decoded_rpc { diff --git a/protocols/gossipsub/src/rpc_proto.rs b/protocols/gossipsub/src/rpc_proto.rs index 3903318fbfb..7b952ef0926 100644 --- a/protocols/gossipsub/src/rpc_proto.rs +++ b/protocols/gossipsub/src/rpc_proto.rs @@ -75,7 +75,7 @@ mod test { assert_eq!(new_message.topic, topic1.clone().into_string()); let new_message = super::Message::decode(&old_message2b[..]).unwrap(); - assert_eq!(new_message.topic, topic2.clone().into_string()); + assert_eq!(new_message.topic, topic2.into_string()); let old_message = compat_proto::Message::decode(&new_message1b[..]).unwrap(); assert_eq!(old_message.topic_ids, vec![topic1.into_string()]); diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 960d0cb8a54..600a02c7a64 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -232,15 +232,15 @@ pub mod regex { let subscriptions = vec![ GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, GossipsubSubscription { action: Subscribe, - topic_hash: t3.clone(), + topic_hash: t3, }, ]; @@ -277,7 +277,7 @@ mod test { }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, GossipsubSubscription { action: Subscribe, @@ -285,7 +285,7 @@ mod test { }, GossipsubSubscription { action: Unsubscribe, - topic_hash: 
t1.clone(), + topic_hash: t1, }, ]; @@ -306,11 +306,11 @@ mod test { let subscriptions = vec![ GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, ]; @@ -343,7 +343,7 @@ mod test { }, GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, ]; @@ -434,11 +434,11 @@ mod test { let subscriptions = vec![ GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, ]; diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index fae06c3e283..43ad944dccb 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -29,16 +29,16 @@ use std::{ }; use futures::StreamExt; -use libp2p_core::{ +use libp2p::core::{ identity, multiaddr::Protocol, transport::MemoryTransport, upgrade, Multiaddr, Transport, }; -use libp2p_gossipsub::{ +use libp2p::gossipsub::{ Gossipsub, GossipsubConfigBuilder, GossipsubEvent, IdentTopic as Topic, MessageAuthenticity, ValidationMode, }; -use libp2p_plaintext::PlainText2Config; -use libp2p_swarm::{Swarm, SwarmEvent}; -use libp2p_yamux as yamux; +use libp2p::plaintext::PlainText2Config; +use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::yamux; struct Graph { pub nodes: Vec<(Multiaddr, Swarm)>, @@ -170,16 +170,14 @@ fn build_node() -> (Multiaddr, Swarm) { .validation_mode(ValidationMode::Permissive) .build() .unwrap(); - let behaviour = Gossipsub::new(MessageAuthenticity::Author(peer_id.clone()), config).unwrap(); + let behaviour = Gossipsub::new(MessageAuthenticity::Author(peer_id), config).unwrap(); let mut swarm = Swarm::new(transport, behaviour, peer_id); let port = 1 + random::(); let mut addr: Multiaddr = Protocol::Memory(port).into(); swarm.listen_on(addr.clone()).unwrap(); - addr = addr.with(libp2p_core::multiaddr::Protocol::P2p( - public_key.to_peer_id().into(), - )); + addr = addr.with(Protocol::P2p(public_key.to_peer_id().into())); (addr, swarm) } @@ -189,7 +187,7 @@ fn multi_hop_propagation() { let _ = env_logger::try_init(); fn prop(num_nodes: u8, seed: u64) -> TestResult { - if num_nodes < 2 || num_nodes > 50 { + if !(2..=50).contains(&num_nodes) { return TestResult::discard(); } diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 1455966b091..00ed1e138e2 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,3 +1,19 @@ +# 0.40.0 [unreleased] + +- Update dependencies. + +- Rename types as per [discussion 2174]. + `Identify` has been renamed to `Behaviour`. + The `Identify` prefix has been removed from various types like `IdentifyEvent`. + Users should prefer importing the identify protocol as a module (`use libp2p::identify;`), + and refer to its types via `identify::`. For example: `identify::Behaviour` or `identify::Event`. + + [discussion 2174]: https://github.com/libp2p/rust-libp2p/discussions/2174 + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + # 0.39.0 - Update to `libp2p-swarm` `v0.39.0`. 
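Below is a minimal sketch of the module-style naming recommended in the changelog entry above, mirroring the updated `examples/identify.rs` later in this diff. It assumes the `libp2p` facade crate with its `identify` feature enabled and uses a freshly generated keypair purely for illustration.

```rust
use libp2p::{identify, identity};

fn main() {
    let local_key = identity::Keypair::generate_ed25519();

    // `identify::Behaviour` replaces the old `Identify` type and
    // `identify::Config` replaces `IdentifyConfig`.
    let _behaviour = identify::Behaviour::new(identify::Config::new(
        "/ipfs/id/1.0.0".to_string(),
        local_key.public(),
    ));

    // Swarm events are now matched as `identify::Event::Sent { .. }` /
    // `identify::Event::Received { .. }` instead of `IdentifyEvent::*`.
}
```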
diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index f147c5bb2c8..c24c2e8a9c6 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-identify" edition = "2021" rust-version = "1.56.1" description = "Nodes identifcation protocol for libp2p" -version = "0.39.0" +version = "0.40.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,10 +14,10 @@ categories = ["network-programming", "asynchronous"] asynchronous-codec = "0.6" futures = "0.3.1" futures-timer = "3.0.2" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } log = "0.4.1" -lru = "0.7.2" +lru = "0.8.0" prost-codec = { version = "0.2", path = "../../misc/prost-codec" } prost = "0.11" smallvec = "1.6.1" @@ -27,14 +27,7 @@ void = "1.0" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } env_logger = "0.9" -libp2p = { path = "../..", default-features = false, features = [ - "dns-async-std", - "mplex", - "noise", - "tcp-async-io", - "websocket", - "yamux", -]} +libp2p = { path = "../..", features = ["full"] } [build-dependencies] prost-build = "0.11" diff --git a/protocols/identify/examples/identify.rs b/protocols/identify/examples/identify.rs index 278cf0cc991..b02eb1c9ebf 100644 --- a/protocols/identify/examples/identify.rs +++ b/protocols/identify/examples/identify.rs @@ -37,8 +37,7 @@ //! and will send each other identify info which is then printed to the console. use futures::prelude::*; -use libp2p::{identity, Multiaddr, PeerId}; -use libp2p_identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::{identify, identity, Multiaddr, PeerId}; use libp2p_swarm::{Swarm, SwarmEvent}; use std::error::Error; @@ -51,7 +50,7 @@ async fn main() -> Result<(), Box> { let transport = libp2p::development_transport(local_key.clone()).await?; // Create a identify network behaviour. - let behaviour = Identify::new(IdentifyConfig::new( + let behaviour = identify::Behaviour::new(identify::Config::new( "/ipfs/id/1.0.0".to_string(), local_key.public(), )); @@ -74,11 +73,11 @@ async fn main() -> Result<(), Box> { match swarm.select_next_some().await { SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {:?}", address), // Prints peer id identify info is being sent to. - SwarmEvent::Behaviour(IdentifyEvent::Sent { peer_id, .. }) => { + SwarmEvent::Behaviour(identify::Event::Sent { peer_id, .. }) => { println!("Sent identify info to {:?}", peer_id) } // Prints out the info received via the identify event - SwarmEvent::Behaviour(IdentifyEvent::Received { info, .. }) => { + SwarmEvent::Behaviour(identify::Event::Received { info, .. }) => { println!("Received {:?}", info) } _ => {} diff --git a/protocols/identify/src/identify.rs b/protocols/identify/src/behaviour.rs similarity index 86% rename from protocols/identify/src/identify.rs rename to protocols/identify/src/behaviour.rs index d30e98e1400..eb81c64ccba 100644 --- a/protocols/identify/src/identify.rs +++ b/protocols/identify/src/behaviour.rs @@ -18,8 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::handler::{IdentifyHandlerEvent, IdentifyHandlerProto, IdentifyPush}; -use crate::protocol::{IdentifyInfo, ReplySubstream, UpgradeError}; +use crate::handler::{self, Proto, Push}; +use crate::protocol::{Info, ReplySubstream, UpgradeError}; use futures::prelude::*; use libp2p_core::{ connection::ConnectionId, multiaddr::Protocol, transport::ListenerId, ConnectedPoint, @@ -31,6 +31,7 @@ use libp2p_swarm::{ NotifyHandler, PollParameters, }; use lru::LruCache; +use std::num::NonZeroUsize; use std::{ collections::{HashMap, HashSet, VecDeque}, iter::FromIterator, @@ -46,19 +47,19 @@ use std::{ /// All external addresses of the local node supposedly observed by remotes /// are reported via [`NetworkBehaviourAction::ReportObservedAddr`] with a /// [score](AddressScore) of `1`. -pub struct Identify { - config: IdentifyConfig, +pub struct Behaviour { + config: Config, /// For each peer we're connected to, the observed address to send back to it. connected: HashMap>, /// Pending replies to send. pending_replies: VecDeque, /// Pending events to be emitted when polled. - events: VecDeque>, + events: VecDeque>, /// Peers to which an active push with current information about /// the local peer should be sent. pending_push: HashSet, /// The addresses of all peers that we have discovered. - discovered_peers: LruCache>, + discovered_peers: PeerCache, } /// A pending reply to an inbound identification request. @@ -76,10 +77,10 @@ enum Reply { }, } -/// Configuration for the [`Identify`] [`NetworkBehaviour`]. +/// Configuration for the [`identify::Behaviour`](Behaviour). #[non_exhaustive] #[derive(Debug, Clone)] -pub struct IdentifyConfig { +pub struct Config { /// Application-specific version of the protocol family used by the peer, /// e.g. `ipfs/1.0.0` or `polkadot/1.0.0`. pub protocol_version: String, @@ -119,11 +120,11 @@ pub struct IdentifyConfig { pub cache_size: usize, } -impl IdentifyConfig { - /// Creates a new configuration for the `Identify` behaviour that +impl Config { + /// Creates a new configuration for the identify [`Behaviour`] that /// advertises the given protocol version and public key. pub fn new(protocol_version: String, local_public_key: PublicKey) -> Self { - IdentifyConfig { + Self { protocol_version, agent_version: format!("rust-libp2p/{}", env!("CARGO_PKG_VERSION")), local_public_key, @@ -165,19 +166,22 @@ impl IdentifyConfig { /// Configures the size of the LRU cache, caching addresses of discovered peers. /// /// The [`Swarm`](libp2p_swarm::Swarm) may extend the set of addresses of an outgoing connection attempt via - /// [`Identify::addresses_of_peer`]. + /// [`Behaviour::addresses_of_peer`]. pub fn with_cache_size(mut self, cache_size: usize) -> Self { self.cache_size = cache_size; self } } -impl Identify { - /// Creates a new `Identify` network behaviour. - pub fn new(config: IdentifyConfig) -> Self { - let discovered_peers = LruCache::new(config.cache_size); +impl Behaviour { + /// Creates a new identify [`Behaviour`]. 
+ pub fn new(config: Config) -> Self { + let discovered_peers = match NonZeroUsize::new(config.cache_size) { + None => PeerCache::disabled(), + Some(size) => PeerCache::enabled(size), + }; - Identify { + Self { config, connected: HashMap::new(), pending_replies: VecDeque::new(), @@ -204,12 +208,12 @@ impl Identify { } } -impl NetworkBehaviour for Identify { - type ConnectionHandler = IdentifyHandlerProto; - type OutEvent = IdentifyEvent; +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = Proto; + type OutEvent = Event; fn new_handler(&mut self) -> Self::ConnectionHandler { - IdentifyHandlerProto::new(self.config.initial_delay, self.config.interval) + Proto::new(self.config.initial_delay, self.config.interval) } fn inject_connection_established( @@ -296,31 +300,34 @@ impl NetworkBehaviour for Identify { event: <::Handler as ConnectionHandler>::OutEvent, ) { match event { - IdentifyHandlerEvent::Identified(mut info) => { + handler::Event::Identified(mut info) => { // Remove invalid multiaddrs. info.listen_addrs .retain(|addr| multiaddr_matches_peer_id(addr, &peer_id)); // Replace existing addresses to prevent other peer from filling up our memory. self.discovered_peers - .put(peer_id, HashSet::from_iter(info.listen_addrs.clone())); + .put(peer_id, info.listen_addrs.iter().cloned()); let observed = info.observed_addr.clone(); - self.events.push_back(NetworkBehaviourAction::GenerateEvent( - IdentifyEvent::Received { peer_id, info }, - )); + self.events + .push_back(NetworkBehaviourAction::GenerateEvent(Event::Received { + peer_id, + info, + })); self.events .push_back(NetworkBehaviourAction::ReportObservedAddr { address: observed, score: AddressScore::Finite(1), }); } - IdentifyHandlerEvent::IdentificationPushed => { - self.events.push_back(NetworkBehaviourAction::GenerateEvent( - IdentifyEvent::Pushed { peer_id }, - )); + handler::Event::IdentificationPushed => { + self.events + .push_back(NetworkBehaviourAction::GenerateEvent(Event::Pushed { + peer_id, + })); } - IdentifyHandlerEvent::Identify(sender) => { + handler::Event::Identify(sender) => { let observed = self .connected .get(&peer_id) @@ -335,10 +342,12 @@ impl NetworkBehaviour for Identify { observed: observed.clone(), }); } - IdentifyHandlerEvent::IdentificationError(error) => { - self.events.push_back(NetworkBehaviourAction::GenerateEvent( - IdentifyEvent::Error { peer_id, error }, - )); + handler::Event::IdentificationError(error) => { + self.events + .push_back(NetworkBehaviourAction::GenerateEvent(Event::Error { + peer_id, + error, + })); } } } @@ -364,7 +373,7 @@ impl NetworkBehaviour for Identify { let listen_addrs = listen_addrs(params); let protocols = supported_protocols(params); - let info = IdentifyInfo { + let info = Info { public_key: self.config.local_public_key.clone(), protocol_version: self.config.protocol_version.clone(), agent_version: self.config.agent_version.clone(), @@ -373,7 +382,7 @@ impl NetworkBehaviour for Identify { observed_addr, }; - (*peer, IdentifyPush(info)) + (*peer, Push(info)) }) }); @@ -394,7 +403,7 @@ impl NetworkBehaviour for Identify { loop { match reply { Some(Reply::Queued { peer, io, observed }) => { - let info = IdentifyInfo { + let info = Info { listen_addrs: listen_addrs(params), protocols: supported_protocols(params), public_key: self.config.local_public_key.clone(), @@ -409,7 +418,7 @@ impl NetworkBehaviour for Identify { sending += 1; match Future::poll(Pin::new(&mut io), cx) { Poll::Ready(Ok(())) => { - let event = IdentifyEvent::Sent { peer_id: peer }; + let event = 
Event::Sent { peer_id: peer }; return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); } Poll::Pending => { @@ -422,7 +431,7 @@ impl NetworkBehaviour for Identify { } } Poll::Ready(Err(err)) => { - let event = IdentifyEvent::Error { + let event = Event::Error { peer_id: peer, error: ConnectionHandlerUpgrErr::Upgrade( libp2p_core::upgrade::UpgradeError::Apply(err), @@ -441,24 +450,20 @@ impl NetworkBehaviour for Identify { } fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { - self.discovered_peers - .get(peer) - .cloned() - .map(Vec::from_iter) - .unwrap_or_default() + self.discovered_peers.get(peer) } } /// Event emitted by the `Identify` behaviour. #[allow(clippy::large_enum_variant)] #[derive(Debug)] -pub enum IdentifyEvent { +pub enum Event { /// Identification information has been received from a peer. Received { /// The peer that has been identified. peer_id: PeerId, /// The information provided by the peer. - info: IdentifyInfo, + info: Info, }, /// Identification information of the local node has been sent to a peer in /// response to an identification request. @@ -506,6 +511,44 @@ fn multiaddr_matches_peer_id(addr: &Multiaddr, peer_id: &PeerId) -> bool { true } +struct PeerCache(Option>>); + +impl PeerCache { + fn disabled() -> Self { + Self(None) + } + + fn enabled(size: NonZeroUsize) -> Self { + Self(Some(LruCache::new(size))) + } + + fn get_mut(&mut self, peer: &PeerId) -> Option<&mut HashSet> { + self.0.as_mut()?.get_mut(peer) + } + + fn put(&mut self, peer: PeerId, addresses: impl Iterator) { + let cache = match self.0.as_mut() { + None => return, + Some(cache) => cache, + }; + + cache.put(peer, HashSet::from_iter(addresses)); + } + + fn get(&mut self, peer: &PeerId) -> Vec { + let cache = match self.0.as_mut() { + None => return Vec::new(), + Some(cache) => cache, + }; + + cache + .get(peer) + .cloned() + .map(Vec::from_iter) + .unwrap_or_default() + } +} + #[cfg(test)] mod tests { use super::*; @@ -538,9 +581,8 @@ mod tests { fn periodic_identify() { let (mut swarm1, pubkey1) = { let (pubkey, transport) = transport(); - let protocol = Identify::new( - IdentifyConfig::new("a".to_string(), pubkey.clone()) - .with_agent_version("b".to_string()), + let protocol = Behaviour::new( + Config::new("a".to_string(), pubkey.clone()).with_agent_version("b".to_string()), ); let swarm = Swarm::new(transport, protocol, pubkey.to_peer_id()); (swarm, pubkey) @@ -548,9 +590,8 @@ mod tests { let (mut swarm2, pubkey2) = { let (pubkey, transport) = transport(); - let protocol = Identify::new( - IdentifyConfig::new("c".to_string(), pubkey.clone()) - .with_agent_version("d".to_string()), + let protocol = Behaviour::new( + Config::new("c".to_string(), pubkey.clone()).with_agent_version("d".to_string()), ); let swarm = Swarm::new(transport, protocol, pubkey.to_peer_id()); (swarm, pubkey) @@ -564,9 +605,8 @@ mod tests { loop { let swarm1_fut = swarm1.select_next_some(); pin_mut!(swarm1_fut); - match swarm1_fut.await { - SwarmEvent::NewListenAddr { address, .. } => return address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = swarm1_fut.await { + return address; } } }); @@ -588,9 +628,8 @@ mod tests { .factor_second() .0 { - future::Either::Left(SwarmEvent::Behaviour(IdentifyEvent::Received { - info, - .. + future::Either::Left(SwarmEvent::Behaviour(Event::Received { + info, .. 
})) => { assert_eq!(info.public_key, pubkey2); assert_eq!(info.protocol_version, "c"); @@ -599,9 +638,8 @@ mod tests { assert!(info.listen_addrs.is_empty()); return; } - future::Either::Right(SwarmEvent::Behaviour(IdentifyEvent::Received { - info, - .. + future::Either::Right(SwarmEvent::Behaviour(Event::Received { + info, .. })) => { assert_eq!(info.public_key, pubkey1); assert_eq!(info.protocol_version, "a"); @@ -622,16 +660,15 @@ mod tests { let (mut swarm1, pubkey1) = { let (pubkey, transport) = transport(); - let protocol = Identify::new(IdentifyConfig::new("a".to_string(), pubkey.clone())); + let protocol = Behaviour::new(Config::new("a".to_string(), pubkey.clone())); let swarm = Swarm::new(transport, protocol, pubkey.to_peer_id()); (swarm, pubkey) }; let (mut swarm2, pubkey2) = { let (pubkey, transport) = transport(); - let protocol = Identify::new( - IdentifyConfig::new("a".to_string(), pubkey.clone()) - .with_agent_version("b".to_string()), + let protocol = Behaviour::new( + Config::new("a".to_string(), pubkey.clone()).with_agent_version("b".to_string()), ); let swarm = Swarm::new(transport, protocol, pubkey.to_peer_id()); (swarm, pubkey) @@ -643,9 +680,8 @@ mod tests { loop { let swarm1_fut = swarm1.select_next_some(); pin_mut!(swarm1_fut); - match swarm1_fut.await { - SwarmEvent::NewListenAddr { address, .. } => return address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = swarm1_fut.await { + return address; } } }); @@ -665,7 +701,7 @@ mod tests { .factor_second() .0 { - future::Either::Left(SwarmEvent::Behaviour(IdentifyEvent::Received { + future::Either::Left(SwarmEvent::Behaviour(Event::Received { info, .. })) => { @@ -697,8 +733,8 @@ mod tests { let mut swarm1 = { let (pubkey, transport) = transport(); - let protocol = Identify::new( - IdentifyConfig::new("a".to_string(), pubkey.clone()) + let protocol = Behaviour::new( + Config::new("a".to_string(), pubkey.clone()) // `swarm1` will set `KeepAlive::No` once it identified `swarm2` and thus // closes the connection. At this point in time `swarm2` might not yet have // identified `swarm1`. To give `swarm2` enough time, set an initial delay on @@ -711,8 +747,8 @@ mod tests { let mut swarm2 = { let (pubkey, transport) = transport(); - let protocol = Identify::new( - IdentifyConfig::new("a".to_string(), pubkey.clone()) + let protocol = Behaviour::new( + Config::new("a".to_string(), pubkey.clone()) .with_cache_size(100) .with_agent_version("b".to_string()), ); @@ -749,7 +785,7 @@ mod tests { // Wait until we identified. async_std::task::block_on(async { loop { - if let SwarmEvent::Behaviour(IdentifyEvent::Received { .. }) = + if let SwarmEvent::Behaviour(Event::Received { .. }) = swarm2.select_next_some().await { break; @@ -797,7 +833,7 @@ mod tests { let addr_without_peer_id: Multiaddr = addr.clone(); let mut addr_with_other_peer_id = addr.clone(); - addr.push(Protocol::P2p(peer_id.clone().into())); + addr.push(Protocol::P2p(peer_id.into())); addr_with_other_peer_id.push(Protocol::P2p(other_peer_id.into())); assert!(multiaddr_matches_peer_id(&addr, &peer_id)); diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index af4d7f8cba3..20db7f84496 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -19,8 +19,7 @@ // DEALINGS IN THE SOFTWARE. 
use crate::protocol::{ - IdentifyInfo, IdentifyProtocol, IdentifyPushProtocol, InboundPush, OutboundPush, - ReplySubstream, UpgradeError, + InboundPush, Info, OutboundPush, Protocol, PushProtocol, ReplySubstream, UpgradeError, }; use futures::future::BoxFuture; use futures::prelude::*; @@ -36,29 +35,29 @@ use log::warn; use smallvec::SmallVec; use std::{io, pin::Pin, task::Context, task::Poll, time::Duration}; -pub struct IdentifyHandlerProto { +pub struct Proto { initial_delay: Duration, interval: Duration, } -impl IdentifyHandlerProto { +impl Proto { pub fn new(initial_delay: Duration, interval: Duration) -> Self { - IdentifyHandlerProto { + Proto { initial_delay, interval, } } } -impl IntoConnectionHandler for IdentifyHandlerProto { - type Handler = IdentifyHandler; +impl IntoConnectionHandler for Proto { + type Handler = Handler; fn into_handler(self, remote_peer_id: &PeerId, _endpoint: &ConnectedPoint) -> Self::Handler { - IdentifyHandler::new(self.initial_delay, self.interval, *remote_peer_id) + Handler::new(self.initial_delay, self.interval, *remote_peer_id) } fn inbound_protocol(&self) -> ::InboundProtocol { - SelectUpgrade::new(IdentifyProtocol, IdentifyPushProtocol::inbound()) + SelectUpgrade::new(Protocol, PushProtocol::inbound()) } } @@ -67,15 +66,15 @@ impl IntoConnectionHandler for IdentifyHandlerProto { /// Outbound requests are sent periodically. The handler performs expects /// at least one identification request to be answered by the remote before /// permitting the underlying connection to be closed. -pub struct IdentifyHandler { +pub struct Handler { remote_peer_id: PeerId, - inbound_identify_push: Option>>, + inbound_identify_push: Option>>, /// Pending events to yield. events: SmallVec< [ConnectionHandlerEvent< - EitherUpgrade>, + EitherUpgrade>, (), - IdentifyHandlerEvent, + Event, io::Error, >; 4], >, @@ -92,9 +91,9 @@ pub struct IdentifyHandler { /// Event produced by the `IdentifyHandler`. #[derive(Debug)] -pub enum IdentifyHandlerEvent { +pub enum Event { /// We obtained identification information from the remote. - Identified(IdentifyInfo), + Identified(Info), /// We actively pushed our identification information to the remote. IdentificationPushed, /// We received a request for identification. @@ -105,12 +104,12 @@ pub enum IdentifyHandlerEvent { /// Identifying information of the local node that is pushed to a remote. #[derive(Debug)] -pub struct IdentifyPush(pub IdentifyInfo); +pub struct Push(pub Info); -impl IdentifyHandler { +impl Handler { /// Creates a new `IdentifyHandler`. 
pub fn new(initial_delay: Duration, interval: Duration, remote_peer_id: PeerId) -> Self { - IdentifyHandler { + Self { remote_peer_id, inbound_identify_push: Default::default(), events: SmallVec::new(), @@ -121,20 +120,17 @@ impl IdentifyHandler { } } -impl ConnectionHandler for IdentifyHandler { - type InEvent = IdentifyPush; - type OutEvent = IdentifyHandlerEvent; +impl ConnectionHandler for Handler { + type InEvent = Push; + type OutEvent = Event; type Error = io::Error; - type InboundProtocol = SelectUpgrade>; - type OutboundProtocol = EitherUpgrade>; + type InboundProtocol = SelectUpgrade>; + type OutboundProtocol = EitherUpgrade>; type OutboundOpenInfo = (); type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new( - SelectUpgrade::new(IdentifyProtocol, IdentifyPushProtocol::inbound()), - (), - ) + SubstreamProtocol::new(SelectUpgrade::new(Protocol, PushProtocol::inbound()), ()) } fn inject_fully_negotiated_inbound( @@ -143,9 +139,9 @@ impl ConnectionHandler for IdentifyHandler { _: Self::InboundOpenInfo, ) { match output { - EitherOutput::First(substream) => self.events.push(ConnectionHandlerEvent::Custom( - IdentifyHandlerEvent::Identify(substream), - )), + EitherOutput::First(substream) => self + .events + .push(ConnectionHandlerEvent::Custom(Event::Identify(substream))), EitherOutput::Second(fut) => { if self.inbound_identify_push.replace(fut).is_some() { warn!( @@ -165,22 +161,23 @@ impl ConnectionHandler for IdentifyHandler { ) { match output { EitherOutput::First(remote_info) => { - self.events.push(ConnectionHandlerEvent::Custom( - IdentifyHandlerEvent::Identified(remote_info), - )); + self.events + .push(ConnectionHandlerEvent::Custom(Event::Identified( + remote_info, + ))); self.keep_alive = KeepAlive::No; } - EitherOutput::Second(()) => self.events.push(ConnectionHandlerEvent::Custom( - IdentifyHandlerEvent::IdentificationPushed, - )), + EitherOutput::Second(()) => self + .events + .push(ConnectionHandlerEvent::Custom(Event::IdentificationPushed)), } } - fn inject_event(&mut self, IdentifyPush(push): Self::InEvent) { + fn inject_event(&mut self, Push(push): Self::InEvent) { self.events .push(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new( - EitherUpgrade::B(IdentifyPushProtocol::outbound(push)), + EitherUpgrade::B(PushProtocol::outbound(push)), (), ), }); @@ -200,9 +197,10 @@ impl ConnectionHandler for IdentifyHandler { UpgradeError::Apply(EitherError::A(ioe)) => UpgradeError::Apply(ioe), UpgradeError::Apply(EitherError::B(ioe)) => UpgradeError::Apply(ioe), }); - self.events.push(ConnectionHandlerEvent::Custom( - IdentifyHandlerEvent::IdentificationError(err), - )); + self.events + .push(ConnectionHandlerEvent::Custom(Event::IdentificationError( + err, + ))); self.keep_alive = KeepAlive::No; self.trigger_next_identify.reset(self.interval); } @@ -215,12 +213,7 @@ impl ConnectionHandler for IdentifyHandler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - IdentifyHandlerEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { if !self.events.is_empty() { return Poll::Ready(self.events.remove(0)); @@ -232,7 +225,7 @@ impl ConnectionHandler for IdentifyHandler { Poll::Ready(()) => { self.trigger_next_identify.reset(self.interval); let ev = ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(EitherUpgrade::A(IdentifyProtocol), ()), + protocol: 
SubstreamProtocol::new(EitherUpgrade::A(Protocol), ()), }; return Poll::Ready(ev); } @@ -246,9 +239,7 @@ impl ConnectionHandler for IdentifyHandler { self.inbound_identify_push.take(); if let Ok(info) = res { - return Poll::Ready(ConnectionHandlerEvent::Custom( - IdentifyHandlerEvent::Identified(info), - )); + return Poll::Ready(ConnectionHandlerEvent::Custom(Event::Identified(info))); } } diff --git a/protocols/identify/src/lib.rs b/protocols/identify/src/lib.rs index 2f73a5a0cad..8680920ec1f 100644 --- a/protocols/identify/src/lib.rs +++ b/protocols/identify/src/lib.rs @@ -21,7 +21,7 @@ //! Implementation of the [Identify] protocol. //! //! This implementation of the protocol periodically exchanges -//! [`IdentifyInfo`] messages between the peers on an established connection. +//! [`Info`] messages between the peers on an established connection. //! //! At least one identification request is sent on a newly established //! connection, beyond which the behaviour does not keep connections alive. @@ -35,20 +35,36 @@ //! //! # Usage //! -//! The [`Identify`] struct implements a `NetworkBehaviour` that negotiates -//! and executes the protocol on every established connection, emitting -//! [`IdentifyEvent`]s. -//! -//! [Identify]: https://github.com/libp2p/specs/tree/master/identify -//! [`Identify`]: self::Identify -//! [`IdentifyEvent`]: self::IdentifyEvent -//! [`IdentifyInfo`]: self::IdentifyInfo +//! The [`Behaviour`] struct implements a [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour) +//! that negotiates and executes the protocol on every established connection, emitting +//! [`Event`]s. + +pub use self::behaviour::{Behaviour, Config, Event}; +pub use self::protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; + +#[deprecated( + since = "0.40.0", + note = "Use re-exports that omit `Identify` prefix, i.e. `libp2p::identify::Config`" +)] +pub type IdentifyConfig = Config; + +#[deprecated( + since = "0.40.0", + note = "Use re-exports that omit `Identify` prefix, i.e. `libp2p::identify::Event`" +)] +pub type IdentifyEvent = Event; + +#[deprecated(since = "0.40.0", note = "Use libp2p::identify::Behaviour instead.")] +pub type Identify = Behaviour; -pub use self::identify::{Identify, IdentifyConfig, IdentifyEvent}; -pub use self::protocol::{IdentifyInfo, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; +#[deprecated( + since = "0.40.0", + note = "Use re-exports that omit `Identify` prefix, i.e. `libp2p::identify::Info`" +)] +pub type IdentifyInfo = Info; +mod behaviour; mod handler; -mod identify; mod protocol; #[allow(clippy::derive_partial_eq_without_eq)] diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 163ac0aa396..63a03df3707 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -40,29 +40,29 @@ pub const PUSH_PROTOCOL_NAME: &[u8; 19] = b"/ipfs/id/push/1.0.0"; /// Substream upgrade protocol for `/ipfs/id/1.0.0`. #[derive(Debug, Clone)] -pub struct IdentifyProtocol; +pub struct Protocol; /// Substream upgrade protocol for `/ipfs/id/push/1.0.0`. 
#[derive(Debug, Clone)] -pub struct IdentifyPushProtocol(T); +pub struct PushProtocol(T); pub struct InboundPush(); -pub struct OutboundPush(IdentifyInfo); +pub struct OutboundPush(Info); -impl IdentifyPushProtocol { +impl PushProtocol { pub fn inbound() -> Self { - IdentifyPushProtocol(InboundPush()) + PushProtocol(InboundPush()) } } -impl IdentifyPushProtocol { - pub fn outbound(info: IdentifyInfo) -> Self { - IdentifyPushProtocol(OutboundPush(info)) +impl PushProtocol { + pub fn outbound(info: Info) -> Self { + PushProtocol(OutboundPush(info)) } } /// Information of a peer sent in protocol messages. #[derive(Debug, Clone)] -pub struct IdentifyInfo { +pub struct Info { /// The public key of the local peer. pub public_key: PublicKey, /// Application-specific version of the protocol family used by the peer, @@ -98,12 +98,12 @@ where /// /// Consumes the substream, returning a future that resolves /// when the reply has been sent on the underlying connection. - pub async fn send(self, info: IdentifyInfo) -> Result<(), UpgradeError> { + pub async fn send(self, info: Info) -> Result<(), UpgradeError> { send(self.inner, info).await.map_err(Into::into) } } -impl UpgradeInfo for IdentifyProtocol { +impl UpgradeInfo for Protocol { type Info = &'static [u8]; type InfoIter = iter::Once; @@ -112,7 +112,7 @@ impl UpgradeInfo for IdentifyProtocol { } } -impl InboundUpgrade for IdentifyProtocol { +impl InboundUpgrade for Protocol { type Output = ReplySubstream; type Error = UpgradeError; type Future = future::Ready>; @@ -122,11 +122,11 @@ impl InboundUpgrade for IdentifyProtocol { } } -impl OutboundUpgrade for IdentifyProtocol +impl OutboundUpgrade for Protocol where C: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = IdentifyInfo; + type Output = Info; type Error = UpgradeError; type Future = Pin> + Send>>; @@ -135,7 +135,7 @@ where } } -impl UpgradeInfo for IdentifyPushProtocol { +impl UpgradeInfo for PushProtocol { type Info = &'static [u8]; type InfoIter = iter::Once; @@ -144,11 +144,11 @@ impl UpgradeInfo for IdentifyPushProtocol { } } -impl InboundUpgrade for IdentifyPushProtocol +impl InboundUpgrade for PushProtocol where C: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = BoxFuture<'static, Result>; + type Output = BoxFuture<'static, Result>; type Error = Void; type Future = future::Ready>; @@ -158,7 +158,7 @@ where } } -impl OutboundUpgrade for IdentifyPushProtocol +impl OutboundUpgrade for PushProtocol where C: AsyncWrite + Unpin + Send + 'static, { @@ -171,7 +171,7 @@ where } } -async fn send(io: T, info: IdentifyInfo) -> Result<(), UpgradeError> +async fn send(io: T, info: Info) -> Result<(), UpgradeError> where T: AsyncWrite + Unpin, { @@ -205,7 +205,7 @@ where Ok(()) } -async fn recv(mut socket: T) -> Result +async fn recv(mut socket: T) -> Result where T: AsyncRead + AsyncWrite + Unpin, { @@ -225,7 +225,7 @@ where Ok(info) } -impl TryFrom for IdentifyInfo { +impl TryFrom for Info { type Error = UpgradeError; fn try_from(msg: structs_proto::Identify) -> Result { @@ -244,7 +244,7 @@ impl TryFrom for IdentifyInfo { let public_key = PublicKey::from_protobuf_encoding(&msg.public_key.unwrap_or_default())?; let observed_addr = parse_multiaddr(msg.observed_addr.unwrap_or_default())?; - let info = IdentifyInfo { + let info = Info { public_key, protocol_version: msg.protocol_version.unwrap_or_default(), agent_version: msg.agent_version.unwrap_or_default(), @@ -332,10 +332,10 @@ mod tests { .await .unwrap(); - let sender = apply_inbound(socket, 
IdentifyProtocol).await.unwrap(); + let sender = apply_inbound(socket, Protocol).await.unwrap(); sender - .send(IdentifyInfo { + .send(Info { public_key: send_pubkey, protocol_version: "proto_version".to_owned(), agent_version: "agent_version".to_owned(), @@ -354,7 +354,7 @@ mod tests { let mut transport = TcpTransport::default(); let socket = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); - let info = apply_outbound(socket, IdentifyProtocol, upgrade::Version::V1) + let info = apply_outbound(socket, Protocol, upgrade::Version::V1) .await .unwrap(); assert_eq!( diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 2f41a093019..dcf4eedf2fb 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -3,7 +3,14 @@ - Remove deprecated `set_protocol_name()` from `KademliaConfig` & `KademliaProtocolConfig`. Use `set_protocol_names()` instead. See [PR 2866]. +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + [PR 2866]: https://github.com/libp2p/rust-libp2p/pull/2866 +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 # 0.40.0 diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index c65e34ecdbc..e97394b7071 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -18,10 +18,10 @@ fnv = "1.0" asynchronous-codec = "0.6" futures = "0.3.1" log = "0.4" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } prost = "0.11" -rand = "0.7.2" +rand = "0.8" sha2 = "0.10.0" smallvec = "1.6.1" uint = "0.9" @@ -35,9 +35,8 @@ thiserror = "1" [dev-dependencies] env_logger = "0.9.0" futures-timer = "3.0" -libp2p-noise = { path = "../../transports/noise" } -libp2p-yamux = { path = "../../muxers/yamux" } -quickcheck = "0.9.0" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } [build-dependencies] prost-build = "0.11" diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index aab7fa0ef28..c61ffaf158f 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -27,6 +27,9 @@ use crate::record::{store::MemoryStore, Key}; use crate::K_VALUE; use futures::{executor::block_on, future::poll_fn, prelude::*}; use futures_timer::Delay; +use libp2p::noise; +use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::yamux; use libp2p_core::{ connection::{ConnectedPoint, ConnectionId}, identity, @@ -35,9 +38,6 @@ use libp2p_core::{ transport::MemoryTransport, upgrade, Endpoint, PeerId, Transport, }; -use libp2p_noise as noise; -use libp2p_swarm::{Swarm, SwarmEvent}; -use libp2p_yamux as yamux; use quickcheck::*; use rand::{random, rngs::StdRng, thread_rng, Rng, SeedableRng}; use std::{ @@ -63,8 +63,8 @@ fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) { .boxed(); let local_id = local_public_key.to_peer_id(); - let store = MemoryStore::new(local_id.clone()); - let behaviour = Kademlia::with_config(local_id.clone(), store, cfg.clone()); + let store = MemoryStore::new(local_id); + let behaviour = Kademlia::with_config(local_id, store, cfg); let mut swarm = Swarm::new(transport, behaviour, local_id); @@ -129,7 +129,7 @@ fn build_fully_connected_nodes_with_config( for (_addr, 
swarm) in swarms.iter_mut() { for (addr, peer) in &swarm_addr_and_peer_id { - swarm.behaviour_mut().add_address(&peer, addr.clone()); + swarm.behaviour_mut().add_address(peer, addr.clone()); } } @@ -144,8 +144,9 @@ fn random_multihash() -> Multihash { struct Seed([u8; 32]); impl Arbitrary for Seed { - fn arbitrary(g: &mut G) -> Seed { - Seed(g.gen()) + fn arbitrary(g: &mut Gen) -> Seed { + let seed = core::array::from_fn(|_| u8::arbitrary(g)); + Seed(seed) } } @@ -154,14 +155,14 @@ fn bootstrap() { fn prop(seed: Seed) { let mut rng = StdRng::from_seed(seed.0); - let num_total = rng.gen_range(2, 20); + let num_total = rng.gen_range(2..20); // When looking for the closest node to a key, Kademlia considers // K_VALUE nodes to query at initialization. If `num_group` is larger // than K_VALUE the remaining locally known nodes will not be // considered. Given that no other node is aware of them, they would be // lost entirely. To prevent the above restrict `num_group` to be equal // or smaller than K_VALUE. - let num_group = rng.gen_range(1, (num_total % K_VALUE.get()) + 2); + let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2); let mut cfg = KademliaConfig::default(); if rng.gen() { @@ -209,7 +210,7 @@ fn bootstrap() { let mut known = HashSet::new(); for b in swarm.behaviour_mut().kbuckets.iter() { for e in b.iter() { - known.insert(e.node.key.preimage().clone()); + known.insert(*e.node.key.preimage()); } } assert_eq!(expected_known, known); @@ -241,7 +242,7 @@ fn query_iter() { } fn run(rng: &mut impl Rng) { - let num_total = rng.gen_range(2, 20); + let num_total = rng.gen_range(2..20); let mut swarms = build_connected_nodes(num_total, 1) .into_iter() .map(|(_a, s)| s) @@ -265,7 +266,7 @@ fn query_iter() { } // Set up expectations. - let expected_swarm_id = swarm_ids[0].clone(); + let expected_swarm_id = swarm_ids[0]; let expected_peer_ids: Vec<_> = swarm_ids.iter().skip(1).cloned().collect(); let mut expected_distances = distances(&search_target_key, expected_peer_ids.clone()); expected_distances.sort(); @@ -489,7 +490,7 @@ fn put_record() { fn prop(records: Vec, seed: Seed, filter_records: bool, drop_records: bool) { let mut rng = StdRng::from_seed(seed.0); let replication_factor = - NonZeroUsize::new(rng.gen_range(1, (K_VALUE.get() / 2) + 1)).unwrap(); + NonZeroUsize::new(rng.gen_range(1..(K_VALUE.get() / 2) + 1)).unwrap(); // At least 4 nodes, 1 under test + 3 bootnodes. let num_total = usize::max(4, replication_factor.get() * 2); @@ -509,11 +510,11 @@ fn put_record() { let mut single_swarm = build_node_with_config(config); // Connect `single_swarm` to three bootnodes. - for i in 0..3 { - single_swarm.1.behaviour_mut().add_address( - fully_connected_swarms[i].1.local_peer_id(), - fully_connected_swarms[i].0.clone(), - ); + for swarm in fully_connected_swarms.iter().take(3) { + single_swarm + .1 + .behaviour_mut() + .add_address(swarm.1.local_peer_id(), swarm.0.clone()); } let mut swarms = vec![single_swarm]; @@ -526,6 +527,7 @@ fn put_record() { .collect::>() }; + #[allow(clippy::mutable_key_type)] // False positive, we never modify `Bytes`. let records = records .into_iter() .take(num_total) @@ -709,7 +711,7 @@ fn put_record() { ); assert_eq!(swarms[0].behaviour_mut().queries.size(), 0); for k in records.keys() { - swarms[0].behaviour_mut().store.remove(&k); + swarms[0].behaviour_mut().store.remove(k); } assert_eq!(swarms[0].behaviour_mut().store.records().count(), 0); // All records have been republished, thus the test is complete. 
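The kad test changes in the surrounding hunks follow the rand 0.8 and quickcheck 1 upgrade noted in the kad changelog: `gen_range` now takes a range expression, and `Arbitrary::arbitrary` receives a concrete `&mut Gen` with values produced via `T::arbitrary(g)`. Here is a minimal, self-contained sketch of that pattern, assuming plain `rand = "0.8"` and `quickcheck = "1"` dependencies (the crate itself routes `quickcheck` through `quickcheck-ext`).

```rust
use quickcheck::{Arbitrary, Gen};
use rand::Rng;

#[derive(Clone, Debug)]
struct Seed([u8; 32]);

impl Arbitrary for Seed {
    // quickcheck 1 passes a concrete `&mut Gen` instead of a generic `G: Gen`,
    // and values come from `T::arbitrary(g)` rather than `g.gen()`.
    fn arbitrary(g: &mut Gen) -> Self {
        Seed(core::array::from_fn(|_| u8::arbitrary(g)))
    }
}

fn main() {
    // rand 0.8 takes a single range expression instead of two bounds.
    let num_total = rand::thread_rng().gen_range(2..20);

    // quickcheck 1 exposes `Gen::new(size)` directly for ad-hoc generation.
    let seed = Seed::arbitrary(&mut Gen::new(100));
    println!("{} {:?}", num_total, seed);
}
```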
@@ -739,7 +741,7 @@ fn get_record() { // Let first peer know of second peer and second peer know of third peer. for i in 0..2 { let (peer_id, address) = ( - Swarm::local_peer_id(&swarms[i + 1].1).clone(), + *Swarm::local_peer_id(&swarms[i + 1].1), swarms[i + 1].0.clone(), ); swarms[i].1.behaviour_mut().add_address(&peer_id, address); @@ -809,8 +811,8 @@ fn get_record_many() { let record = Record::new(random_multihash(), vec![4, 5, 6]); - for i in 0..num_nodes { - swarms[i].behaviour_mut().store.put(record.clone()).unwrap(); + for swarm in swarms.iter_mut().take(num_nodes) { + swarm.behaviour_mut().store.put(record.clone()).unwrap(); } let quorum = Quorum::N(NonZeroUsize::new(num_results).unwrap()); @@ -853,7 +855,7 @@ fn add_provider() { fn prop(keys: Vec, seed: Seed) { let mut rng = StdRng::from_seed(seed.0); let replication_factor = - NonZeroUsize::new(rng.gen_range(1, (K_VALUE.get() / 2) + 1)).unwrap(); + NonZeroUsize::new(rng.gen_range(1..(K_VALUE.get() / 2) + 1)).unwrap(); // At least 4 nodes, 1 under test + 3 bootnodes. let num_total = usize::max(4, replication_factor.get() * 2); @@ -869,11 +871,11 @@ fn add_provider() { let mut single_swarm = build_node_with_config(config); // Connect `single_swarm` to three bootnodes. - for i in 0..3 { - single_swarm.1.behaviour_mut().add_address( - fully_connected_swarms[i].1.local_peer_id(), - fully_connected_swarms[i].0.clone(), - ); + for swarm in fully_connected_swarms.iter().take(3) { + single_swarm + .1 + .behaviour_mut() + .add_address(swarm.1.local_peer_id(), swarm.0.clone()); } let mut swarms = vec![single_swarm]; @@ -886,6 +888,7 @@ fn add_provider() { .collect::>() }; + #[allow(clippy::mutable_key_type)] // False positive, we never modify `Bytes`. let keys: HashSet<_> = keys.into_iter().take(num_total).collect(); // Each test run publishes all records twice. @@ -960,7 +963,7 @@ fn add_provider() { .skip(1) .filter_map(|swarm| { if swarm.behaviour().store.providers(&key).len() == 1 { - Some(Swarm::local_peer_id(&swarm).clone()) + Some(*Swarm::local_peer_id(swarm)) } else { None } @@ -1006,7 +1009,7 @@ fn add_provider() { keys.len() ); for k in &keys { - swarms[0].behaviour_mut().stop_providing(&k); + swarms[0].behaviour_mut().stop_providing(k); } assert_eq!(swarms[0].behaviour_mut().store.provided().count(), 0); // All records have been republished, thus the test is complete. @@ -1105,11 +1108,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { alice .1 .behaviour_mut() - .add_address(&trudy.1.local_peer_id(), trudy.0.clone()); + .add_address(trudy.1.local_peer_id(), trudy.0.clone()); alice .1 .behaviour_mut() - .add_address(&bob.1.local_peer_id(), bob.0.clone()); + .add_address(bob.1.local_peer_id(), bob.0.clone()); // Drop the swarm addresses. 
let (mut alice, mut bob, mut trudy) = (alice.1, bob.1, trudy.1); @@ -1168,12 +1171,12 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { assert_eq!( *records, vec![PeerRecord { - peer: Some(Swarm::local_peer_id(&trudy).clone()), + peer: Some(*Swarm::local_peer_id(&trudy)), record: record_trudy.clone(), }], ); } - i @ _ => panic!("Unexpected query info: {:?}", i), + i => panic!("Unexpected query info: {:?}", i), }); // Poll `alice` and `bob` expecting `alice` to return a successful query @@ -1210,11 +1213,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { assert_eq!(2, records.len()); assert!(records.contains(&PeerRecord { - peer: Some(Swarm::local_peer_id(&bob).clone()), + peer: Some(*Swarm::local_peer_id(&bob)), record: record_bob, })); assert!(records.contains(&PeerRecord { - peer: Some(Swarm::local_peer_id(&trudy).clone()), + peer: Some(*Swarm::local_peer_id(&trudy)), record: record_trudy, })); } @@ -1282,7 +1285,7 @@ fn network_behaviour_inject_address_change() { let old_address: Multiaddr = Protocol::Memory(1).into(); let new_address: Multiaddr = Protocol::Memory(2).into(); - let mut kademlia = Kademlia::new(local_peer_id.clone(), MemoryStore::new(local_peer_id)); + let mut kademlia = Kademlia::new(local_peer_id, MemoryStore::new(local_peer_id)); let endpoint = ConnectedPoint::Dialer { address: old_address.clone(), @@ -1300,8 +1303,8 @@ fn network_behaviour_inject_address_change() { // Mimick the connection handler confirming the protocol for // the test connection, so that the peer is added to the routing table. kademlia.inject_event( - remote_peer_id.clone(), - connection_id.clone(), + remote_peer_id, + connection_id, KademliaHandlerEvent::ProtocolConfirmed { endpoint }, ); @@ -1314,7 +1317,7 @@ fn network_behaviour_inject_address_change() { &remote_peer_id, &connection_id, &ConnectedPoint::Dialer { - address: old_address.clone(), + address: old_address, role_override: Endpoint::Dialer, }, &ConnectedPoint::Dialer { @@ -1324,7 +1327,7 @@ fn network_behaviour_inject_address_change() { ); assert_eq!( - vec![new_address.clone()], + vec![new_address], kademlia.addresses_of_peer(&remote_peer_id), ); } diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index 7e8ca628ce6..8855026e8d5 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -335,15 +335,15 @@ mod tests { fn rand_put_record_job() -> PutRecordJob { let mut rng = rand::thread_rng(); let id = PeerId::random(); - let replicate_interval = Duration::from_secs(rng.gen_range(1, 60)); - let publish_interval = Some(replicate_interval * rng.gen_range(1, 10)); - let record_ttl = Some(Duration::from_secs(rng.gen_range(1, 600))); - PutRecordJob::new(id.clone(), replicate_interval, publish_interval, record_ttl) + let replicate_interval = Duration::from_secs(rng.gen_range(1..60)); + let publish_interval = Some(replicate_interval * rng.gen_range(1..10)); + let record_ttl = Some(Duration::from_secs(rng.gen_range(1..600))); + PutRecordJob::new(id, replicate_interval, publish_interval, record_ttl) } fn rand_add_provider_job() -> AddProviderJob { let mut rng = rand::thread_rng(); - let interval = Duration::from_secs(rng.gen_range(1, 60)); + let interval = Duration::from_secs(rng.gen_range(1..60)); AddProviderJob::new(interval) } @@ -360,7 +360,7 @@ mod tests { fn prop(records: Vec) { let mut job = rand_put_record_job(); // Fill a record store. 
- let mut store = MemoryStore::new(job.local_id.clone()); + let mut store = MemoryStore::new(job.local_id); for r in records { let _ = store.put(r); } @@ -389,9 +389,9 @@ mod tests { let mut job = rand_add_provider_job(); let id = PeerId::random(); // Fill a record store. - let mut store = MemoryStore::new(id.clone()); + let mut store = MemoryStore::new(id); for mut r in records { - r.provider = id.clone(); + r.provider = id; let _ = store.add_provider(r); } diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index d1c8622dc27..82b88b2936e 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -138,7 +138,7 @@ impl BucketIndex { let rem = (self.0 % 8) as u32; let lower = usize::pow(2, rem); let upper = usize::pow(2, rem + 1); - bytes[31 - quot] = rng.gen_range(lower, upper) as u8; + bytes[31 - quot] = rng.gen_range(lower..upper) as u8; Distance(U256::from(bytes)) } } @@ -524,22 +524,21 @@ mod tests { use super::*; use libp2p_core::PeerId; use quickcheck::*; - use rand::Rng; type TestTable = KBucketsTable; impl Arbitrary for TestTable { - fn arbitrary(g: &mut G) -> TestTable { + fn arbitrary(g: &mut Gen) -> TestTable { let local_key = Key::from(PeerId::random()); - let timeout = Duration::from_secs(g.gen_range(1, 360)); + let timeout = Duration::from_secs(g.gen_range(1..360)); let mut table = TestTable::new(local_key.clone().into(), timeout); - let mut num_total = g.gen_range(0, 100); + let mut num_total = g.gen_range(0..100); for (i, b) in &mut table.buckets.iter_mut().enumerate().rev() { let ix = BucketIndex(i); - let num = g.gen_range(0, usize::min(K_VALUE.get(), num_total) + 1); + let num = g.gen_range(0..usize::min(K_VALUE.get(), num_total) + 1); num_total -= num; for _ in 0..num { - let distance = ix.rand_distance(g); + let distance = ix.rand_distance(&mut rand::thread_rng()); let key = local_key.for_distance(distance); let node = Node { key: key.clone(), @@ -590,8 +589,15 @@ mod tests { assert!(bucket_ref.contains(&min)); assert!(bucket_ref.contains(&max)); - assert!(!bucket_ref.contains(&Distance(min.0 - 1))); - assert!(!bucket_ref.contains(&Distance(max.0 + 1))); + if min != Distance(0.into()) { + // ^ avoid underflow + assert!(!bucket_ref.contains(&Distance(min.0 - 1))); + } + + if max != Distance(U256::max_value()) { + // ^ avoid overflow + assert!(!bucket_ref.contains(&Distance(max.0 + 1))); + } } quickcheck(prop as fn(_)); @@ -605,7 +611,7 @@ mod tests { let b = U256::from(2); let e = U256::from(ix); let lower = b.pow(e); - let upper = b.pow(e + U256::from(1)) - U256::from(1); + let upper = b.checked_pow(e + U256::from(1)).unwrap_or(U256::MAX) - U256::from(1); lower <= n && n <= upper } quickcheck(prop as fn(_) -> _); diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index 0e3ad368863..0a6b69003bd 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -437,14 +437,13 @@ mod tests { use super::*; use libp2p_core::PeerId; use quickcheck::*; - use rand::Rng; use std::collections::VecDeque; impl Arbitrary for KBucket, ()> { - fn arbitrary(g: &mut G) -> KBucket, ()> { - let timeout = Duration::from_secs(g.gen_range(1, g.size() as u64)); + fn arbitrary(g: &mut Gen) -> KBucket, ()> { + let timeout = Duration::from_secs(g.gen_range(1..g.size()) as u64); let mut bucket = KBucket::, ()>::new(timeout); - let num_nodes = g.gen_range(1, K_VALUE.get() + 1); + let num_nodes = g.gen_range(1..K_VALUE.get() + 1); for _ in 0..num_nodes { let key = Key::from(PeerId::random()); 
let node = Node { @@ -462,8 +461,8 @@ mod tests { } impl Arbitrary for NodeStatus { - fn arbitrary(g: &mut G) -> NodeStatus { - if g.gen() { + fn arbitrary(g: &mut Gen) -> NodeStatus { + if bool::arbitrary(g) { NodeStatus::Connected } else { NodeStatus::Disconnected @@ -472,8 +471,8 @@ mod tests { } impl Arbitrary for Position { - fn arbitrary(g: &mut G) -> Position { - Position(g.gen_range(0, K_VALUE.get())) + fn arbitrary(g: &mut Gen) -> Position { + Position(g.gen_range(0..K_VALUE.get())) } } @@ -505,18 +504,15 @@ mod tests { value: (), }; let full = bucket.num_entries() == K_VALUE.get(); - match bucket.insert(node, status) { - InsertResult::Inserted => { - let vec = match status { - NodeStatus::Connected => &mut connected, - NodeStatus::Disconnected => &mut disconnected, - }; - if full { - vec.pop_front(); - } - vec.push_back((status, key.clone())); + if let InsertResult::Inserted = bucket.insert(node, status) { + let vec = match status { + NodeStatus::Connected => &mut connected, + NodeStatus::Disconnected => &mut disconnected, + }; + if full { + vec.pop_front(); } - _ => {} + vec.push_back((status, key.clone())); } } @@ -534,7 +530,7 @@ mod tests { // All nodes before the first connected node must be disconnected and // in insertion order. Similarly, all remaining nodes must be connected // and in insertion order. - nodes == Vec::from(disconnected) && tail == Vec::from(connected) + disconnected == nodes && connected == tail } quickcheck(prop as fn(_) -> _); @@ -636,7 +632,7 @@ mod tests { // The pending node has been discarded. assert!(bucket.pending().is_none()); - assert!(bucket.iter().all(|(n, _)| &n.key != &key)); + assert!(bucket.iter().all(|(n, _)| n.key != key)); // The initially disconnected node is now the most-recently connected. assert_eq!( diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index 9133757a0ff..00a15765de4 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -197,17 +197,16 @@ mod tests { use super::*; use libp2p_core::multihash::Code; use quickcheck::*; - use rand::Rng; impl Arbitrary for Key { - fn arbitrary(_: &mut G) -> Key { + fn arbitrary(_: &mut Gen) -> Key { Key::from(PeerId::random()) } } impl Arbitrary for Key { - fn arbitrary(_: &mut G) -> Key { - let hash = rand::thread_rng().gen::<[u8; 32]>(); + fn arbitrary(g: &mut Gen) -> Key { + let hash: [u8; 32] = core::array::from_fn(|_| u8::arbitrary(g)); Key::from(Multihash::wrap(Code::Sha2_256.into(), &hash).unwrap()) } } diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 2b3cb124274..7fe87b7fe4d 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -494,22 +494,35 @@ mod tests { .collect() } - fn sorted>(target: &T, peers: &Vec>) -> bool { + fn sorted>(target: &T, peers: &[Key]) -> bool { peers .windows(2) .all(|w| w[0].distance(&target) < w[1].distance(&target)) } + #[derive(Clone, Debug)] + struct ArbitraryPeerId(PeerId); + + impl Arbitrary for ArbitraryPeerId { + fn arbitrary(g: &mut Gen) -> ArbitraryPeerId { + let hash: [u8; 32] = core::array::from_fn(|_| u8::arbitrary(g)); + let peer_id = + PeerId::from_multihash(Multihash::wrap(Code::Sha2_256.into(), &hash).unwrap()) + .unwrap(); + ArbitraryPeerId(peer_id) + } + } + impl Arbitrary for ClosestPeersIter { - fn arbitrary(g: &mut G) -> ClosestPeersIter { - let known_closest_peers = random_peers(g.gen_range(1, 60), g) - .into_iter() - .map(Key::from); - let target = 
Key::from(Into::::into(PeerId::random())); + fn arbitrary(g: &mut Gen) -> ClosestPeersIter { + let known_closest_peers = (0..g.gen_range(1..60u8)) + .map(|_| Key::from(ArbitraryPeerId::arbitrary(g).0)) + .collect::>(); + let target = Key::from(ArbitraryPeerId::arbitrary(g).0); let config = ClosestPeersIterConfig { - parallelism: NonZeroUsize::new(g.gen_range(1, 10)).unwrap(), - num_results: NonZeroUsize::new(g.gen_range(1, 25)).unwrap(), - peer_timeout: Duration::from_secs(g.gen_range(10, 30)), + parallelism: NonZeroUsize::new(g.gen_range(1..10)).unwrap(), + num_results: NonZeroUsize::new(g.gen_range(1..25)).unwrap(), + peer_timeout: Duration::from_secs(g.gen_range(10..30)), }; ClosestPeersIter::with_config(config, target, known_closest_peers) } @@ -519,8 +532,9 @@ mod tests { struct Seed([u8; 32]); impl Arbitrary for Seed { - fn arbitrary(g: &mut G) -> Seed { - Seed(g.gen()) + fn arbitrary(g: &mut Gen) -> Seed { + let seed = core::array::from_fn(|_| u8::arbitrary(g)); + Seed(seed) } } @@ -535,10 +549,7 @@ mod tests { .map(|e| (e.key.clone(), &e.state)) .unzip(); - let none_contacted = states.iter().all(|s| match s { - PeerState::NotContacted => true, - _ => false, - }); + let none_contacted = states.iter().all(|s| matches!(s, PeerState::NotContacted)); assert!(none_contacted, "Unexpected peer state in new iterator."); assert!( @@ -579,7 +590,7 @@ mod tests { let mut num_failures = 0; 'finished: loop { - if expected.len() == 0 { + if expected.is_empty() { break; } // Split off the next up to `parallelism` expected peers. @@ -612,7 +623,7 @@ mod tests { // peers or an error, thus finishing the "in-flight requests". for (i, k) in expected.iter().enumerate() { if rng.gen_bool(0.75) { - let num_closer = rng.gen_range(0, iter.config.num_results.get() + 1); + let num_closer = rng.gen_range(0..iter.config.num_results.get() + 1); let closer_peers = random_peers(num_closer, &mut rng); remaining.extend(closer_peers.iter().cloned().map(Key::from)); iter.on_success(k.preimage(), closer_peers); @@ -636,10 +647,10 @@ mod tests { // Determine if all peers have been contacted by the iterator. This _must_ be // the case if the iterator finished with fewer than the requested number // of results. - let all_contacted = iter.closest_peers.values().all(|e| match e.state { - PeerState::NotContacted | PeerState::Waiting { .. } => false, - _ => true, - }); + let all_contacted = iter + .closest_peers + .values() + .all(|e| !matches!(e.state, PeerState::NotContacted | PeerState::Waiting { .. })); let target = iter.target.clone(); let num_results = iter.config.num_results; @@ -668,11 +679,10 @@ mod tests { #[test] fn no_duplicates() { - fn prop(mut iter: ClosestPeersIter, seed: Seed) -> bool { + fn prop(mut iter: ClosestPeersIter, closer: ArbitraryPeerId) -> bool { let now = Instant::now(); - let mut rng = StdRng::from_seed(seed.0); - let closer = random_peers(1, &mut rng); + let closer = vec![closer.0]; // A first peer reports a "closer" peer. let peer1 = match iter.next(now) { @@ -729,7 +739,7 @@ mod tests { } // Artificially advance the clock. - now = now + iter.config.peer_timeout; + now += iter.config.peer_timeout; // Advancing the iterator again should mark the first peer as unresponsive. 
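These test hunks port the `Arbitrary` implementations from quickcheck 0.9's generic `G: Gen` (which exposed `rand::Rng`) to quickcheck 1's concrete `Gen`, deriving randomness from the `Arbitrary` impls of primitives and wrapping foreign types such as `PeerId` in a local `ArbitraryPeerId` newtype; the timeout test continues below. A standalone sketch against upstream quickcheck 1 (note that the `g.gen_range(..)` calls in the diff come from the repository's `quickcheck-ext` wrapper, not from quickcheck itself):

```rust
// Assumes `quickcheck = "1"` in Cargo.toml; the types here are illustrative.
use quickcheck::{quickcheck, Arbitrary, Gen};

#[derive(Clone, Debug)]
struct Seed([u8; 32]);

impl Arbitrary for Seed {
    fn arbitrary(g: &mut Gen) -> Self {
        // quickcheck 1 no longer exposes `rand::Rng`; build values from the
        // `Arbitrary` impls of primitives instead.
        let bytes: [u8; 32] = core::array::from_fn(|_| u8::arbitrary(g));
        Seed(bytes)
    }
}

#[derive(Clone, Copy, Debug)]
enum Status {
    Connected,
    Disconnected,
}

impl Arbitrary for Status {
    fn arbitrary(g: &mut Gen) -> Self {
        // `Gen::choose` replaces the old `SliceRandom::choose(g)`.
        *g.choose(&[Status::Connected, Status::Disconnected]).unwrap()
    }
}

fn main() {
    fn prop(seed: Seed, status: Status) -> bool {
        seed.0.len() == 32 && matches!(status, Status::Connected | Status::Disconnected)
    }
    quickcheck(prop as fn(Seed, Status) -> bool);
}
```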
let _ = iter.next(now); diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index af91b8c1f0b..2272b8e46fc 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -447,32 +447,29 @@ mod tests { use crate::K_VALUE; use libp2p_core::multihash::{Code, Multihash}; use quickcheck::*; - use rand::{seq::SliceRandom, Rng}; use std::collections::HashSet; use std::iter; impl Arbitrary for ResultIter>> { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let target = Target::arbitrary(g).0; - let num_closest_iters = g.gen_range(0, 20 + 1); - let peers = random_peers(g.gen_range(0, 20 * num_closest_iters + 1), g); - - let iters: Vec<_> = (0..num_closest_iters) - .map(|_| { - let num_peers = g.gen_range(0, 20 + 1); - let mut peers = peers - .choose_multiple(g, num_peers) - .cloned() - .map(Key::from) - .collect::>(); + let num_closest_iters = g.gen_range(0..20 + 1); + let peers = random_peers(g.gen_range(0..20 * num_closest_iters + 1), g); - peers.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + let iters = (0..num_closest_iters).map(|_| { + let num_peers = g.gen_range(0..20 + 1); + let mut peers = g + .choose_multiple(&peers, num_peers) + .cloned() + .map(Key::from) + .collect::>(); - peers.into_iter() - }) - .collect(); + peers.sort_unstable_by_key(|a| target.distance(a)); - ResultIter::new(target, iters.into_iter()) + peers.into_iter() + }); + + ResultIter::new(target.clone(), iters) } fn shrink(&self) -> Box> { @@ -515,20 +512,28 @@ mod tests { // The peer that should not be included. let peer = self.peers.pop()?; - let iters = self - .iters - .clone() - .into_iter() - .filter_map(|mut iter| { - iter.retain(|p| p != &peer); - if iter.is_empty() { - return None; - } - Some(iter.into_iter()) - }) - .collect::>(); + let iters = self.iters.clone().into_iter().filter_map(|mut iter| { + iter.retain(|p| p != &peer); + if iter.is_empty() { + return None; + } + Some(iter.into_iter()) + }); - Some(ResultIter::new(self.target.clone(), iters.into_iter())) + Some(ResultIter::new(self.target.clone(), iters)) + } + } + + #[derive(Clone, Debug)] + struct ArbitraryPeerId(PeerId); + + impl Arbitrary for ArbitraryPeerId { + fn arbitrary(g: &mut Gen) -> ArbitraryPeerId { + let hash: [u8; 32] = core::array::from_fn(|_| u8::arbitrary(g)); + let peer_id = + PeerId::from_multihash(Multihash::wrap(Code::Sha2_256.into(), &hash).unwrap()) + .unwrap(); + ArbitraryPeerId(peer_id) } } @@ -536,20 +541,14 @@ mod tests { struct Target(KeyBytes); impl Arbitrary for Target { - fn arbitrary(g: &mut G) -> Self { - Target(Key::from(random_peers(1, g).pop().unwrap()).into()) + fn arbitrary(g: &mut Gen) -> Self { + let peer_id = ArbitraryPeerId::arbitrary(g).0; + Target(Key::from(peer_id).into()) } } - fn random_peers(n: usize, g: &mut R) -> Vec { - (0..n) - .map(|_| { - PeerId::from_multihash( - Multihash::wrap(Code::Sha2_256.into(), &g.gen::<[u8; 32]>()).unwrap(), - ) - .unwrap() - }) - .collect() + fn random_peers(n: usize, g: &mut Gen) -> Vec { + (0..n).map(|_| ArbitraryPeerId::arbitrary(g).0).collect() } #[test] @@ -586,8 +585,8 @@ mod tests { struct Parallelism(NonZeroUsize); impl Arbitrary for Parallelism { - fn arbitrary(g: &mut G) -> Self { - Parallelism(NonZeroUsize::new(g.gen_range(1, 10)).unwrap()) + fn arbitrary(g: &mut Gen) -> Self { + Parallelism(NonZeroUsize::new(g.gen_range(1..10)).unwrap()) } } @@ -595,13 +594,13 @@ mod tests { struct 
NumResults(NonZeroUsize); impl Arbitrary for NumResults { - fn arbitrary(g: &mut G) -> Self { - NumResults(NonZeroUsize::new(g.gen_range(1, K_VALUE.get())).unwrap()) + fn arbitrary(g: &mut Gen) -> Self { + NumResults(NonZeroUsize::new(g.gen_range(1..K_VALUE.get())).unwrap()) } } impl Arbitrary for ClosestPeersIterConfig { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { ClosestPeersIterConfig { parallelism: Parallelism::arbitrary(g).0, num_results: NumResults::arbitrary(g).0, @@ -614,10 +613,10 @@ mod tests { struct PeerVec(pub Vec>); impl Arbitrary for PeerVec { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { PeerVec( - (0..g.gen_range(1, 60)) - .map(|_| PeerId::random()) + (0..g.gen_range(1..60u8)) + .map(|_| ArbitraryPeerId::arbitrary(g).0) .map(Key::from) .collect(), ) @@ -634,7 +633,7 @@ mod tests { .map(|_| Key::from(PeerId::random())) .collect::>(); - pool.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + pool.sort_unstable_by_key(|a| target.distance(a)); let known_closest_peers = pool.split_off(pool.len() - 3); @@ -644,11 +643,8 @@ mod tests { ..ClosestPeersIterConfig::default() }; - let mut peers_iter = ClosestDisjointPeersIter::with_config( - config.clone(), - target, - known_closest_peers.clone(), - ); + let mut peers_iter = + ClosestDisjointPeersIter::with_config(config, target, known_closest_peers.clone()); //////////////////////////////////////////////////////////////////////// // First round. @@ -675,19 +671,19 @@ mod tests { malicious_response_1 .clone() .into_iter() - .map(|k| k.preimage().clone()), + .map(|k| *k.preimage()), ); // Response from peer 2. peers_iter.on_success( known_closest_peers[1].preimage(), - response_2.clone().into_iter().map(|k| k.preimage().clone()), + response_2.clone().into_iter().map(|k| *k.preimage()), ); // Response from peer 3. peers_iter.on_success( known_closest_peers[2].preimage(), - response_3.clone().into_iter().map(|k| k.preimage().clone()), + response_3.clone().into_iter().map(|k| *k.preimage()), ); //////////////////////////////////////////////////////////////////////// @@ -743,10 +739,10 @@ mod tests { } impl Arbitrary for Graph { - fn arbitrary(g: &mut G) -> Self { - let mut peer_ids = random_peers(g.gen_range(K_VALUE.get(), 200), g) + fn arbitrary(g: &mut Gen) -> Self { + let mut peer_ids = random_peers(g.gen_range(K_VALUE.get()..200), g) .into_iter() - .map(|peer_id| (peer_id.clone(), Key::from(peer_id))) + .map(|peer_id| (peer_id, Key::from(peer_id))) .collect::>(); // Make each peer aware of its direct neighborhood. @@ -773,18 +769,18 @@ mod tests { // Make each peer aware of a random set of other peers within the graph. for (peer_id, peer) in peers.iter_mut() { - peer_ids.shuffle(g); + g.shuffle(&mut peer_ids); - let num_peers = g.gen_range(K_VALUE.get(), peer_ids.len() + 1); - let mut random_peer_ids = peer_ids - .choose_multiple(g, num_peers) + let num_peers = g.gen_range(K_VALUE.get()..peer_ids.len() + 1); + let mut random_peer_ids = g + .choose_multiple(&peer_ids, num_peers) // Make sure not to include itself. .filter(|(id, _)| peer_id != id) .cloned() .collect::>(); peer.known_peers.append(&mut random_peer_ids); - peer.known_peers = std::mem::replace(&mut peer.known_peers, vec![]) + peer.known_peers = std::mem::take(&mut peer.known_peers) // Deduplicate peer ids. 
.into_iter() .collect::>() @@ -798,7 +794,8 @@ mod tests { impl Graph { fn get_closest_peer(&self, target: &KeyBytes) -> PeerId { - self.0 + *self + .0 .iter() .map(|(peer_id, _)| (target.distance(&Key::from(*peer_id)), peer_id)) .fold(None, |acc, (distance_b, peer_id_b)| match acc { @@ -813,7 +810,6 @@ mod tests { }) .expect("Graph to have at least one peer.") .1 - .clone() } } @@ -886,8 +882,7 @@ mod tests { .take(K_VALUE.get()) .map(|(key, _peers)| Key::from(*key)) .collect::>(); - known_closest_peers - .sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + known_closest_peers.sort_unstable_by_key(|a| target.distance(a)); let cfg = ClosestPeersIterConfig { parallelism: parallelism.0, @@ -911,7 +906,7 @@ mod tests { target.clone(), known_closest_peers.clone(), )), - graph.clone(), + graph, &target, ); @@ -958,11 +953,8 @@ mod tests { match iter.next(now) { PeersIterState::Waiting(Some(peer_id)) => { let peer_id = peer_id.clone().into_owned(); - let closest_peers = graph - .0 - .get_mut(&peer_id) - .unwrap() - .get_closest_peers(&target); + let closest_peers = + graph.0.get_mut(&peer_id).unwrap().get_closest_peers(target); iter.on_success(&peer_id, closest_peers); } PeersIterState::WaitingAtCapacity | PeersIterState::Waiting(None) => { @@ -977,7 +969,7 @@ mod tests { .into_iter() .map(Key::from) .collect::>(); - result.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + result.sort_unstable_by_key(|a| target.distance(a)); result.into_iter().map(|k| k.into_preimage()).collect() } @@ -992,7 +984,7 @@ mod tests { let peer = PeerId::random(); let mut iter = ClosestDisjointPeersIter::new( Key::from(PeerId::random()).into(), - iter::once(Key::from(peer.clone())), + iter::once(Key::from(peer)), ); assert!(matches!(iter.next(now), PeersIterState::Waiting(Some(_)))); diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record.rs index 2a40292c243..0da60e704e9 100644 --- a/protocols/kad/src/record.rs +++ b/protocols/kad/src/record.rs @@ -162,28 +162,27 @@ mod tests { use super::*; use libp2p_core::multihash::Code; use quickcheck::*; - use rand::Rng; use std::time::Duration; impl Arbitrary for Key { - fn arbitrary(_: &mut G) -> Key { - let hash = rand::thread_rng().gen::<[u8; 32]>(); + fn arbitrary(g: &mut Gen) -> Key { + let hash: [u8; 32] = core::array::from_fn(|_| u8::arbitrary(g)); Key::from(Multihash::wrap(Code::Sha2_256.into(), &hash).unwrap()) } } impl Arbitrary for Record { - fn arbitrary(g: &mut G) -> Record { + fn arbitrary(g: &mut Gen) -> Record { Record { key: Key::arbitrary(g), value: Vec::arbitrary(g), - publisher: if g.gen() { + publisher: if bool::arbitrary(g) { Some(PeerId::random()) } else { None }, - expires: if g.gen() { - Some(Instant::now() + Duration::from_secs(g.gen_range(0, 60))) + expires: if bool::arbitrary(g) { + Some(Instant::now() + Duration::from_secs(g.gen_range(0..60))) } else { None }, @@ -192,12 +191,12 @@ mod tests { } impl Arbitrary for ProviderRecord { - fn arbitrary(g: &mut G) -> ProviderRecord { + fn arbitrary(g: &mut Gen) -> ProviderRecord { ProviderRecord { key: Key::arbitrary(g), provider: PeerId::random(), - expires: if g.gen() { - Some(Instant::now() + Duration::from_secs(g.gen_range(0, 60))) + expires: if bool::arbitrary(g) { + Some(Instant::now() + Duration::from_secs(g.gen_range(0..60))) } else { None }, diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs index 93542683a57..39d17d37c2b 100644 --- a/protocols/kad/src/record/store/memory.rs +++ 
b/protocols/kad/src/record/store/memory.rs @@ -267,7 +267,7 @@ mod tests { assert!(store.add_provider(r.clone()).is_ok()); } - records.sort_by(|r1, r2| distance(r1).cmp(&distance(r2))); + records.sort_by_key(distance); records.truncate(store.config.max_providers_per_key); records == store.providers(&key).to_vec() @@ -279,9 +279,9 @@ #[test] fn provided() { let id = PeerId::random(); - let mut store = MemoryStore::new(id.clone()); + let mut store = MemoryStore::new(id); let key = random_multihash(); - let rec = ProviderRecord::new(key, id.clone(), Vec::new()); + let rec = ProviderRecord::new(key, id, Vec::new()); assert!(store.add_provider(rec.clone()).is_ok()); assert_eq!( vec![Cow::Borrowed(&rec)], diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 2be2db4079b..915564341ae 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,3 +1,22 @@ +# 0.41.0 [unreleased] + +- Remove default features. If you previously depended on `async-io` you need to enable this explicitly now. See [PR 2918]. + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + +- Fix a bug that could cause a delay of ~10s until peers would get discovered when using the tokio runtime. See [PR 2939]. + +- Remove the `lazy_static` dependency. See [PR 2977]. + +- Update to `if-watch` `v2.0.0` and thus the `async` methods `Mdns::new` and `TokioMdns::new` become synchronous. See [PR 2978]. + +[PR 2918]: https://github.com/libp2p/rust-libp2p/pull/2918 +[PR 2939]: https://github.com/libp2p/rust-libp2p/pull/2939 +[PR 2977]: https://github.com/libp2p/rust-libp2p/pull/2977 +[PR 2978]: https://github.com/libp2p/rust-libp2p/pull/2978 + # 0.40.0 - Update to `libp2p-swarm` `v0.39.0`.
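As the changelog entry above notes, `if-watch` 2.0 enumerates interfaces synchronously, so the mDNS constructors no longer need `.await`. A hedged sketch, assuming `libp2p-mdns = { version = "0.41", features = ["async-io"] }` (there is no default feature anymore) and that `Mdns` and `MdnsConfig` are re-exported at the crate root as in earlier releases:

```rust
use libp2p_mdns::{Mdns, MdnsConfig};

fn build_mdns() -> std::io::Result<Mdns> {
    // 0.40: `Mdns::new(MdnsConfig::default()).await?`
    // 0.41: plain synchronous call, since `IfWatcher::new()` is no longer async.
    Mdns::new(MdnsConfig::default())
}
```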
diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 2ec4ac44958..2663c76f820 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mdns" edition = "2021" rust-version = "1.56.1" -version = "0.40.0" +version = "0.41.0" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -14,10 +14,9 @@ categories = ["network-programming", "asynchronous"] data-encoding = "2.3.2" dns-parser = "0.8.0" futures = "0.3.13" -if-watch = "1.1.1" -lazy_static = "1.4.0" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +if-watch = "2.0.0" +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } log = "0.4.14" rand = "0.8.3" smallvec = "1.6.1" @@ -28,14 +27,13 @@ async-io = { version = "1.3.1", optional = true } tokio = { version = "1.19", default-features = false, features = ["net", "time"], optional = true} [features] -default = ["async-io"] tokio = ["dep:tokio"] async-io = ["dep:async-io"] [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } env_logger = "0.9.0" -libp2p = { path = "../..", default-features = false, features = ["mdns-async-io", "tcp-async-io", "dns-async-std", "tcp-tokio", "dns-tokio", "websocket", "noise", "mplex", "yamux"] } +libp2p = { path = "../..", features = ["full"] } tokio = { version = "1.19", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 854bd885a22..b19845ac1d7 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -25,14 +25,12 @@ mod timer; use self::iface::InterfaceState; use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::MdnsConfig; -use futures::prelude::*; use futures::Stream; use if_watch::{IfEvent, IfWatcher}; use libp2p_core::transport::ListenerId; use libp2p_core::{Multiaddr, PeerId}; use libp2p_swarm::{ - handler::DummyConnectionHandler, ConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, - PollParameters, + dummy, ConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, }; use smallvec::SmallVec; use std::collections::hash_map::{Entry, HashMap}; @@ -82,8 +80,8 @@ where T: Builder, { /// Builds a new `Mdns` behaviour. - pub async fn new(config: MdnsConfig) -> io::Result { - let if_watch = if_watch::IfWatcher::new().await?; + pub fn new(config: MdnsConfig) -> io::Result { + let if_watch = if_watch::IfWatcher::new()?; Ok(Self { config, if_watch, @@ -120,11 +118,11 @@ where T: Builder + Stream, S: AsyncSocket, { - type ConnectionHandler = DummyConnectionHandler; + type ConnectionHandler = dummy::ConnectionHandler; type OutEvent = MdnsEvent; fn new_handler(&mut self) -> Self::ConnectionHandler { - DummyConnectionHandler::default() + dummy::ConnectionHandler } fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { @@ -168,9 +166,9 @@ where &mut self, cx: &mut Context<'_>, params: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll> { // Poll ifwatch. - while let Poll::Ready(event) = Pin::new(&mut self.if_watch).poll(cx) { + while let Poll::Ready(Some(event)) = Pin::new(&mut self.if_watch).poll_next(cx) { match event { Ok(IfEvent::Up(inet)) => { let addr = inet.addr(); @@ -203,7 +201,7 @@ where // Emit discovered event. 
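The `behaviour.rs` hunk above switches from polling `IfWatcher` as a `Future` to draining it as a `futures::Stream` via `poll_next`; the surrounding poll implementation continues below. A generic sketch of that polling pattern over any `Unpin` stream, assuming the `futures` crate (the helper is illustrative, not part of libp2p):

```rust
use futures::Stream;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Drain every item that is already ready, without blocking the task.
fn drain_ready<S: Stream + Unpin>(stream: &mut S, cx: &mut Context<'_>) -> Vec<S::Item> {
    let mut items = Vec::new();
    // Both `Poll::Ready(None)` (stream finished) and `Poll::Pending` end the loop.
    while let Poll::Ready(Some(item)) = Pin::new(&mut *stream).poll_next(cx) {
        items.push(item);
    }
    items
}
```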
let mut discovered = SmallVec::<[(PeerId, Multiaddr); 4]>::new(); for iface_state in self.iface_states.values_mut() { - while let Some((peer, addr, expiration)) = iface_state.poll(cx, params) { + while let Poll::Ready((peer, addr, expiration)) = iface_state.poll(cx, params) { if let Some((_, _, cur_expires)) = self .discovered_nodes .iter_mut() diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index b2d0506b226..e1768720ef9 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -25,12 +25,12 @@ use self::dns::{build_query, build_query_response, build_service_discovery_respo use self::query::MdnsPacket; use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::MdnsConfig; -use libp2p_core::{address_translation, multiaddr::Protocol, Multiaddr, PeerId}; +use libp2p_core::{Multiaddr, PeerId}; use libp2p_swarm::PollParameters; use socket2::{Domain, Socket, Type}; use std::{ collections::VecDeque, - io, iter, + io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, pin::Pin, task::{Context, Poll}, @@ -120,8 +120,8 @@ where config.query_interval + Duration::from_millis(jitter) }; let multicast_addr = match addr { - IpAddr::V4(_) => IpAddr::V4(*crate::IPV4_MDNS_MULTICAST_ADDRESS), - IpAddr::V6(_) => IpAddr::V6(*crate::IPV6_MDNS_MULTICAST_ADDRESS), + IpAddr::V4(_) => IpAddr::V4(crate::IPV4_MDNS_MULTICAST_ADDRESS), + IpAddr::V6(_) => IpAddr::V6(crate::IPV6_MDNS_MULTICAST_ADDRESS), }; Ok(Self { addr, @@ -145,106 +145,101 @@ where self.timeout = T::interval_at(Instant::now(), self.query_interval); } - fn inject_mdns_packet(&mut self, packet: MdnsPacket, params: &impl PollParameters) { - log::trace!("received packet on iface {} {:?}", self.addr, packet); - match packet { - MdnsPacket::Query(query) => { - self.reset_timer(); - log::trace!("sending response on iface {}", self.addr); - for packet in build_query_response( - query.query_id(), - *params.local_peer_id(), - params.listened_addresses(), - self.ttl, - ) { - self.send_buffer.push_back(packet); - } + pub fn poll( + &mut self, + cx: &mut Context, + params: &impl PollParameters, + ) -> Poll<(PeerId, Multiaddr, Instant)> { + loop { + // 1st priority: Low latency: Create packet ASAP after timeout. + if Pin::new(&mut self.timeout).poll_next(cx).is_ready() { + log::trace!("sending query on iface {}", self.addr); + self.send_buffer.push_back(build_query()); } - MdnsPacket::Response(response) => { - // We replace the IP address with the address we observe the - // remote as and the address they listen on. - let obs_ip = Protocol::from(response.remote_addr().ip()); - let obs_port = Protocol::Udp(response.remote_addr().port()); - let observed: Multiaddr = iter::once(obs_ip).chain(iter::once(obs_port)).collect(); - for peer in response.discovered_peers() { - if peer.id() == params.local_peer_id() { + // 2nd priority: Keep local buffers small: Send packets to remote. 
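The reworked `InterfaceState::poll`, which continues in the hunks below, drives its work in strict priority order inside a single loop and only returns `Poll::Pending` once every branch is exhausted. A compact, self-contained sketch of that pattern (all names are illustrative):

```rust
use std::collections::VecDeque;
use std::task::Poll;

struct Iface {
    timer_fired: bool,
    send_buffer: VecDeque<Vec<u8>>,
    discovered: VecDeque<String>,
}

impl Iface {
    fn poll(&mut self) -> Poll<String> {
        loop {
            // 1st priority: react to the query timer as soon as it fires.
            if std::mem::take(&mut self.timer_fired) {
                self.send_buffer.push_back(b"query".to_vec());
                continue;
            }
            // 2nd priority: flush outgoing packets to keep buffers small.
            if let Some(_packet) = self.send_buffer.pop_front() {
                // (send on the socket here)
                continue;
            }
            // 3rd priority: hand already-discovered peers to the caller.
            if let Some(peer) = self.discovered.pop_front() {
                return Poll::Ready(peer);
            }
            // 4th priority: read the socket; if nothing is ready, park the task
            // (a real implementation has registered wakers via socket and timer by now).
            return Poll::Pending;
        }
    }
}
```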
+ if let Some(packet) = self.send_buffer.pop_front() { + match Pin::new(&mut self.send_socket).poll_write( + cx, + &packet, + SocketAddr::new(self.multicast_addr, 5353), + ) { + Poll::Ready(Ok(_)) => { + log::trace!("sent packet on iface {}", self.addr); continue; } - - let new_expiration = Instant::now() + peer.ttl(); - - for addr in peer.addresses() { - if let Some(new_addr) = address_translation(addr, &observed) { - self.discovered.push_back(( - *peer.id(), - new_addr.clone(), - new_expiration, - )); - } - - self.discovered - .push_back((*peer.id(), addr.clone(), new_expiration)); + Poll::Ready(Err(err)) => { + log::error!("error sending packet on iface {} {}", self.addr, err); + continue; + } + Poll::Pending => { + self.send_buffer.push_front(packet); } } } - MdnsPacket::ServiceDiscovery(disc) => { - let resp = build_service_discovery_response(disc.query_id(), self.ttl); - self.send_buffer.push_back(resp); + + // 3rd priority: Keep local buffers small: Return discovered addresses. + if let Some(discovered) = self.discovered.pop_front() { + return Poll::Ready(discovered); } - } - } - pub fn poll( - &mut self, - cx: &mut Context, - params: &impl PollParameters, - ) -> Option<(PeerId, Multiaddr, Instant)> { - // Poll receive socket. - while let Poll::Ready(data) = - Pin::new(&mut self.recv_socket).poll_read(cx, &mut self.recv_buffer) - { - match data { - Ok((len, from)) => { - if let Some(packet) = MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from) - { - self.inject_mdns_packet(packet, params); - } + // 4th priority: Remote work: Answer incoming requests. + match Pin::new(&mut self.recv_socket) + .poll_read(cx, &mut self.recv_buffer) + .map_ok(|(len, from)| MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from)) + { + Poll::Ready(Ok(Ok(Some(MdnsPacket::Query(query))))) => { + self.reset_timer(); + log::trace!( + "received query from {} on {}", + query.remote_addr(), + self.addr + ); + + self.send_buffer.extend(build_query_response( + query.query_id(), + *params.local_peer_id(), + params.listened_addresses(), + self.ttl, + )); + continue; } - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - // No more bytes available on the socket to read - break; + Poll::Ready(Ok(Ok(Some(MdnsPacket::Response(response))))) => { + log::trace!( + "received response from {} on {}", + response.remote_addr(), + self.addr + ); + + self.discovered.extend( + response.extract_discovered(Instant::now(), *params.local_peer_id()), + ); + continue; } - Err(err) => { - log::error!("failed reading datagram: {}", err); + Poll::Ready(Ok(Ok(Some(MdnsPacket::ServiceDiscovery(disc))))) => { + log::trace!( + "received service discovery from {} on {}", + disc.remote_addr(), + self.addr + ); + + self.send_buffer + .push_back(build_service_discovery_response(disc.query_id(), self.ttl)); + continue; + } + Poll::Ready(Err(err)) if err.kind() == std::io::ErrorKind::WouldBlock => { + // No more bytes available on the socket to read } - } - } - - // Send responses. 
- while let Some(packet) = self.send_buffer.pop_front() { - match Pin::new(&mut self.send_socket).poll_write( - cx, - &packet, - SocketAddr::new(self.multicast_addr, 5353), - ) { - Poll::Ready(Ok(_)) => log::trace!("sent packet on iface {}", self.addr), Poll::Ready(Err(err)) => { - log::error!("error sending packet on iface {} {}", self.addr, err); + log::error!("failed reading datagram: {}", err); } - Poll::Pending => { - self.send_buffer.push_front(packet); - break; + Poll::Ready(Ok(Err(err))) => { + log::debug!("Parsing mdns packet failed: {:?}", err); } + Poll::Ready(Ok(Ok(None))) | Poll::Pending => {} } - } - if Pin::new(&mut self.timeout).poll_next(cx).is_ready() { - log::trace!("sending query on iface {}", self.addr); - self.send_buffer.push_back(build_query()); + return Poll::Pending; } - - // Emit discovered event. - self.discovered.pop_front() } } diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 5b579254f1f..70e38016849 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -22,9 +22,11 @@ use super::dns; use crate::{META_QUERY_SERVICE, SERVICE_NAME}; use dns_parser::{Packet, RData}; use libp2p_core::{ + address_translation, multiaddr::{Multiaddr, Protocol}, PeerId, }; +use std::time::Instant; use std::{convert::TryFrom, fmt, net::SocketAddr, str, time::Duration}; /// A valid mDNS packet received by the service. @@ -39,44 +41,40 @@ pub enum MdnsPacket { } impl MdnsPacket { - pub fn new_from_bytes(buf: &[u8], from: SocketAddr) -> Option { - match Packet::parse(buf) { - Ok(packet) => { - if packet.header.query { - if packet - .questions - .iter() - .any(|q| q.qname.to_string().as_bytes() == SERVICE_NAME) - { - let query = MdnsPacket::Query(MdnsQuery { - from, - query_id: packet.header.id, - }); - Some(query) - } else if packet - .questions - .iter() - .any(|q| q.qname.to_string().as_bytes() == META_QUERY_SERVICE) - { - // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE? - let discovery = MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery { - from, - query_id: packet.header.id, - }); - Some(discovery) - } else { - None - } - } else { - let resp = MdnsPacket::Response(MdnsResponse::new(packet, from)); - Some(resp) - } - } - Err(err) => { - log::debug!("Parsing mdns packet failed: {:?}", err); - None - } + pub fn new_from_bytes( + buf: &[u8], + from: SocketAddr, + ) -> Result, dns_parser::Error> { + let packet = Packet::parse(buf)?; + + if !packet.header.query { + return Ok(Some(MdnsPacket::Response(MdnsResponse::new(packet, from)))); } + + if packet + .questions + .iter() + .any(|q| q.qname.to_string().as_bytes() == SERVICE_NAME) + { + return Ok(Some(MdnsPacket::Query(MdnsQuery { + from, + query_id: packet.header.id, + }))); + } + + if packet + .questions + .iter() + .any(|q| q.qname.to_string().as_bytes() == META_QUERY_SERVICE) + { + // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE? + return Ok(Some(MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery { + from, + query_id: packet.header.id, + }))); + } + + Ok(None) } } @@ -167,18 +165,45 @@ impl MdnsResponse { MdnsResponse { peers, from } } - /// Returns the list of peers that have been reported in this packet. - /// - /// > **Note**: Keep in mind that this will also contain the responses we sent ourselves. 
- pub fn discovered_peers(&self) -> impl Iterator { - self.peers.iter() + pub fn extract_discovered( + &self, + now: Instant, + local_peer_id: PeerId, + ) -> impl Iterator + '_ { + self.discovered_peers() + .filter(move |peer| peer.id() != &local_peer_id) + .flat_map(move |peer| { + let observed = self.observed_address(); + let new_expiration = now + peer.ttl(); + + peer.addresses().iter().filter_map(move |address| { + let new_addr = address_translation(address, &observed)?; + + Some((*peer.id(), new_addr, new_expiration)) + }) + }) } /// Source address of the packet. - #[inline] pub fn remote_addr(&self) -> &SocketAddr { &self.from } + + fn observed_address(&self) -> Multiaddr { + // We replace the IP address with the address we observe the + // remote as and the address they listen on. + let obs_ip = Protocol::from(self.remote_addr().ip()); + let obs_port = Protocol::Udp(self.remote_addr().port()); + + Multiaddr::empty().with(obs_ip).with(obs_port) + } + + /// Returns the list of peers that have been reported in this packet. + /// + /// > **Note**: Keep in mind that this will also contain the responses we sent ourselves. + fn discovered_peers(&self) -> impl Iterator { + self.peers.iter() + } } impl fmt::Debug for MdnsResponse { @@ -301,8 +326,8 @@ mod tests { let mut addr1: Multiaddr = "/ip4/1.2.3.4/tcp/5000".parse().expect("bad multiaddress"); let mut addr2: Multiaddr = "/ip6/::1/udp/10000".parse().expect("bad multiaddress"); - addr1.push(Protocol::P2p(peer_id.clone().into())); - addr2.push(Protocol::P2p(peer_id.clone().into())); + addr1.push(Protocol::P2p(peer_id.into())); + addr2.push(Protocol::P2p(peer_id.into())); let packets = build_query_response( 0xf8f8, @@ -324,7 +349,7 @@ mod tests { RData::PTR(record) => record.0.to_string(), _ => return None, }; - return Some(record_value); + Some(record_value) }) .next() .expect("empty record value"); diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs index 3b484c91daa..f05476af7dd 100644 --- a/protocols/mdns/src/lib.rs +++ b/protocols/mdns/src/lib.rs @@ -30,7 +30,6 @@ //! implements the `NetworkBehaviour` trait. This struct will automatically discover other //! libp2p nodes on the local network. //! -use lazy_static::lazy_static; use std::net::{Ipv4Addr, Ipv6Addr}; use std::time::Duration; @@ -48,11 +47,8 @@ const SERVICE_NAME: &[u8] = b"_p2p._udp.local"; /// The meta query for looking up the `SERVICE_NAME`. const META_QUERY_SERVICE: &[u8] = b"_services._dns-sd._udp.local"; -lazy_static! { - pub static ref IPV4_MDNS_MULTICAST_ADDRESS: Ipv4Addr = Ipv4Addr::new(224, 0, 0, 251); - pub static ref IPV6_MDNS_MULTICAST_ADDRESS: Ipv6Addr = - Ipv6Addr::new(0xFF02, 0, 0, 0, 0, 0, 0, 0xFB); -} +pub const IPV4_MDNS_MULTICAST_ADDRESS: Ipv4Addr = Ipv4Addr::new(224, 0, 0, 251); +pub const IPV6_MDNS_MULTICAST_ADDRESS: Ipv6Addr = Ipv6Addr::new(0xFF02, 0, 0, 0, 0, 0, 0, 0xFB); /// Configuration for mDNS. 
#[derive(Debug, Clone)] diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 683aed338ce..2ddb36355be 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -61,7 +61,7 @@ async fn create_swarm(config: MdnsConfig) -> Result, Box> let id_keys = identity::Keypair::generate_ed25519(); let peer_id = PeerId::from(id_keys.public()); let transport = libp2p::development_transport(id_keys).await?; - let behaviour = Mdns::new(config).await?; + let behaviour = Mdns::new(config)?; let mut swarm = Swarm::new(transport, behaviour, peer_id); swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; Ok(swarm) @@ -75,33 +75,27 @@ async fn run_discovery_test(config: MdnsConfig) -> Result<(), Box> { let mut discovered_b = false; loop { futures::select! { - ev = a.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *b.local_peer_id() { - if discovered_a { - return Ok(()); - } else { - discovered_b = true; - } + ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + if discovered_a { + return Ok(()); + } else { + discovered_b = true; } } } - _ => {} }, - ev = b.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *a.local_peer_id() { - if discovered_b { - return Ok(()); - } else { - discovered_a = true; - } + ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + if discovered_b { + return Ok(()); + } else { + discovered_a = true; } } } - _ => {} } } } @@ -113,27 +107,20 @@ async fn run_peer_expiration_test(config: MdnsConfig) -> Result<(), Box match ev { - SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => { - for (peer, _addr) in peers { - if peer == *b.local_peer_id() { - return Ok(()); - } + ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) = ev { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + return Ok(()); } } - _ => {} }, - ev = b.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => { - for (peer, _addr) in peers { - if peer == *a.local_peer_id() { - return Ok(()); - } + ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) = ev { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + return Ok(()); } } - _ => {} } - } } } diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index 9d6cacd76cb..830557d3f00 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -57,7 +57,7 @@ async fn create_swarm(config: MdnsConfig) -> Result, Box Result<(), Box> { let mut discovered_b = false; loop { futures::select! 
{ - ev = a.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *b.local_peer_id() { - if discovered_a { - return Ok(()); - } else { - discovered_b = true; - } + ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + if discovered_a { + return Ok(()); + } else { + discovered_b = true; } } } - _ => {} }, - ev = b.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *a.local_peer_id() { - if discovered_b { - return Ok(()); - } else { - discovered_a = true; - } + ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + if discovered_b { + return Ok(()); + } else { + discovered_a = true; } } } - _ => {} } } } diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index b1037ac1ef7..923dfa48c84 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,3 +1,19 @@ +# 0.40.0 [unreleased] + +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. +- Deprecate types with `Ping` prefix. Prefer importing them via the `ping` namespace, i.e. `libp2p::ping::Event` instead + of `libp2p::ping::PingEvent`. See [PR 2937]. + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + +- Deprecate `Config::with_keep_alive`. See [PR 2859]. + +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 +[PR 2937]: https://github.com/libp2p/rust-libp2p/pull/2937 +[PR 2859]: https://github.com/libp2p/rust-libp2p/pull/2859/ + # 0.39.0 - Update to `libp2p-swarm` `v0.39.0`. 
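The ping changelog above deprecates the `Ping*`-prefixed aliases and `Config::with_keep_alive`. The sketch below mirrors the combined behaviour the updated tests use further down, assuming `libp2p = { version = "0.49", features = ["full"] }`:

```rust
// Namespaced `ping::` types instead of the deprecated `Ping*` aliases, and a
// dedicated `keep_alive::Behaviour` instead of `ping::Config::with_keep_alive(true)`.
use libp2p::ping;
use libp2p::swarm::keep_alive;
use libp2p::NetworkBehaviour;
use std::time::Duration;

#[derive(NetworkBehaviour, Default)]
struct Behaviour {
    // Keeps otherwise idle connections alive; ping no longer does this itself.
    keep_alive: keep_alive::Behaviour,
    ping: ping::Behaviour,
}

fn fast_pinger() -> Behaviour {
    Behaviour {
        keep_alive: keep_alive::Behaviour,
        ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_millis(10))),
    }
}
```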
diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index aa2b596d5f1..70e03164318 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-ping" edition = "2021" rust-version = "1.56.1" description = "Ping protocol for libp2p" -version = "0.39.0" +version = "0.40.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,16 +14,13 @@ categories = ["network-programming", "asynchronous"] futures = "0.3.1" futures-timer = "3.0.2" instant = "0.1.11" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } log = "0.4.1" -rand = "0.7.2" +rand = "0.8" void = "1.0" [dev-dependencies] async-std = "1.6.2" -libp2p-tcp = { path = "../../transports/tcp" } -libp2p-noise = { path = "../../transports/noise" } -libp2p-yamux = { path = "../../muxers/yamux" } -libp2p-mplex = { path = "../../muxers/mplex" } -quickcheck = "0.9.0" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index f0e71fb070e..af1ef898981 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -56,7 +56,7 @@ pub struct Config { } impl Config { - /// Creates a new `PingConfig` with the following default settings: + /// Creates a new [`Config`] with the following default settings: /// /// * [`Config::with_interval`] 15s /// * [`Config::with_timeout`] 20s @@ -111,6 +111,10 @@ impl Config { /// If the maximum number of allowed ping failures is reached, the /// connection is always terminated as a result of [`ConnectionHandler::poll`] /// returning an error, regardless of the keep-alive setting. + #[deprecated( + since = "0.40.0", + note = "Use `libp2p::swarm::behaviour::KeepAlive` if you need to keep connections alive unconditionally." + )] pub fn with_keep_alive(mut self, b: bool) -> Self { self.keep_alive = b; self @@ -185,7 +189,7 @@ pub struct Handler { /// Each successful ping resets this counter to 0. failures: u32, /// The outbound ping state. - outbound: Option, + outbound: Option, /// The inbound pong handler, i.e. if there is an inbound /// substream, this is always a future that waits for the /// next inbound ping to be answered. @@ -208,7 +212,7 @@ enum State { } impl Handler { - /// Builds a new `PingHandler` with the given configuration. + /// Builds a new [`Handler`] with the given configuration. pub fn new(config: Config) -> Self { Handler { config, @@ -241,7 +245,7 @@ impl ConnectionHandler for Handler { fn inject_fully_negotiated_outbound(&mut self, stream: NegotiatedSubstream, (): ()) { self.timer.reset(self.config.timeout); - self.outbound = Some(PingState::Ping(protocol::send_ping(stream).boxed())); + self.outbound = Some(OutboundState::Ping(protocol::send_ping(stream).boxed())); } fn inject_event(&mut self, _: Void) {} @@ -330,19 +334,19 @@ impl ConnectionHandler for Handler { // Continue outbound pings. 
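The handler hunk above marks `Config::with_keep_alive` as `#[deprecated]`, and the `lib.rs` hunk further down replaces the old re-export trick with deprecated type aliases; the handler's poll loop resumes immediately below. A standalone sketch of that deprecation idiom (the types here are illustrative, not libp2p's API):

```rust
pub struct Config {
    keep_alive: bool,
}

impl Config {
    pub fn new() -> Self {
        Config { keep_alive: false }
    }

    #[deprecated(
        since = "0.40.0",
        note = "Use a dedicated keep-alive behaviour instead of this flag."
    )]
    pub fn with_keep_alive(mut self, b: bool) -> Self {
        self.keep_alive = b;
        self
    }

    /// Read accessor, so the flag is still observable by old callers.
    pub fn keep_alive(&self) -> bool {
        self.keep_alive
    }
}

// Renamed type: a deprecated alias keeps old code building, with a warning.
#[deprecated(since = "0.40.0", note = "Use `Config` instead.")]
pub type PingConfig = Config;
```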
match self.outbound.take() { - Some(PingState::Ping(mut ping)) => match ping.poll_unpin(cx) { + Some(OutboundState::Ping(mut ping)) => match ping.poll_unpin(cx) { Poll::Pending => { if self.timer.poll_unpin(cx).is_ready() { self.pending_errors.push_front(Failure::Timeout); } else { - self.outbound = Some(PingState::Ping(ping)); + self.outbound = Some(OutboundState::Ping(ping)); break; } } Poll::Ready(Ok((stream, rtt))) => { self.failures = 0; self.timer.reset(self.config.interval); - self.outbound = Some(PingState::Idle(stream)); + self.outbound = Some(OutboundState::Idle(stream)); return Poll::Ready(ConnectionHandlerEvent::Custom(Ok(Success::Ping { rtt, }))); @@ -352,22 +356,23 @@ impl ConnectionHandler for Handler { .push_front(Failure::Other { error: Box::new(e) }); } }, - Some(PingState::Idle(stream)) => match self.timer.poll_unpin(cx) { + Some(OutboundState::Idle(stream)) => match self.timer.poll_unpin(cx) { Poll::Pending => { - self.outbound = Some(PingState::Idle(stream)); + self.outbound = Some(OutboundState::Idle(stream)); break; } Poll::Ready(()) => { self.timer.reset(self.config.timeout); - self.outbound = Some(PingState::Ping(protocol::send_ping(stream).boxed())); + self.outbound = + Some(OutboundState::Ping(protocol::send_ping(stream).boxed())); } }, - Some(PingState::OpenStream) => { - self.outbound = Some(PingState::OpenStream); + Some(OutboundState::OpenStream) => { + self.outbound = Some(OutboundState::OpenStream); break; } None => { - self.outbound = Some(PingState::OpenStream); + self.outbound = Some(OutboundState::OpenStream); let protocol = SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()) .with_timeout(self.config.timeout); return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { @@ -385,7 +390,7 @@ type PingFuture = BoxFuture<'static, Result<(NegotiatedSubstream, Duration), io: type PongFuture = BoxFuture<'static, Result>; /// The current state w.r.t. outbound pings. -enum PingState { +enum OutboundState { /// A new substream is being negotiated for the ping protocol. OpenStream, /// The substream is idle, waiting to send the next ping. diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 2a01025ee6d..42234488964 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -26,16 +26,16 @@ //! //! # Usage //! -//! The [`Ping`] struct implements the [`NetworkBehaviour`] trait. When used with a [`Swarm`], +//! The [`Behaviour`] struct implements the [`NetworkBehaviour`] trait. When used with a [`Swarm`], //! it will respond to inbound ping requests and as necessary periodically send outbound //! ping requests on every established connection. If a configurable number of consecutive //! pings fail, the connection will be closed. //! -//! The `Ping` network behaviour produces [`PingEvent`]s, which may be consumed from the `Swarm` +//! The [`Behaviour`] network behaviour produces [`Event`]s, which may be consumed from the [`Swarm`] //! by an application, e.g. to collect statistics. //! //! > **Note**: The ping protocol does not keep otherwise idle connections alive -//! > by default, see [`PingConfig::with_keep_alive`] for changing this behaviour. +//! > by default, see [`Config::with_keep_alive`] for changing this behaviour. //! //! [`Swarm`]: libp2p_swarm::Swarm //! [`Transport`]: libp2p_core::Transport @@ -52,16 +52,25 @@ use std::{ task::{Context, Poll}, }; -#[deprecated( - since = "0.30.0", - note = "Use re-exports that omit `Ping` prefix, i.e. 
`libp2p::ping::Config` etc" -)] -pub use self::{ - protocol::PROTOCOL_NAME, Config as PingConfig, Event as PingEvent, Failure as PingFailure, - Result as PingResult, Success as PingSuccess, -}; -#[deprecated(since = "0.30.0", note = "Use libp2p::ping::Behaviour instead.")] -pub use Behaviour as Ping; +#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Config instead.")] +pub type PingConfig = Config; + +#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Event instead.")] +pub type PingEvent = Event; + +#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Success instead.")] +pub type PingSuccess = Success; + +#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Failure instead.")] +pub type PingFailure = Failure; + +#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Result instead.")] +pub type PingResult = Result; + +#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Behaviour instead.")] +pub type Ping = Behaviour; + +pub use self::protocol::PROTOCOL_NAME; /// The result of an inbound or outbound ping. pub type Result = std::result::Result; diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 2f75c09fb3d..e489f90c254 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -21,38 +21,37 @@ //! Integration tests for the `Ping` network behaviour. use futures::{channel::mpsc, prelude::*}; -use libp2p_core::{ +use libp2p::core::{ identity, muxing::StreamMuxerBox, transport::{self, Transport}, upgrade, Multiaddr, PeerId, }; -use libp2p_mplex as mplex; -use libp2p_noise as noise; -use libp2p_ping as ping; -use libp2p_swarm::{DummyBehaviour, KeepAlive, Swarm, SwarmEvent}; -use libp2p_tcp::{GenTcpConfig, TcpTransport}; -use libp2p_yamux as yamux; +use libp2p::mplex; +use libp2p::noise; +use libp2p::ping; +use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::tcp::{GenTcpConfig, TcpTransport}; +use libp2p::yamux; +use libp2p::NetworkBehaviour; +use libp2p_swarm::keep_alive; use quickcheck::*; -use rand::prelude::*; use std::{num::NonZeroU8, time::Duration}; #[test] fn ping_pong() { fn prop(count: NonZeroU8, muxer: MuxerChoice) { - let cfg = ping::Config::new() - .with_keep_alive(true) - .with_interval(Duration::from_millis(10)); + let cfg = ping::Config::new().with_interval(Duration::from_millis(10)); let (peer1_id, trans) = mk_transport(muxer); - let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id.clone()); + let mut swarm1 = Swarm::new(trans, Behaviour::new(cfg.clone()), peer1_id); let (peer2_id, trans) = mk_transport(muxer); - let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id.clone()); + let mut swarm2 = Swarm::new(trans, Behaviour::new(cfg), peer2_id); let (mut tx, mut rx) = mpsc::channel::(1); - let pid1 = peer1_id.clone(); + let pid1 = peer1_id; let addr = "/ip4/127.0.0.1/tcp/0".parse().unwrap(); swarm1.listen_on(addr).unwrap(); @@ -63,16 +62,19 @@ fn ping_pong() { loop { match swarm1.select_next_some().await { SwarmEvent::NewListenAddr { address, .. } => tx.send(address).await.unwrap(), - SwarmEvent::Behaviour(ping::Event { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { peer, result: Ok(ping::Success::Ping { rtt }), - }) => { + })) => { count1 -= 1; if count1 == 0 { - return (pid1.clone(), peer, rtt); + return (pid1, peer, rtt); } } - SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + result: Err(e), + .. 
+ })) => { panic!("Ping failure: {:?}", e) } _ => {} @@ -80,22 +82,25 @@ fn ping_pong() { } }; - let pid2 = peer2_id.clone(); + let pid2 = peer2_id; let peer2 = async move { swarm2.dial(rx.next().await.unwrap()).unwrap(); loop { match swarm2.select_next_some().await { - SwarmEvent::Behaviour(ping::Event { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { peer, result: Ok(ping::Success::Ping { rtt }), - }) => { + })) => { count2 -= 1; if count2 == 0 { - return (pid2.clone(), peer, rtt); + return (pid2, peer, rtt); } } - SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + result: Err(e), + .. + })) => { panic!("Ping failure: {:?}", e) } _ => {} @@ -118,16 +123,15 @@ fn ping_pong() { fn max_failures() { fn prop(max_failures: NonZeroU8, muxer: MuxerChoice) { let cfg = ping::Config::new() - .with_keep_alive(true) .with_interval(Duration::from_millis(10)) .with_timeout(Duration::from_millis(0)) .with_max_failures(max_failures.into()); let (peer1_id, trans) = mk_transport(muxer); - let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id.clone()); + let mut swarm1 = Swarm::new(trans, Behaviour::new(cfg.clone()), peer1_id); let (peer2_id, trans) = mk_transport(muxer); - let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id.clone()); + let mut swarm2 = Swarm::new(trans, Behaviour::new(cfg), peer2_id); let (mut tx, mut rx) = mpsc::channel::(1); @@ -140,13 +144,16 @@ fn max_failures() { loop { match swarm1.select_next_some().await { SwarmEvent::NewListenAddr { address, .. } => tx.send(address).await.unwrap(), - SwarmEvent::Behaviour(ping::Event { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Ok(ping::Success::Ping { .. }), .. - }) => { + })) => { count1 = 0; // there may be an occasional success } - SwarmEvent::Behaviour(ping::Event { result: Err(_), .. }) => { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + result: Err(_), + .. + })) => { count1 += 1; } SwarmEvent::ConnectionClosed { .. } => return count1, @@ -162,13 +169,16 @@ fn max_failures() { loop { match swarm2.select_next_some().await { - SwarmEvent::Behaviour(ping::Event { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Ok(ping::Success::Ping { .. }), .. - }) => { + })) => { count2 = 0; // there may be an occasional success } - SwarmEvent::Behaviour(ping::Event { result: Err(_), .. }) => { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + result: Err(_), + .. + })) => { count2 += 1; } SwarmEvent::ConnectionClosed { .. } => return count2, @@ -188,18 +198,10 @@ fn max_failures() { #[test] fn unsupported_doesnt_fail() { let (peer1_id, trans) = mk_transport(MuxerChoice::Mplex); - let mut swarm1 = Swarm::new( - trans, - DummyBehaviour::with_keep_alive(KeepAlive::Yes), - peer1_id.clone(), - ); + let mut swarm1 = Swarm::new(trans, keep_alive::Behaviour, peer1_id); let (peer2_id, trans) = mk_transport(MuxerChoice::Mplex); - let mut swarm2 = Swarm::new( - trans, - ping::Behaviour::new(ping::Config::new().with_keep_alive(true)), - peer2_id.clone(), - ); + let mut swarm2 = Swarm::new(trans, Behaviour::default(), peer2_id); let (mut tx, mut rx) = mpsc::channel::(1); @@ -208,9 +210,8 @@ fn unsupported_doesnt_fail() { async_std::task::spawn(async move { loop { - match swarm1.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. } => tx.send(address).await.unwrap(), - _ => {} + if let SwarmEvent::NewListenAddr { address, .. 
} = swarm1.select_next_some().await { + tx.send(address).await.unwrap() } } }); @@ -220,10 +221,10 @@ fn unsupported_doesnt_fail() { loop { match swarm2.select_next_some().await { - SwarmEvent::Behaviour(ping::Event { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Err(ping::Failure::Unsupported), .. - }) => { + })) => { swarm2.disconnect_peer_id(peer1_id).unwrap(); } SwarmEvent::ConnectionClosed { cause: Some(e), .. } => { @@ -263,7 +264,22 @@ enum MuxerChoice { } impl Arbitrary for MuxerChoice { - fn arbitrary(g: &mut G) -> MuxerChoice { - *[MuxerChoice::Mplex, MuxerChoice::Yamux].choose(g).unwrap() + fn arbitrary(g: &mut Gen) -> MuxerChoice { + *g.choose(&[MuxerChoice::Mplex, MuxerChoice::Yamux]).unwrap() + } +} + +#[derive(NetworkBehaviour, Default)] +struct Behaviour { + keep_alive: keep_alive::Behaviour, + ping: ping::Behaviour, +} + +impl Behaviour { + fn new(config: ping::Config) -> Self { + Self { + keep_alive: keep_alive::Behaviour, + ping: ping::Behaviour::new(config), + } } } diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index 1262a250880..9937fffec0e 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.13.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + # 0.12.0 - Update to `libp2p-swarm` `v0.39.0`. diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index 41964edcf41..052eb1127a8 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-relay" edition = "2021" rust-version = "1.56.1" description = "Communications relaying for libp2p" -version = "0.12.0" +version = "0.13.0" authors = ["Parity Technologies ", "Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -17,8 +17,8 @@ either = "1.6.0" futures = "0.3.1" futures-timer = "3" instant = "0.1.11" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } log = "0.4" pin-project = "1" prost-codec = { version = "0.2", path = "../../misc/prost-codec" } @@ -34,11 +34,6 @@ prost-build = "0.11" [dev-dependencies] env_logger = "0.9.0" -libp2p = { path = "../..", default-features = false, features = ["identify", "relay", "ping", "noise", "plaintext", "tcp-async-io"] } -libp2p-identify = { path = "../identify" } -libp2p-kad = { path = "../kad" } -libp2p-ping = { path = "../ping" } -libp2p-plaintext = { path = "../../transports/plaintext" } -libp2p-yamux = { path = "../../muxers/yamux" } -quickcheck = "1" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } clap = {version = "3.1.6", features = ["derive"]} diff --git a/protocols/relay/examples/relay_v2.rs b/protocols/relay/examples/relay_v2.rs index b89c88b2829..dd2a87fb8b8 100644 --- a/protocols/relay/examples/relay_v2.rs +++ b/protocols/relay/examples/relay_v2.rs @@ -23,15 +23,14 @@ use clap::Parser; use futures::executor::block_on; use futures::stream::StreamExt; use libp2p::core::upgrade; -use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::identify; use libp2p::multiaddr::Protocol; -use libp2p::ping::{Ping, PingConfig, PingEvent}; use libp2p::relay::v2::relay::{self, Relay}; use libp2p::swarm::{Swarm, SwarmEvent}; use 
libp2p::tcp::TcpTransport; -use libp2p::Transport; use libp2p::{identity, NetworkBehaviour, PeerId}; use libp2p::{noise, Multiaddr}; +use libp2p::{ping, Transport}; use std::error::Error; use std::net::{Ipv4Addr, Ipv6Addr}; @@ -54,13 +53,13 @@ fn main() -> Result<(), Box> { noise::NoiseAuthenticated::xx(&local_key) .expect("Signing libp2p-noise static DH keypair failed."), ) - .multiplex(libp2p_yamux::YamuxConfig::default()) + .multiplex(libp2p::yamux::YamuxConfig::default()) .boxed(); let behaviour = Behaviour { relay: Relay::new(local_peer_id, Default::default()), - ping: Ping::new(PingConfig::new()), - identify: Identify::new(IdentifyConfig::new( + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new(identify::Config::new( "/TODO/0.0.1".to_string(), local_key.public(), )), @@ -96,25 +95,25 @@ fn main() -> Result<(), Box> { #[behaviour(out_event = "Event", event_process = false)] struct Behaviour { relay: Relay, - ping: Ping, - identify: Identify, + ping: ping::Behaviour, + identify: identify::Behaviour, } #[derive(Debug)] enum Event { - Ping(PingEvent), - Identify(IdentifyEvent), + Ping(ping::Event), + Identify(identify::Event), Relay(relay::Event), } -impl From for Event { - fn from(e: PingEvent) -> Self { +impl From for Event { + fn from(e: ping::Event) -> Self { Event::Ping(e) } } -impl From for Event { - fn from(e: IdentifyEvent) -> Self { +impl From for Event { + fn from(e: identify::Event) -> Self { Event::Identify(e) } } diff --git a/protocols/relay/src/v2/client.rs b/protocols/relay/src/v2/client.rs index d039cd08cbd..68e69e20a28 100644 --- a/protocols/relay/src/v2/client.rs +++ b/protocols/relay/src/v2/client.rs @@ -35,7 +35,7 @@ use futures::stream::StreamExt; use libp2p_core::connection::{ConnectedPoint, ConnectionId}; use libp2p_core::{Multiaddr, PeerId}; use libp2p_swarm::dial_opts::DialOpts; -use libp2p_swarm::handler::DummyConnectionHandler; +use libp2p_swarm::dummy; use libp2p_swarm::{ ConnectionHandlerUpgrErr, NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, @@ -144,7 +144,7 @@ impl NetworkBehaviour for Client { peer_id: &PeerId, connection_id: &ConnectionId, endpoint: &ConnectedPoint, - _handler: Either, + _handler: Either, _remaining_established: usize, ) { if !endpoint.is_relayed() { diff --git a/protocols/relay/src/v2/client/handler.rs b/protocols/relay/src/v2/client/handler.rs index c0e2172329d..a346a4eaeb3 100644 --- a/protocols/relay/src/v2/client/handler.rs +++ b/protocols/relay/src/v2/client/handler.rs @@ -31,12 +31,10 @@ use instant::Instant; use libp2p_core::either::EitherError; use libp2p_core::multiaddr::Protocol; use libp2p_core::{upgrade, ConnectedPoint, Multiaddr, PeerId}; -use libp2p_swarm::handler::{ - DummyConnectionHandler, InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, -}; +use libp2p_swarm::handler::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, IntoConnectionHandler, - KeepAlive, NegotiatedSubstream, SubstreamProtocol, + dummy, ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, + IntoConnectionHandler, KeepAlive, NegotiatedSubstream, SubstreamProtocol, }; use log::debug; use std::collections::{HashMap, VecDeque}; @@ -125,7 +123,7 @@ impl Prototype { } impl IntoConnectionHandler for Prototype { - type Handler = Either; + type Handler = Either; fn into_handler(self, remote_peer_id: &PeerId, endpoint: &ConnectedPoint) -> Self::Handler { if 
endpoint.is_relayed() { @@ -138,7 +136,7 @@ impl IntoConnectionHandler for Prototype { } // Deny all substreams on relayed connection. - Either::Right(DummyConnectionHandler::default()) + Either::Right(dummy::ConnectionHandler) } else { let mut handler = Handler { remote_peer_id: *remote_peer_id, diff --git a/protocols/relay/src/v2/copy_future.rs b/protocols/relay/src/v2/copy_future.rs index 47652c92ed7..12a8c486d3a 100644 --- a/protocols/relay/src/v2/copy_future.rs +++ b/protocols/relay/src/v2/copy_future.rs @@ -197,7 +197,7 @@ mod tests { let n = std::cmp::min(self.read.len(), buf.len()); buf[0..n].copy_from_slice(&self.read[0..n]); self.read = self.read.split_off(n); - return Poll::Ready(Ok(n)); + Poll::Ready(Ok(n)) } } diff --git a/protocols/relay/src/v2/relay.rs b/protocols/relay/src/v2/relay.rs index ed5fe6ca326..5b1eb810f60 100644 --- a/protocols/relay/src/v2/relay.rs +++ b/protocols/relay/src/v2/relay.rs @@ -30,9 +30,8 @@ use instant::Instant; use libp2p_core::connection::{ConnectedPoint, ConnectionId}; use libp2p_core::multiaddr::Protocol; use libp2p_core::PeerId; -use libp2p_swarm::handler::DummyConnectionHandler; use libp2p_swarm::{ - ConnectionHandlerUpgrErr, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, + dummy, ConnectionHandlerUpgrErr, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, }; use std::collections::{hash_map, HashMap, HashSet, VecDeque}; @@ -234,7 +233,7 @@ impl NetworkBehaviour for Relay { peer: &PeerId, connection: &ConnectionId, _: &ConnectedPoint, - _handler: Either, + _handler: Either, _remaining_established: usize, ) { if let hash_map::Entry::Occupied(mut peer) = self.reservations.entry(*peer) { @@ -283,7 +282,7 @@ impl NetworkBehaviour for Relay { assert!( !endpoint.is_relayed(), - "`DummyConnectionHandler` handles relayed connections. It \ + "`dummy::ConnectionHandler` handles relayed connections. It \ denies all inbound substreams." ); @@ -410,7 +409,7 @@ impl NetworkBehaviour for Relay { assert!( !endpoint.is_relayed(), - "`DummyConnectionHandler` handles relayed connections. It \ + "`dummy::ConnectionHandler` handles relayed connections. It \ denies all inbound substreams." ); diff --git a/protocols/relay/src/v2/relay/handler.rs b/protocols/relay/src/v2/relay/handler.rs index 9801ca74b43..1c6987692fa 100644 --- a/protocols/relay/src/v2/relay/handler.rs +++ b/protocols/relay/src/v2/relay/handler.rs @@ -33,11 +33,11 @@ use instant::Instant; use libp2p_core::connection::ConnectionId; use libp2p_core::either::EitherError; use libp2p_core::{upgrade, ConnectedPoint, Multiaddr, PeerId}; -use libp2p_swarm::handler::{DummyConnectionHandler, SendWrapper}; +use libp2p_swarm::handler::SendWrapper; use libp2p_swarm::handler::{InboundUpgradeSend, OutboundUpgradeSend}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, IntoConnectionHandler, - KeepAlive, NegotiatedSubstream, SubstreamProtocol, + dummy, ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, + IntoConnectionHandler, KeepAlive, NegotiatedSubstream, SubstreamProtocol, }; use std::collections::VecDeque; use std::fmt; @@ -342,12 +342,12 @@ pub struct Prototype { } impl IntoConnectionHandler for Prototype { - type Handler = Either; + type Handler = Either; fn into_handler(self, _remote_peer_id: &PeerId, endpoint: &ConnectedPoint) -> Self::Handler { if endpoint.is_relayed() { // Deny all substreams on relayed connection. 
- Either::Right(DummyConnectionHandler::default()) + Either::Right(dummy::ConnectionHandler) } else { Either::Left(Handler { endpoint: endpoint.clone(), diff --git a/protocols/relay/src/v2/relay/rate_limiter.rs b/protocols/relay/src/v2/relay/rate_limiter.rs index d0b4b4e631f..00d70aa3541 100644 --- a/protocols/relay/src/v2/relay/rate_limiter.rs +++ b/protocols/relay/src/v2/relay/rate_limiter.rs @@ -279,10 +279,7 @@ mod generic { } let mut now = Instant::now(); - let mut l = RateLimiter::new(RateLimiterConfig { - limit: limit.try_into().unwrap(), - interval, - }); + let mut l = RateLimiter::new(RateLimiterConfig { limit, interval }); for (id, d) in events { now = if let Some(now) = now.checked_add(d) { diff --git a/protocols/relay/tests/v2.rs b/protocols/relay/tests/v2.rs index c0d084aa606..e377a9249f5 100644 --- a/protocols/relay/tests/v2.rs +++ b/protocols/relay/tests/v2.rs @@ -29,12 +29,11 @@ use libp2p::core::transport::choice::OrTransport; use libp2p::core::transport::{Boxed, MemoryTransport, Transport}; use libp2p::core::PublicKey; use libp2p::core::{identity, upgrade, PeerId}; -use libp2p::ping::{Ping, PingConfig, PingEvent}; use libp2p::plaintext::PlainText2Config; use libp2p::relay::v2::client; use libp2p::relay::v2::relay; -use libp2p::NetworkBehaviour; -use libp2p_swarm::{AddressScore, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p::swarm::{AddressScore, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p::{ping, NetworkBehaviour}; use std::time::Duration; #[test] @@ -51,7 +50,6 @@ fn reservation() { spawn_swarm_on_pool(&pool, relay); let client_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit); let mut client = build_client(); @@ -97,7 +95,6 @@ fn new_reservation_to_same_relay_replaces_old() { let mut client = build_client(); let client_peer_id = *client.local_peer_id(); let client_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit); let client_addr_with_peer_id = client_addr @@ -118,7 +115,7 @@ fn new_reservation_to_same_relay_replaces_old() { )); // Trigger new reservation. - let new_listener = client.listen_on(client_addr.clone()).unwrap(); + let new_listener = client.listen_on(client_addr).unwrap(); // Wait for // - listener of old reservation to close @@ -191,7 +188,6 @@ fn connect() { let mut dst = build_client(); let dst_peer_id = *dst.local_peer_id(); let dst_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit) .with(Protocol::P2p(dst_peer_id.into())); @@ -217,7 +213,7 @@ fn connect() { match src.select_next_some().await { SwarmEvent::Dialing(peer_id) if peer_id == relay_peer_id => {} SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == relay_peer_id => {} - SwarmEvent::Behaviour(ClientEvent::Ping(PingEvent { peer, .. })) + SwarmEvent::Behaviour(ClientEvent::Ping(ping::Event { peer, .. })) if peer == dst_peer_id => { break @@ -225,7 +221,7 @@ fn connect() { SwarmEvent::Behaviour(ClientEvent::Relay( client::Event::OutboundCircuitEstablished { .. }, )) => {} - SwarmEvent::Behaviour(ClientEvent::Ping(PingEvent { peer, .. })) + SwarmEvent::Behaviour(ClientEvent::Ping(ping::Event { peer, .. })) if peer == relay_peer_id => {} SwarmEvent::ConnectionEstablished { peer_id, .. 
} if peer_id == dst_peer_id => { break @@ -247,12 +243,11 @@ fn handle_dial_failure() { let mut client = build_client(); let client_peer_id = *client.local_peer_id(); let client_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit) .with(Protocol::P2p(client_peer_id.into())); - client.listen_on(client_addr.clone()).unwrap(); + client.listen_on(client_addr).unwrap(); assert!(!pool.run_until(wait_for_dial(&mut client, relay_peer_id))); } @@ -292,14 +287,14 @@ fn reuse_connection() { fn build_relay() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let transport = upgrade_transport(MemoryTransport::default().boxed(), local_public_key); Swarm::new( transport, Relay { - ping: Ping::new(PingConfig::new()), + ping: ping::Behaviour::new(ping::Config::new()), relay: relay::Relay::new( local_peer_id, relay::Config { @@ -315,7 +310,7 @@ fn build_relay() -> Swarm { fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let (relay_transport, behaviour) = client::Client::new_transport_and_behaviour(local_peer_id); let transport = upgrade_transport( @@ -326,7 +321,7 @@ fn build_client() -> Swarm { Swarm::new( transport, Client { - ping: Ping::new(PingConfig::new()), + ping: ping::Behaviour::new(ping::Config::new()), relay: behaviour, }, local_peer_id, @@ -343,7 +338,7 @@ where transport .upgrade(upgrade::Version::V1) .authenticate(PlainText2Config { local_public_key }) - .multiplex(libp2p_yamux::YamuxConfig::default()) + .multiplex(libp2p::yamux::YamuxConfig::default()) .boxed() } @@ -351,13 +346,13 @@ where #[behaviour(out_event = "RelayEvent", event_process = false)] struct Relay { relay: relay::Relay, - ping: Ping, + ping: ping::Behaviour, } #[derive(Debug)] enum RelayEvent { Relay(relay::Event), - Ping(PingEvent), + Ping(ping::Event), } impl From for RelayEvent { @@ -366,8 +361,8 @@ impl From for RelayEvent { } } -impl From for RelayEvent { - fn from(event: PingEvent) -> Self { +impl From for RelayEvent { + fn from(event: ping::Event) -> Self { RelayEvent::Ping(event) } } @@ -376,13 +371,13 @@ impl From for RelayEvent { #[behaviour(out_event = "ClientEvent", event_process = false)] struct Client { relay: client::Client, - ping: Ping, + ping: ping::Behaviour, } #[derive(Debug)] enum ClientEvent { Relay(client::Event), - Ping(PingEvent), + Ping(ping::Event), } impl From for ClientEvent { @@ -391,8 +386,8 @@ impl From for ClientEvent { } } -impl From for ClientEvent { - fn from(event: PingEvent) -> Self { +impl From for ClientEvent { + fn from(event: ping::Event) -> Self { ClientEvent::Ping(event) } } diff --git a/protocols/rendezvous/CHANGELOG.md b/protocols/rendezvous/CHANGELOG.md index edf55a5d420..3e04334ad50 100644 --- a/protocols/rendezvous/CHANGELOG.md +++ b/protocols/rendezvous/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.10.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + # 0.9.0 - Update to `libp2p-swarm` `v0.39.0`. 
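Note: the relay test and example changes above follow the renaming applied throughout this PR: `Ping`, `PingConfig` and `PingEvent` become `ping::Behaviour`, `ping::Config` and `ping::Event`. A minimal sketch of the post-rename composition pattern for a derived behaviour with a custom out-event (the `Behaviour`/`Event` names are illustrative only, and the derive macro is assumed to be available, e.g. via the `full` feature):

```rust
use libp2p::{ping, NetworkBehaviour};

#[derive(NetworkBehaviour)]
#[behaviour(out_event = "Event", event_process = false)]
struct Behaviour {
    ping: ping::Behaviour,
}

#[derive(Debug)]
enum Event {
    Ping(ping::Event),
}

// With a custom `out_event`, the derive expects a conversion from each
// field's `OutEvent` into the custom event type.
impl From<ping::Event> for Event {
    fn from(event: ping::Event) -> Self {
        Event::Ping(event)
    }
}
```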
diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index 3eee5c833e6..fc25965bccd 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-rendezvous" edition = "2021" rust-version = "1.56.1" description = "Rendezvous protocol for libp2p" -version = "0.9.0" +version = "0.10.0" authors = ["The COMIT guys "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,8 +12,8 @@ categories = ["network-programming", "asynchronous"] [dependencies] asynchronous-codec = "0.6" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } prost = "0.11" void = "1" log = "0.4" @@ -29,7 +29,7 @@ instant = "0.1.11" [dev-dependencies] async-trait = "0.1" env_logger = "0.9.0" -libp2p = { path = "../..", default-features = false, features = ["ping", "identify", "tcp-async-io", "dns-async-std", "websocket", "noise", "mplex", "yamux", "rendezvous"] } +libp2p = { path = "../..", features = ["full"] } rand = "0.8" tokio = { version = "1.15", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } diff --git a/protocols/rendezvous/examples/discover.rs b/protocols/rendezvous/examples/discover.rs index ceca71c6c4c..c14e114ee88 100644 --- a/protocols/rendezvous/examples/discover.rs +++ b/protocols/rendezvous/examples/discover.rs @@ -22,11 +22,12 @@ use futures::StreamExt; use libp2p::core::identity; use libp2p::core::PeerId; use libp2p::multiaddr::Protocol; -use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; -use libp2p::swarm::SwarmEvent; +use libp2p::ping; +use libp2p::swarm::{keep_alive, SwarmEvent}; use libp2p::Swarm; use libp2p::{development_transport, rendezvous, Multiaddr}; use std::time::Duration; +use void::Void; const NAMESPACE: &str = "rendezvous"; @@ -44,18 +45,15 @@ async fn main() { development_transport(identity.clone()).await.unwrap(), MyBehaviour { rendezvous: rendezvous::client::Behaviour::new(identity.clone()), - ping: Ping::new( - PingConfig::new() - .with_interval(Duration::from_secs(1)) - .with_keep_alive(true), - ), + ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), + keep_alive: keep_alive::Behaviour, }, PeerId::from(identity.public()), ); log::info!("Local peer id: {}", swarm.local_peer_id()); - let _ = swarm.dial(rendezvous_point_address.clone()).unwrap(); + swarm.dial(rendezvous_point_address.clone()).unwrap(); let mut discover_tick = tokio::time::interval(Duration::from_secs(30)); let mut cookie = None; @@ -100,9 +98,9 @@ async fn main() { } } } - SwarmEvent::Behaviour(MyEvent::Ping(PingEvent { + SwarmEvent::Behaviour(MyEvent::Ping(ping::Event { peer, - result: Ok(PingSuccess::Ping { rtt }), + result: Ok(ping::Success::Ping { rtt }), })) if peer != rendezvous_point => { log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } @@ -124,7 +122,7 @@ async fn main() { #[derive(Debug)] enum MyEvent { Rendezvous(rendezvous::client::Event), - Ping(PingEvent), + Ping(ping::Event), } impl From for MyEvent { @@ -133,16 +131,23 @@ impl From for MyEvent { } } -impl From for MyEvent { - fn from(event: PingEvent) -> Self { +impl From for MyEvent { + fn from(event: ping::Event) -> Self { MyEvent::Ping(event) } } +impl From for MyEvent { + fn from(event: Void) -> Self { + void::unreachable(event) + } +} + 
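Note: the example above drops the `.with_keep_alive(true)` call on the ping config and instead adds a dedicated `keep_alive::Behaviour` field. Since that behaviour never emits an event (its `OutEvent` is the uninhabited `void::Void`), the derive additionally needs the `From<Void>` conversion shown above, which can never actually run. The same pattern in isolation (the `Example` enum is illustrative):

```rust
use void::Void;

#[derive(Debug)]
enum Example {
    // No variant is needed for the keep-alive behaviour: it never emits an event.
}

impl From<Void> for Example {
    fn from(event: Void) -> Self {
        // `Void` has no values, so this body is provably unreachable.
        void::unreachable(event)
    }
}
```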
#[derive(libp2p::NetworkBehaviour)] #[behaviour(event_process = false)] #[behaviour(out_event = "MyEvent")] struct MyBehaviour { rendezvous: rendezvous::client::Behaviour, - ping: Ping, + ping: ping::Behaviour, + keep_alive: keep_alive::Behaviour, } diff --git a/protocols/rendezvous/examples/register.rs b/protocols/rendezvous/examples/register.rs index 9c21874d50c..3fbfa02785d 100644 --- a/protocols/rendezvous/examples/register.rs +++ b/protocols/rendezvous/examples/register.rs @@ -21,7 +21,7 @@ use futures::StreamExt; use libp2p::core::identity; use libp2p::core::PeerId; -use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; +use libp2p::ping; use libp2p::swarm::{Swarm, SwarmEvent}; use libp2p::{development_transport, rendezvous}; use libp2p::{Multiaddr, NetworkBehaviour}; @@ -43,7 +43,7 @@ async fn main() { development_transport(identity.clone()).await.unwrap(), MyBehaviour { rendezvous: rendezvous::client::Behaviour::new(identity.clone()), - ping: Ping::new(PingConfig::new().with_interval(Duration::from_secs(1))), + ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), }, PeerId::from(identity.public()), ); @@ -98,9 +98,9 @@ async fn main() { log::error!("Failed to register {}", error); return; } - SwarmEvent::Behaviour(MyEvent::Ping(PingEvent { + SwarmEvent::Behaviour(MyEvent::Ping(ping::Event { peer, - result: Ok(PingSuccess::Ping { rtt }), + result: Ok(ping::Success::Ping { rtt }), })) if peer != rendezvous_point => { log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } @@ -114,7 +114,7 @@ async fn main() { #[derive(Debug)] enum MyEvent { Rendezvous(rendezvous::client::Event), - Ping(PingEvent), + Ping(ping::Event), } impl From for MyEvent { @@ -123,8 +123,8 @@ impl From for MyEvent { } } -impl From for MyEvent { - fn from(event: PingEvent) -> Self { +impl From for MyEvent { + fn from(event: ping::Event) -> Self { MyEvent::Ping(event) } } @@ -134,5 +134,5 @@ impl From for MyEvent { #[behaviour(out_event = "MyEvent")] struct MyBehaviour { rendezvous: rendezvous::client::Behaviour, - ping: Ping, + ping: ping::Behaviour, } diff --git a/protocols/rendezvous/examples/register_with_identify.rs b/protocols/rendezvous/examples/register_with_identify.rs index 3896db3e3d1..f12a1a6ed98 100644 --- a/protocols/rendezvous/examples/register_with_identify.rs +++ b/protocols/rendezvous/examples/register_with_identify.rs @@ -21,12 +21,13 @@ use futures::StreamExt; use libp2p::core::identity; use libp2p::core::PeerId; -use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; -use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; -use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::identify; +use libp2p::ping; +use libp2p::swarm::{keep_alive, Swarm, SwarmEvent}; use libp2p::{development_transport, rendezvous}; use libp2p::{Multiaddr, NetworkBehaviour}; use std::time::Duration; +use void::Void; #[tokio::main] async fn main() { @@ -42,16 +43,13 @@ async fn main() { let mut swarm = Swarm::new( development_transport(identity.clone()).await.unwrap(), MyBehaviour { - identify: Identify::new(IdentifyConfig::new( + identify: identify::Behaviour::new(identify::Config::new( "rendezvous-example/1.0.0".to_string(), identity.public(), )), rendezvous: rendezvous::client::Behaviour::new(identity.clone()), - ping: Ping::new( - PingConfig::new() - .with_interval(Duration::from_secs(1)) - .with_keep_alive(true), - ), + ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), + keep_alive: keep_alive::Behaviour, }, 
PeerId::from(identity.public()), ); @@ -75,7 +73,7 @@ async fn main() { log::error!("Lost connection to rendezvous point {}", error); } // once `/identify` did its job, we know our external address and can register - SwarmEvent::Behaviour(MyEvent::Identify(IdentifyEvent::Received { .. })) => { + SwarmEvent::Behaviour(MyEvent::Identify(identify::Event::Received { .. })) => { swarm.behaviour_mut().rendezvous.register( rendezvous::Namespace::from_static("rendezvous"), rendezvous_point, @@ -100,9 +98,9 @@ async fn main() { log::error!("Failed to register {}", error); return; } - SwarmEvent::Behaviour(MyEvent::Ping(PingEvent { + SwarmEvent::Behaviour(MyEvent::Ping(ping::Event { peer, - result: Ok(PingSuccess::Ping { rtt }), + result: Ok(ping::Success::Ping { rtt }), })) if peer != rendezvous_point => { log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } @@ -114,10 +112,11 @@ async fn main() { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum MyEvent { Rendezvous(rendezvous::client::Event), - Identify(IdentifyEvent), - Ping(PingEvent), + Identify(identify::Event), + Ping(ping::Event), } impl From for MyEvent { @@ -126,23 +125,30 @@ impl From for MyEvent { } } -impl From for MyEvent { - fn from(event: IdentifyEvent) -> Self { +impl From for MyEvent { + fn from(event: identify::Event) -> Self { MyEvent::Identify(event) } } -impl From for MyEvent { - fn from(event: PingEvent) -> Self { +impl From for MyEvent { + fn from(event: ping::Event) -> Self { MyEvent::Ping(event) } } +impl From for MyEvent { + fn from(event: Void) -> Self { + void::unreachable(event) + } +} + #[derive(NetworkBehaviour)] #[behaviour(event_process = false)] #[behaviour(out_event = "MyEvent")] struct MyBehaviour { - identify: Identify, + identify: identify::Behaviour, rendezvous: rendezvous::client::Behaviour, - ping: Ping, + ping: ping::Behaviour, + keep_alive: keep_alive::Behaviour, } diff --git a/protocols/rendezvous/examples/rendezvous_point.rs b/protocols/rendezvous/examples/rendezvous_point.rs index 9d5ee6ca7db..980a3a6fd5d 100644 --- a/protocols/rendezvous/examples/rendezvous_point.rs +++ b/protocols/rendezvous/examples/rendezvous_point.rs @@ -21,14 +21,12 @@ use futures::StreamExt; use libp2p::core::identity; use libp2p::core::PeerId; -use libp2p::identify::Identify; -use libp2p::identify::IdentifyConfig; -use libp2p::identify::IdentifyEvent; +use libp2p::identify; use libp2p::ping; -use libp2p::ping::{Ping, PingEvent}; -use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::swarm::{keep_alive, Swarm, SwarmEvent}; use libp2p::NetworkBehaviour; use libp2p::{development_transport, rendezvous}; +use void::Void; /// Examples for the rendezvous protocol: /// @@ -49,12 +47,13 @@ async fn main() { let mut swarm = Swarm::new( development_transport(identity.clone()).await.unwrap(), MyBehaviour { - identify: Identify::new(IdentifyConfig::new( + identify: identify::Behaviour::new(identify::Config::new( "rendezvous-example/1.0.0".to_string(), identity.public(), )), rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), - ping: Ping::new(ping::Config::new().with_keep_alive(true)), + ping: ping::Behaviour::new(ping::Config::new()), + keep_alive: keep_alive::Behaviour, }, PeerId::from(identity.public()), ); @@ -104,8 +103,8 @@ async fn main() { #[derive(Debug)] enum MyEvent { Rendezvous(rendezvous::server::Event), - Ping(PingEvent), - Identify(IdentifyEvent), + Ping(ping::Event), + Identify(identify::Event), } impl From for MyEvent { @@ -114,23 +113,30 @@ impl From for MyEvent { } } 
-impl From for MyEvent { - fn from(event: PingEvent) -> Self { +impl From for MyEvent { + fn from(event: ping::Event) -> Self { MyEvent::Ping(event) } } -impl From for MyEvent { - fn from(event: IdentifyEvent) -> Self { +impl From for MyEvent { + fn from(event: identify::Event) -> Self { MyEvent::Identify(event) } } +impl From for MyEvent { + fn from(event: Void) -> Self { + void::unreachable(event) + } +} + #[derive(NetworkBehaviour)] #[behaviour(event_process = false)] #[behaviour(out_event = "MyEvent")] struct MyBehaviour { - identify: Identify, + identify: identify::Behaviour, rendezvous: rendezvous::server::Behaviour, - ping: Ping, + ping: ping::Behaviour, + keep_alive: keep_alive::Behaviour, } diff --git a/protocols/rendezvous/tests/harness.rs b/protocols/rendezvous/tests/harness.rs index 30dace245ff..cad3a087afb 100644 --- a/protocols/rendezvous/tests/harness.rs +++ b/protocols/rendezvous/tests/harness.rs @@ -62,11 +62,10 @@ where fn get_rand_memory_address() -> Multiaddr { let address_port = rand::random::(); - let addr = format!("/memory/{}", address_port) - .parse::() - .unwrap(); - addr + format!("/memory/{}", address_port) + .parse::() + .unwrap() } pub async fn await_event_or_timeout( diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index 458ea588832..e9c4d871b1c 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -24,9 +24,9 @@ pub mod harness; use crate::harness::{await_event_or_timeout, await_events_or_timeout, new_swarm, SwarmExt}; use futures::stream::FuturesUnordered; use futures::StreamExt; -use libp2p_core::identity; -use libp2p_rendezvous as rendezvous; -use libp2p_swarm::{DialError, Swarm, SwarmEvent}; +use libp2p::core::identity; +use libp2p::rendezvous; +use libp2p::swarm::{DialError, Swarm, SwarmEvent}; use std::convert::TryInto; use std::time::Duration; @@ -37,7 +37,7 @@ async fn given_successful_registration_then_successful_discovery() { let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; - let _ = alice + alice .behaviour_mut() .register(namespace.clone(), *robert.local_peer_id(), None); @@ -86,7 +86,7 @@ async fn given_successful_registration_then_refresh_ttl() { let roberts_peer_id = *robert.local_peer_id(); let refresh_ttl = 10_000; - let _ = alice + alice .behaviour_mut() .register(namespace.clone(), roberts_peer_id, None); @@ -374,6 +374,7 @@ struct CombinedBehaviour { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum CombinedEvent { Client(rendezvous::client::Event), Server(rendezvous::server::Event), diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 2e7dd0f84d4..7e4ba48044d 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,3 +1,13 @@ +# 0.22.0 [unreleased] + +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. + +- Update to `libp2p-core` `v0.37.0`. + +- Update to `libp2p-swarm` `v0.40.0`. + +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 + # 0.21.0 - Update to `libp2p-swarm` `v0.39.0`. 
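Note: besides consolidating dev-dependencies onto the `libp2p` facade crate, the rand 0.8 bump referenced in [PR 2857] changes `gen_range` from taking two arguments to taking a single range, which is what the test update in the next file adjusts. A minimal sketch of the migration:

```rust
use rand::Rng;

fn num_pings() -> u8 {
    // rand 0.7: rand::thread_rng().gen_range(1, 100)
    // rand 0.8: the bounds are passed as one half-open range instead.
    rand::thread_rng().gen_range(1..100)
}
```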
diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 76e802cfca4..53e0b756392 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = "1.56.1" description = "Generic Request/Response Protocols" -version = "0.21.0" +version = "0.22.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -15,17 +15,15 @@ async-trait = "0.1" bytes = "1" futures = "0.3.1" instant = "0.1.11" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.37.0", path = "../../core" } +libp2p-swarm = { version = "0.40.0", path = "../../swarm" } log = "0.4.11" -rand = "0.7" +rand = "0.8" smallvec = "1.6.1" unsigned-varint = { version = "0.7", features = ["std", "futures"] } [dev-dependencies] async-std = "1.6.2" env_logger = "0.9.0" -libp2p-noise = { path = "../../transports/noise" } -libp2p-tcp = { path = "../../transports/tcp" } -libp2p-yamux = { path = "../../muxers/yamux" } -rand = "0.7" +libp2p = { path = "../..", features = ["full"] } +rand = "0.8" diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index bfb8641c106..a333ef9111a 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -22,17 +22,17 @@ use async_trait::async_trait; use futures::{channel::mpsc, prelude::*, AsyncWriteExt}; -use libp2p_core::{ +use libp2p::core::{ identity, muxing::StreamMuxerBox, transport::{self, Transport}, upgrade::{self, read_length_prefixed, write_length_prefixed}, Multiaddr, PeerId, }; -use libp2p_noise::NoiseAuthenticated; -use libp2p_request_response::*; -use libp2p_swarm::{Swarm, SwarmEvent}; -use libp2p_tcp::{GenTcpConfig, TcpTransport}; +use libp2p::noise::NoiseAuthenticated; +use libp2p::request_response::*; +use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::tcp::{GenTcpConfig, TcpTransport}; use rand::{self, Rng}; use std::{io, iter}; @@ -127,7 +127,7 @@ fn ping_protocol() { } }; - let num_pings: u8 = rand::thread_rng().gen_range(1, 100); + let num_pings: u8 = rand::thread_rng().gen_range(1..100); let peer2 = async move { let mut count = 0; @@ -301,7 +301,7 @@ fn mk_transport() -> (PeerId, transport::Boxed<(PeerId, StreamMuxerBox)>) { TcpTransport::new(GenTcpConfig::default().nodelay(true)) .upgrade(upgrade::Version::V1) .authenticate(NoiseAuthenticated::xx(&id_keys).unwrap()) - .multiplex(libp2p_yamux::YamuxConfig::default()) + .multiplex(libp2p::yamux::YamuxConfig::default()) .boxed(), ) } diff --git a/src/lib.rs b/src/lib.rs index 96a197cf516..a345a64deab 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -54,14 +54,31 @@ pub use libp2p_dcutr as dcutr; #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_deflate as deflate; -#[cfg(any(feature = "dns-async-std", feature = "dns-tokio"))] +#[deprecated( + since = "0.49.0", + note = "The `dns-tokio` and `dns-async-std` features are deprecated. Use the new `dns` feature together with the `tokio` or `async-std` features." 
+)] +#[cfg(all( + any(feature = "dns-tokio", feature = "dns-async-std"), + not(feature = "dns") +))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[cfg_attr( docsrs, - doc(cfg(any(feature = "dns-async-std", feature = "dns-tokio"))) + doc(cfg(any(feature = "dns-tokio", feature = "dns-async-std"))) )] +pub mod dns { + #[doc(inline)] + pub use libp2p_dns::*; +} + +#[cfg(feature = "dns")] #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] -#[doc(inline)] -pub use libp2p_dns as dns; +#[cfg_attr(docsrs, doc(cfg(feature = "dns")))] +pub mod dns { + #[doc(inline)] + pub use libp2p_dns::*; +} #[cfg(feature = "floodsub")] #[cfg_attr(docsrs, doc(cfg(feature = "floodsub")))] #[doc(inline)] @@ -79,14 +96,31 @@ pub use libp2p_identify as identify; #[cfg_attr(docsrs, doc(cfg(feature = "kad")))] #[doc(inline)] pub use libp2p_kad as kad; -#[cfg(any(feature = "mdns-async-io", feature = "mdns-tokio"))] +#[deprecated( + since = "0.49.0", + note = "The `mdns-tokio` and `mdns-async-io` features are deprecated. Use the new `mdns` feature together with the `tokio` or `async-std` features." +)] +#[cfg(all( + any(feature = "mdns-async-io", feature = "mdns-tokio"), + not(feature = "mdns") +))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[cfg_attr( docsrs, doc(cfg(any(feature = "mdns-tokio", feature = "mdns-async-io"))) )] +pub mod mdns { + #[doc(inline)] + pub use libp2p_mdns::*; +} + +#[cfg(feature = "mdns")] #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] -#[doc(inline)] -pub use libp2p_mdns as mdns; +#[cfg_attr(docsrs, doc(cfg(feature = "mdns")))] +pub mod mdns { + #[doc(inline)] + pub use libp2p_mdns::*; +} #[cfg(feature = "metrics")] #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] #[doc(inline)] @@ -125,11 +159,28 @@ pub use libp2p_rendezvous as rendezvous; pub use libp2p_request_response as request_response; #[doc(inline)] pub use libp2p_swarm as swarm; -#[cfg(any(feature = "tcp-async-io", feature = "tcp-tokio"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "tcp-async-io", feature = "tcp-tokio"))))] +#[deprecated( + since = "0.49.0", + note = "The `tcp-tokio` and `tcp-async-io` features are deprecated. Use the new `tcp` feature together with the `tokio` or `async-std` features." +)] +#[cfg(all( + any(feature = "tcp-tokio", feature = "tcp-async-io"), + not(feature = "tcp") +))] #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] -#[doc(inline)] -pub use libp2p_tcp as tcp; +#[cfg_attr(docsrs, doc(cfg(any(feature = "tcp-tokio", feature = "tcp-async-io"))))] +pub mod tcp { + #[doc(inline)] + pub use libp2p_tcp::*; +} + +#[cfg(feature = "tcp")] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] +#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] +pub mod tcp { + #[doc(inline)] + pub use libp2p_tcp::*; +} #[cfg(feature = "uds")] #[cfg_attr(docsrs, doc(cfg(feature = "uds")))] #[doc(inline)] @@ -181,8 +232,10 @@ pub use libp2p_swarm_derive::NetworkBehaviour; /// > reserves the right to support additional protocols or remove deprecated protocols. 
#[cfg(all( not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), - feature = "tcp-async-io", - feature = "dns-async-std", + any( + all(feature = "tcp-async-io", feature = "dns-async-std"), + all(feature = "tcp", feature = "dns", feature = "async-std") + ), feature = "websocket", feature = "noise", feature = "mplex", @@ -192,14 +245,26 @@ pub use libp2p_swarm_derive::NetworkBehaviour; docsrs, doc(cfg(all( not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), - feature = "tcp-async-io", - feature = "dns-async-std", + any( + all(feature = "tcp-async-io", feature = "dns-async-std"), + all(feature = "tcp", feature = "dns", feature = "async-std") + ), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux" ))) )] +#[cfg_attr( + all( + any(feature = "tcp-async-io", feature = "dns-async-std"), + not(feature = "async-std") + ), + deprecated( + since = "0.49.0", + note = "The `tcp-async-io` and `dns-async-std` features are deprecated. Use the new `tcp` and `dns` features together with the `async-std` feature." + ) +)] pub async fn development_transport( keypair: identity::Keypair, ) -> std::io::Result> { @@ -241,8 +306,10 @@ pub async fn development_transport( /// > reserves the right to support additional protocols or remove deprecated protocols. #[cfg(all( not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), - feature = "tcp-tokio", - feature = "dns-tokio", + any( + all(feature = "tcp-tokio", feature = "dns-tokio"), + all(feature = "tcp", feature = "dns", feature = "tokio") + ), feature = "websocket", feature = "noise", feature = "mplex", @@ -252,14 +319,26 @@ pub async fn development_transport( docsrs, doc(cfg(all( not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), - feature = "tcp-tokio", - feature = "dns-tokio", + any( + all(feature = "tcp-tokio", feature = "dns-tokio"), + all(feature = "tcp", feature = "dns", feature = "tokio") + ), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux" ))) )] +#[cfg_attr( + all( + any(feature = "tcp-tokio", feature = "dns-tokio"), + not(feature = "tokio") + ), + deprecated( + since = "0.49.0", + note = "The `tcp-tokio` and `dns-tokio` features are deprecated. Use the new `tcp` and `dns` feature together with the `tokio` feature." + ) +)] pub fn tokio_development_transport( keypair: identity::Keypair, ) -> std::io::Result> { diff --git a/src/tutorials/ping.rs b/src/tutorials/ping.rs index f33d90015bf..ce982861d06 100644 --- a/src/tutorials/ping.rs +++ b/src/tutorials/ping.rs @@ -127,12 +127,11 @@ //! _what_ bytes to send on the network. //! //! To make this more concrete, let's take a look at a simple implementation of -//! the [`NetworkBehaviour`] trait: the [`Ping`](crate::ping::Ping) -//! [`NetworkBehaviour`]. As you might have guessed, similar to the good old -//! `ping` network tool, libp2p [`Ping`](crate::ping::Ping) sends a ping to a -//! peer and expects to receive a pong in turn. The -//! [`Ping`](crate::ping::Ping) [`NetworkBehaviour`] does not care _how_ the -//! ping and pong messages are sent on the network, whether they are sent via +//! the [`NetworkBehaviour`] trait: the [`ping::Behaviour`](crate::ping::Behaviour). +//! As you might have guessed, similar to the good old `ping` network tool, +//! libp2p [`ping::Behaviour`](crate::ping::Behaviour) sends a ping to a peer and expects +//! to receive a pong in turn. The [`ping::Behaviour`](crate::ping::Behaviour) does not care _how_ +//! 
the ping and pong messages are sent on the network, whether they are sent via //! TCP, whether they are encrypted via [noise](crate::noise) or just in //! [plaintext](crate::plaintext). It only cares about _what_ messages are sent //! on the network. @@ -140,12 +139,10 @@ //! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly //! separate _how_ to send bytes from _what_ bytes to send. //! -//! With the above in mind, let's extend our example, creating a -//! [`Ping`](crate::ping::Ping) [`NetworkBehaviour`] at the end: +//! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust -//! use libp2p::{identity, PeerId}; -//! use libp2p::ping::{Ping, PingConfig}; +//! use libp2p::{identity, PeerId, ping}; //! use std::error::Error; //! //! #[async_std::main] @@ -161,7 +158,7 @@ //! // For illustrative purposes, the ping protocol is configured to //! // keep the connection alive, so a continuous sequence of pings //! // can be observed. -//! let behaviour = Ping::new(PingConfig::new().with_keep_alive(true)); +//! let behaviour = ping::Behaviour::new(ping::Config::new().with_keep_alive(true)); //! //! Ok(()) //! } @@ -177,8 +174,7 @@ //! [`Transport`] to the [`NetworkBehaviour`]. //! //! ```rust -//! use libp2p::{identity, PeerId}; -//! use libp2p::ping::{Ping, PingConfig}; +//! use libp2p::{identity, PeerId, ping}; //! use libp2p::swarm::Swarm; //! use std::error::Error; //! @@ -195,7 +191,7 @@ //! // For illustrative purposes, the ping protocol is configured to //! // keep the connection alive, so a continuous sequence of pings //! // can be observed. -//! let behaviour = Ping::new(PingConfig::new().with_keep_alive(true)); +//! let behaviour = ping::Behaviour::new(ping::Config::new().with_keep_alive(true)); //! //! let mut swarm = Swarm::new(transport, behaviour, local_peer_id); //! @@ -230,8 +226,7 @@ //! remote peer. //! //! ```rust -//! use libp2p::{identity, Multiaddr, PeerId}; -//! use libp2p::ping::{Ping, PingConfig}; +//! use libp2p::{identity, Multiaddr, PeerId, ping}; //! use libp2p::swarm::{Swarm, dial_opts::DialOpts}; //! use std::error::Error; //! @@ -248,7 +243,7 @@ //! // For illustrative purposes, the ping protocol is configured to //! // keep the connection alive, so a continuous sequence of pings //! // can be observed. -//! let behaviour = Ping::new(PingConfig::new().with_keep_alive(true)); +//! let behaviour = ping::Behaviour::new(ping::Config::new().with_keep_alive(true)); //! //! let mut swarm = Swarm::new(transport, behaviour, local_peer_id); //! @@ -276,9 +271,8 @@ //! //! ```no_run //! use futures::prelude::*; -//! use libp2p::ping::{Ping, PingConfig}; //! use libp2p::swarm::{Swarm, SwarmEvent, dial_opts::DialOpts}; -//! use libp2p::{identity, Multiaddr, PeerId}; +//! use libp2p::{identity, Multiaddr, PeerId, ping}; //! use std::error::Error; //! //! #[async_std::main] @@ -294,7 +288,7 @@ //! // For illustrative purposes, the ping protocol is configured to //! // keep the connection alive, so a continuous sequence of pings //! // can be observed. -//! let behaviour = Ping::new(PingConfig::new().with_keep_alive(true)); +//! let behaviour = ping::Behaviour::new(ping::Config::new().with_keep_alive(true)); //! //! let mut swarm = Swarm::new(transport, behaviour, local_peer_id); //! 
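Note: with the re-export scheme in `src/lib.rs` above, code written against the old feature-gated paths keeps compiling behind a deprecation warning, because both cfg branches expose the same `libp2p::tcp`, `libp2p::dns` and `libp2p::mdns` modules; only the Cargo feature names change. A consumer-side sketch of building a transport against the new feature set (assuming the `tcp`, `async-std`, `noise` and `yamux` features, mirroring the transport setup used elsewhere in this PR):

```rust
use libp2p::core::muxing::StreamMuxerBox;
use libp2p::core::transport::Boxed;
use libp2p::core::upgrade;
use libp2p::tcp::{GenTcpConfig, TcpTransport};
use libp2p::{identity, noise, PeerId, Transport};

// Same transport stack the updated examples use; only the Cargo feature
// names ("tcp" + "async-std" instead of "tcp-async-io") are new.
fn build_transport(keypair: &identity::Keypair) -> Boxed<(PeerId, StreamMuxerBox)> {
    TcpTransport::new(GenTcpConfig::default().nodelay(true))
        .upgrade(upgrade::Version::V1)
        .authenticate(noise::NoiseAuthenticated::xx(keypair).expect("valid noise keypair"))
        .multiplex(libp2p::yamux::YamuxConfig::default())
        .boxed()
}
```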
diff --git a/swarm-derive/CHANGELOG.md b/swarm-derive/CHANGELOG.md index 464bec7fe95..0c64fd352da 100644 --- a/swarm-derive/CHANGELOG.md +++ b/swarm-derive/CHANGELOG.md @@ -2,8 +2,10 @@ - Fix an issue where the derive would generate bad code if the type parameters between the behaviour and a custom out event differed. See [PR 2907]. +- Fix an issue where the derive would generate incorrect code depending on available imports. See [PR 2921]. [PR 2907]: https://github.com/libp2p/rust-libp2p/pull/2907 +[PR 2921]: https://github.com/libp2p/rust-libp2p/pull/2921 # 0.30.0 diff --git a/swarm-derive/Cargo.toml b/swarm-derive/Cargo.toml index 1d2c54a9da1..baab666b0eb 100644 --- a/swarm-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -19,7 +19,7 @@ quote = "1.0" syn = { version = "1.0.8", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } [dev-dependencies] -libp2p = { path = "../", default-features = false, features = ["ping", "identify", "kad"] } +libp2p = { path = "..", features = ["full"] } either = "1.6.0" futures = "0.3.1" void = "1" diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index 6899ba7d79d..426e4faec1f 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -125,7 +125,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { ) .unwrap(); let ty = &field.ty; - quote! {#variant(<#ty as NetworkBehaviour>::OutEvent)} + quote! {#variant(<#ty as #trait_to_impl>::OutEvent)} }) .collect::>(); let visibility = &ast.vis; diff --git a/swarm-derive/tests/test.rs b/swarm-derive/tests/test.rs index e0f77eefd30..2404ef699ca 100644 --- a/swarm-derive/tests/test.rs +++ b/swarm-derive/tests/test.rs @@ -19,7 +19,8 @@ // DEALINGS IN THE SOFTWARE. use futures::prelude::*; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; +use libp2p::swarm::{dummy, NetworkBehaviour, SwarmEvent}; +use libp2p::{identify, ping}; use libp2p_swarm_derive::*; use std::fmt::Debug; @@ -40,15 +41,14 @@ fn one_field() { #[allow(dead_code)] #[derive(NetworkBehaviour)] struct Foo { - ping: libp2p::ping::Ping, + ping: ping::Behaviour, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { - FooEvent::Ping(libp2p::ping::Event { .. }) => {} + FooEvent::Ping(ping::Event { .. }) => {} } } } @@ -58,18 +58,17 @@ fn two_fields() { #[allow(dead_code)] #[derive(NetworkBehaviour)] struct Foo { - ping: libp2p::ping::Ping, - identify: libp2p::identify::Identify, + ping: ping::Behaviour, + identify: identify::Behaviour, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { - FooEvent::Ping(libp2p::ping::Event { .. }) => {} + FooEvent::Ping(ping::Event { .. 
}) => {} FooEvent::Identify(event) => { - let _: libp2p::identify::IdentifyEvent = event; + let _: identify::Event = event; } } } @@ -80,19 +79,18 @@ fn three_fields() { #[allow(dead_code)] #[derive(NetworkBehaviour)] struct Foo { - ping: libp2p::ping::Ping, - identify: libp2p::identify::Identify, + ping: ping::Behaviour, + identify: identify::Behaviour, kad: libp2p::kad::Kademlia, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { - FooEvent::Ping(libp2p::ping::Event { .. }) => {} + FooEvent::Ping(ping::Event { .. }) => {} FooEvent::Identify(event) => { - let _: libp2p::identify::IdentifyEvent = event; + let _: identify::Event = event; } FooEvent::Kad(event) => { let _: libp2p::kad::KademliaEvent = event; @@ -107,23 +105,24 @@ fn custom_event() { #[derive(NetworkBehaviour)] #[behaviour(out_event = "MyEvent")] struct Foo { - ping: libp2p::ping::Ping, - identify: libp2p::identify::Identify, + ping: ping::Behaviour, + identify: identify::Behaviour, } + #[allow(clippy::large_enum_variant)] enum MyEvent { - Ping(libp2p::ping::PingEvent), - Identify(libp2p::identify::IdentifyEvent), + Ping(ping::Event), + Identify(identify::Event), } - impl From for MyEvent { - fn from(event: libp2p::ping::PingEvent) -> Self { + impl From for MyEvent { + fn from(event: ping::Event) -> Self { MyEvent::Ping(event) } } - impl From for MyEvent { - fn from(event: libp2p::identify::IdentifyEvent) -> Self { + impl From for MyEvent { + fn from(event: libp2p::identify::Event) -> Self { MyEvent::Identify(event) } } @@ -140,23 +139,24 @@ fn custom_event_mismatching_field_names() { #[derive(NetworkBehaviour)] #[behaviour(out_event = "MyEvent")] struct Foo { - a: libp2p::ping::Ping, - b: libp2p::identify::Identify, + a: ping::Behaviour, + b: libp2p::identify::Behaviour, } + #[allow(clippy::large_enum_variant)] enum MyEvent { - Ping(libp2p::ping::PingEvent), - Identify(libp2p::identify::IdentifyEvent), + Ping(ping::Event), + Identify(libp2p::identify::Event), } - impl From for MyEvent { - fn from(event: libp2p::ping::PingEvent) -> Self { + impl From for MyEvent { + fn from(event: ping::Event) -> Self { MyEvent::Ping(event) } } - impl From for MyEvent { - fn from(event: libp2p::identify::IdentifyEvent) -> Self { + impl From for MyEvent { + fn from(event: libp2p::identify::Event) -> Self { MyEvent::Identify(event) } } @@ -175,7 +175,7 @@ fn bound() { where ::OutEvent: Debug, { - ping: libp2p::ping::Ping, + ping: ping::Behaviour, bar: T, } } @@ -189,7 +189,7 @@ fn where_clause() { T: Copy + NetworkBehaviour, ::OutEvent: Debug, { - ping: libp2p::ping::Ping, + ping: ping::Behaviour, bar: T, } } @@ -199,7 +199,7 @@ fn nested_derives_with_import() { #[allow(dead_code)] #[derive(NetworkBehaviour)] struct Foo { - ping: libp2p::ping::Ping, + ping: ping::Behaviour, } #[allow(dead_code)] @@ -208,44 +208,44 @@ fn nested_derives_with_import() { foo: Foo, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { - BarEvent::Foo(FooEvent::Ping(libp2p::ping::Event { .. })) => {} + BarEvent::Foo(FooEvent::Ping(ping::Event { .. 
})) => {} } } } #[test] fn custom_event_emit_event_through_poll() { + #[allow(clippy::large_enum_variant)] enum BehaviourOutEvent { - Ping(libp2p::ping::PingEvent), - Identify(libp2p::identify::IdentifyEvent), + Ping(ping::Event), + Identify(identify::Event), } - impl From for BehaviourOutEvent { - fn from(event: libp2p::ping::PingEvent) -> Self { + impl From for BehaviourOutEvent { + fn from(event: ping::Event) -> Self { BehaviourOutEvent::Ping(event) } } - impl From for BehaviourOutEvent { - fn from(event: libp2p::identify::IdentifyEvent) -> Self { + impl From for BehaviourOutEvent { + fn from(event: libp2p::identify::Event) -> Self { BehaviourOutEvent::Identify(event) } } - #[allow(dead_code)] + #[allow(dead_code, clippy::large_enum_variant)] #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOutEvent")] struct Foo { - ping: libp2p::ping::Ping, - identify: libp2p::identify::Identify, + ping: ping::Behaviour, + identify: identify::Behaviour, } - #[allow(dead_code, unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn bar() { require_net_behaviour::(); @@ -271,8 +271,8 @@ fn with_toggle() { #[allow(dead_code)] #[derive(NetworkBehaviour)] struct Foo { - identify: libp2p::identify::Identify, - ping: Toggle, + identify: identify::Behaviour, + ping: Toggle, } #[allow(dead_code)] @@ -289,7 +289,7 @@ fn with_either() { #[derive(NetworkBehaviour)] struct Foo { kad: libp2p::kad::Kademlia, - ping_or_identify: Either, + ping_or_identify: Either, } #[allow(dead_code)] @@ -304,7 +304,7 @@ fn custom_event_with_either() { enum BehaviourOutEvent { Kad(libp2p::kad::KademliaEvent), - PingOrIdentify(Either), + PingOrIdentify(Either), } impl From for BehaviourOutEvent { @@ -313,8 +313,8 @@ fn custom_event_with_either() { } } - impl From> for BehaviourOutEvent { - fn from(event: Either) -> Self { + impl From> for BehaviourOutEvent { + fn from(event: Either) -> Self { BehaviourOutEvent::PingOrIdentify(event) } } @@ -324,7 +324,7 @@ fn custom_event_with_either() { #[behaviour(out_event = "BehaviourOutEvent")] struct Foo { kad: libp2p::kad::Kademlia, - ping_or_identify: Either, + ping_or_identify: Either, } #[allow(dead_code)] @@ -338,7 +338,7 @@ fn generated_out_event_derive_debug() { #[allow(dead_code)] #[derive(NetworkBehaviour)] struct Foo { - ping: libp2p::ping::Ping, + ping: ping::Behaviour, } fn require_debug() @@ -354,7 +354,6 @@ fn generated_out_event_derive_debug() { #[test] fn custom_out_event_no_type_parameters() { use libp2p::core::connection::ConnectionId; - use libp2p::swarm::handler::DummyConnectionHandler; use libp2p::swarm::{ ConnectionHandler, IntoConnectionHandler, NetworkBehaviourAction, PollParameters, }; @@ -367,11 +366,11 @@ fn custom_out_event_no_type_parameters() { } impl NetworkBehaviour for TemplatedBehaviour { - type ConnectionHandler = DummyConnectionHandler; + type ConnectionHandler = dummy::ConnectionHandler; type OutEvent = void::Void; fn new_handler(&mut self) -> Self::ConnectionHandler { - DummyConnectionHandler::default() + dummy::ConnectionHandler } fn inject_event( diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index abf54657cda..ec7b8e83d4f 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,3 +1,21 @@ +# 0.40.0 [unreleased] + +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. + +- Update to `libp2p-core` `v0.37.0`. + +- Introduce `libp2p_swarm::keep_alive::ConnectionHandler` in favor of removing `keep_alive` from + `libp2p_swarm::dummy::ConnectionHandler`. 
`dummy::ConnectionHandler` now literally does not do anything. In the same + spirit, introduce `libp2p_swarm::keep_alive::Behaviour` and `libp2p_swarm::dummy::Behaviour`. See [PR 2859]. + +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 +[PR 2859]: https://github.com/libp2p/rust-libp2p/pull/2859/ + +- Pass actual `PeerId` of dial to `NetworkBehaviour::inject_dial_failure` on `DialError::ConnectionLimit`. See [PR 2928]. + +[PR 2928]: https://github.com/libp2p/rust-libp2p/pull/2928 + + # 0.39.0 - Remove deprecated `NetworkBehaviourEventProcess`. See [libp2p-swarm v0.38.0 changelog entry] for @@ -5,7 +23,12 @@ - Update to `libp2p-core` `v0.36.0`. +- Enforce backpressure on incoming streams via `StreamMuxer` interface. In case we hit the configured limit of maximum + number of inbound streams, we will stop polling the `StreamMuxer` for new inbound streams. Depending on the muxer + implementation in use, this may lead to instant dropping of inbound streams. See [PR 2861]. + [libp2p-swarm v0.38.0 changelog entry]: https://github.com/libp2p/rust-libp2p/blob/master/swarm/CHANGELOG.md#0380 +[PR 2861]: https://github.com/libp2p/rust-libp2p/pull/2861/ # 0.38.0 diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index a22b114f82e..adbb81cd70e 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm" edition = "2021" rust-version = "1.56.1" description = "The libp2p swarm" -version = "0.39.0" +version = "0.40.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,10 +16,10 @@ fnv = "1.0" futures = "0.3.1" futures-timer = "3.0.2" instant = "0.1.11" -libp2p-core = { version = "0.36.0", path = "../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../core" } log = "0.4" pin-project = "1.0.0" -rand = "0.7" +rand = "0.8" smallvec = "1.6.1" thiserror = "1.0" void = "1" @@ -27,9 +27,5 @@ void = "1" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } env_logger = "0.9" -libp2p = { path = "../", default-features = false, features = ["identify", "ping", "plaintext", "yamux"] } -libp2p-mplex = { path = "../muxers/mplex" } -libp2p-noise = { path = "../transports/noise" } -libp2p-tcp = { path = "../transports/tcp" } -quickcheck = "0.9.0" -rand = "0.7.2" +libp2p = { path = "..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../misc/quickcheck-ext" } diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 2227e73c9de..f5972569438 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -62,9 +62,9 @@ pub(crate) type THandlerOutEvent = /// [`Toggle`](crate::behaviour::toggle::Toggle) [`NetworkBehaviour`]. /// /// ``` rust -/// # use libp2p_swarm::DummyBehaviour; +/// # use libp2p_swarm::dummy; /// # use libp2p_swarm::behaviour::toggle::Toggle; -/// let my_behaviour = DummyBehaviour::default(); +/// let my_behaviour = dummy::Behaviour; /// let my_toggled_behaviour = Toggle::from(Some(my_behaviour)); /// ``` /// @@ -90,29 +90,29 @@ pub(crate) type THandlerOutEvent = /// addition to the event `enum` itself. 
/// /// ``` rust -/// # use libp2p::identify::{Identify, IdentifyEvent}; -/// # use libp2p::ping::{Ping, PingEvent}; +/// # use libp2p::identify; +/// # use libp2p::ping; /// # use libp2p::NetworkBehaviour; /// #[derive(NetworkBehaviour)] /// #[behaviour(out_event = "Event")] /// struct MyBehaviour { -/// identify: Identify, -/// ping: Ping, +/// identify: identify::Behaviour, +/// ping: ping::Behaviour, /// } /// /// enum Event { -/// Identify(IdentifyEvent), -/// Ping(PingEvent), +/// Identify(identify::Event), +/// Ping(ping::Event), /// } /// -/// impl From for Event { -/// fn from(event: IdentifyEvent) -> Self { +/// impl From for Event { +/// fn from(event: identify::Event) -> Self { /// Self::Identify(event) /// } /// } /// -/// impl From for Event { -/// fn from(event: PingEvent) -> Self { +/// impl From for Event { +/// fn from(event: ping::Event) -> Self { /// Self::Ping(event) /// } /// } diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 6d886425a6f..c4e1b2bf560 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -297,7 +297,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::handler::DummyConnectionHandler; + use crate::dummy; /// A disabled [`ToggleConnectionHandler`] can receive listen upgrade errors in /// the following two cases: @@ -315,7 +315,7 @@ mod tests { /// [`ToggleConnectionHandler`] should ignore the error in both of these cases. #[test] fn ignore_listen_upgrade_error_when_disabled() { - let mut handler = ToggleConnectionHandler:: { inner: None }; + let mut handler = ToggleConnectionHandler:: { inner: None }; handler.inject_listen_upgrade_error(Either::Right(()), ConnectionHandlerUpgrErr::Timeout); } diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 24e54aba525..304f2b5c759 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -19,7 +19,6 @@ // DEALINGS IN THE SOFTWARE. mod error; -mod handler_wrapper; pub(crate) mod pool; @@ -31,16 +30,23 @@ pub use pool::{ConnectionCounters, ConnectionLimits}; pub use pool::{EstablishedConnection, PendingConnection}; use crate::handler::ConnectionHandler; -use crate::IntoConnectionHandler; -use handler_wrapper::HandlerWrapper; +use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper}; +use crate::{ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, SubstreamProtocol}; +use futures::stream::FuturesUnordered; +use futures::FutureExt; +use futures::StreamExt; +use futures_timer::Delay; +use instant::Instant; use libp2p_core::connection::ConnectedPoint; use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt}; -use libp2p_core::upgrade; +use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}; +use libp2p_core::upgrade::{InboundUpgradeApply, OutboundUpgradeApply}; use libp2p_core::PeerId; -use std::collections::VecDeque; +use libp2p_core::{upgrade, UpgradeError}; use std::future::Future; -use std::{error::Error, fmt, io, pin::Pin, task::Context, task::Poll}; +use std::task::Waker; +use std::time::Duration; +use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll}; /// Information about a successfully established connection. #[derive(Debug, Clone, PartialEq, Eq)] @@ -51,13 +57,6 @@ pub struct Connected { pub peer_id: PeerId, } -/// Endpoint for a received substream. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum SubstreamEndpoint { - Dialer(TDialInfo), - Listener, -} - /// Event generated by a [`Connection`]. #[derive(Debug, Clone)] pub enum Event { @@ -74,10 +73,43 @@ where { /// Node that handles the muxing. muxing: StreamMuxerBox, - /// Handler that processes substreams. - handler: HandlerWrapper, - /// List of "open_info" that is waiting for new outbound substreams. - open_info: VecDeque>, + /// The underlying handler. + handler: THandler, + /// Futures that upgrade incoming substreams. + negotiating_in: FuturesUnordered< + SubstreamUpgrade< + THandler::InboundOpenInfo, + InboundUpgradeApply>, + >, + >, + /// Futures that upgrade outgoing substreams. + negotiating_out: FuturesUnordered< + SubstreamUpgrade< + THandler::OutboundOpenInfo, + OutboundUpgradeApply>, + >, + >, + /// The currently planned connection & handler shutdown. + shutdown: Shutdown, + /// The substream upgrade protocol override, if any. + substream_upgrade_protocol_override: Option, + /// The maximum number of inbound streams concurrently negotiating on a + /// connection. New inbound streams exceeding the limit are dropped and thus + /// reset. + /// + /// Note: This only enforces a limit on the number of concurrently + /// negotiating inbound streams. The total number of inbound streams on a + /// connection is the sum of negotiating and negotiated streams. A limit on + /// the total number of streams can be enforced at the + /// [`StreamMuxerBox`](libp2p_core::muxing::StreamMuxerBox) level. + max_negotiating_inbound_streams: usize, + /// Contains all upgrades that are waiting for a new outbound substream. + /// + /// The upgrade timeout is already ticking here so this may fail in case the remote is not quick + /// enough in providing us with a new stream. + requested_substreams: FuturesUnordered< + SubstreamRequested, + >, } impl fmt::Debug for Connection @@ -88,7 +120,6 @@ where fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Connection") .field("handler", &self.handler) - .field("open_info", &self.open_info) .finish() } } @@ -102,24 +133,20 @@ where /// Builds a new `Connection` from the given substream multiplexer /// and connection handler. pub fn new( - peer_id: PeerId, - endpoint: ConnectedPoint, muxer: StreamMuxerBox, - handler: impl IntoConnectionHandler, + handler: THandler, substream_upgrade_protocol_override: Option, max_negotiating_inbound_streams: usize, ) -> Self { - let wrapped_handler = HandlerWrapper::new( - peer_id, - endpoint, + Connection { + muxing: muxer, handler, + negotiating_in: Default::default(), + negotiating_out: Default::default(), + shutdown: Shutdown::None, substream_upgrade_protocol_override, max_negotiating_inbound_streams, - ); - Connection { - muxing: muxer, - handler: wrapped_handler, - open_info: VecDeque::with_capacity(8), + requested_substreams: Default::default(), } } @@ -131,57 +158,157 @@ where /// Begins an orderly shutdown of the connection, returning the connection /// handler and a `Future` that resolves when connection shutdown is complete. pub fn close(self) -> (THandler, impl Future>) { - (self.handler.into_connection_handler(), self.muxing.close()) + (self.handler, self.muxing.close()) } /// Polls the handler and the substream, forwarding events from the former to the latter and /// vice versa. 
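+    ///
+    /// On every wake-up, progress is attempted in a fixed order: outbound substream
+    /// requests whose upgrade timeout may have expired, the [`ConnectionHandler`] itself,
+    /// outbound and then inbound substream upgrades, the keep-alive/shutdown decision and
+    /// finally the muxer (address changes, opening requested outbound streams and accepting
+    /// inbound streams up to `max_negotiating_inbound_streams`).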
pub fn poll( - mut self: Pin<&mut Self>, + self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll, ConnectionError>> { + let Self { + requested_substreams, + muxing, + handler, + negotiating_out, + negotiating_in, + shutdown, + max_negotiating_inbound_streams, + substream_upgrade_protocol_override, + } = self.get_mut(); + loop { - // Poll the handler for new events. - match self.handler.poll(cx)? { + match requested_substreams.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(()))) => continue, + Poll::Ready(Some(Err(user_data))) => { + handler.inject_dial_upgrade_error(user_data, ConnectionHandlerUpgrErr::Timeout); + continue; + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Poll the [`ConnectionHandler`]. + match handler.poll(cx) { Poll::Pending => {} - Poll::Ready(handler_wrapper::Event::OutboundSubstreamRequest(user_data)) => { - self.open_info.push_back(user_data); + Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }) => { + let timeout = *protocol.timeout(); + let (upgrade, user_data) = protocol.into_upgrade(); + + requested_substreams.push(SubstreamRequested::new(user_data, timeout, upgrade)); continue; // Poll handler until exhausted. } - Poll::Ready(handler_wrapper::Event::Custom(event)) => { + Poll::Ready(ConnectionHandlerEvent::Custom(event)) => { return Poll::Ready(Ok(Event::Handler(event))); } + Poll::Ready(ConnectionHandlerEvent::Close(err)) => { + return Poll::Ready(Err(ConnectionError::Handler(err))); + } } - match self.muxing.poll_unpin(cx)? { + // In case the [`ConnectionHandler`] can not make any more progress, poll the negotiating outbound streams. + match negotiating_out.poll_next_unpin(cx) { + Poll::Pending | Poll::Ready(None) => {} + Poll::Ready(Some((user_data, Ok(upgrade)))) => { + handler.inject_fully_negotiated_outbound(upgrade, user_data); + continue; + } + Poll::Ready(Some((user_data, Err(err)))) => { + handler.inject_dial_upgrade_error(user_data, err); + continue; + } + } + + // In case both the [`ConnectionHandler`] and the negotiating outbound streams can not + // make any more progress, poll the negotiating inbound streams. + match negotiating_in.poll_next_unpin(cx) { + Poll::Pending | Poll::Ready(None) => {} + Poll::Ready(Some((user_data, Ok(upgrade)))) => { + handler.inject_fully_negotiated_inbound(upgrade, user_data); + continue; + } + Poll::Ready(Some((user_data, Err(err)))) => { + handler.inject_listen_upgrade_error(user_data, err); + continue; + } + } + + // Ask the handler whether it wants the connection (and the handler itself) + // to be kept alive, which determines the planned shutdown, if any. + let keep_alive = handler.connection_keep_alive(); + match (&mut *shutdown, keep_alive) { + (Shutdown::Later(timer, deadline), KeepAlive::Until(t)) => { + if *deadline != t { + *deadline = t; + if let Some(dur) = deadline.checked_duration_since(Instant::now()) { + timer.reset(dur) + } + } + } + (_, KeepAlive::Until(t)) => { + if let Some(dur) = t.checked_duration_since(Instant::now()) { + *shutdown = Shutdown::Later(Delay::new(dur), t) + } + } + (_, KeepAlive::No) => *shutdown = Shutdown::Asap, + (_, KeepAlive::Yes) => *shutdown = Shutdown::None, + }; + + // Check if the connection (and handler) should be shut down. + // As long as we're still negotiating substreams, shutdown is always postponed. 
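+            // Outbound substream requests that have not been served yet count as activity
+            // as well and postpone the shutdown until they complete or time out.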
+ if negotiating_in.is_empty() + && negotiating_out.is_empty() + && requested_substreams.is_empty() + { + match shutdown { + Shutdown::None => {} + Shutdown::Asap => return Poll::Ready(Err(ConnectionError::KeepAliveTimeout)), + Shutdown::Later(delay, _) => match Future::poll(Pin::new(delay), cx) { + Poll::Ready(_) => { + return Poll::Ready(Err(ConnectionError::KeepAliveTimeout)) + } + Poll::Pending => {} + }, + } + } + + match muxing.poll_unpin(cx)? { Poll::Pending => {} Poll::Ready(StreamMuxerEvent::AddressChange(address)) => { - self.handler.inject_address_change(&address); + handler.inject_address_change(&address); return Poll::Ready(Ok(Event::AddressChange(address))); } } - if !self.open_info.is_empty() { - match self.muxing.poll_outbound_unpin(cx)? { + if let Some(requested_substream) = requested_substreams.iter_mut().next() { + match muxing.poll_outbound_unpin(cx)? { Poll::Pending => {} Poll::Ready(substream) => { - let user_data = self - .open_info - .pop_front() - .expect("`open_info` is not empty"); - let endpoint = SubstreamEndpoint::Dialer(user_data); - self.handler.inject_substream(substream, endpoint); + let (user_data, timeout, upgrade) = requested_substream.extract(); + + negotiating_out.push(SubstreamUpgrade::new_outbound( + substream, + user_data, + timeout, + upgrade, + *substream_upgrade_protocol_override, + )); + continue; // Go back to the top, handler can potentially make progress again. } } } - match self.muxing.poll_inbound_unpin(cx)? { - Poll::Pending => {} - Poll::Ready(substream) => { - self.handler - .inject_substream(substream, SubstreamEndpoint::Listener); - continue; // Go back to the top, handler can potentially make progress again. + if negotiating_in.len() < *max_negotiating_inbound_streams { + match muxing.poll_inbound_unpin(cx)? { + Poll::Pending => {} + Poll::Ready(substream) => { + let protocol = handler.listen_protocol(); + + negotiating_in.push(SubstreamUpgrade::new_inbound(substream, protocol)); + + continue; // Go back to the top, handler can potentially make progress again. + } } } @@ -225,4 +352,454 @@ impl fmt::Display for ConnectionLimit { } /// A `ConnectionLimit` can represent an error if it has been exceeded. 
-impl Error for ConnectionLimit {} +impl std::error::Error for ConnectionLimit {} + +struct SubstreamUpgrade { + user_data: Option, + timeout: Delay, + upgrade: Upgrade, +} + +impl + SubstreamUpgrade>> +where + Upgrade: Send + OutboundUpgradeSend, +{ + fn new_outbound( + substream: SubstreamBox, + user_data: UserData, + timeout: Delay, + upgrade: Upgrade, + version_override: Option, + ) -> Self { + let effective_version = match version_override { + Some(version_override) if version_override != upgrade::Version::default() => { + log::debug!( + "Substream upgrade protocol override: {:?} -> {:?}", + upgrade::Version::default(), + version_override + ); + + version_override + } + _ => upgrade::Version::default(), + }; + + Self { + user_data: Some(user_data), + timeout, + upgrade: upgrade::apply_outbound(substream, SendWrapper(upgrade), effective_version), + } + } +} + +impl + SubstreamUpgrade>> +where + Upgrade: Send + InboundUpgradeSend, +{ + fn new_inbound( + substream: SubstreamBox, + protocol: SubstreamProtocol, + ) -> Self { + let timeout = *protocol.timeout(); + let (upgrade, open_info) = protocol.into_upgrade(); + + Self { + user_data: Some(open_info), + timeout: Delay::new(timeout), + upgrade: upgrade::apply_inbound(substream, SendWrapper(upgrade)), + } + } +} + +impl Unpin for SubstreamUpgrade {} + +impl Future for SubstreamUpgrade +where + Upgrade: Future>> + Unpin, +{ + type Output = ( + UserData, + Result>, + ); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + match self.timeout.poll_unpin(cx) { + Poll::Ready(()) => { + return Poll::Ready(( + self.user_data + .take() + .expect("Future not to be polled again once ready."), + Err(ConnectionHandlerUpgrErr::Timeout), + )) + } + + Poll::Pending => {} + } + + match self.upgrade.poll_unpin(cx) { + Poll::Ready(Ok(upgrade)) => Poll::Ready(( + self.user_data + .take() + .expect("Future not to be polled again once ready."), + Ok(upgrade), + )), + Poll::Ready(Err(err)) => Poll::Ready(( + self.user_data + .take() + .expect("Future not to be polled again once ready."), + Err(ConnectionHandlerUpgrErr::Upgrade(err)), + )), + Poll::Pending => Poll::Pending, + } + } +} + +enum SubstreamRequested { + Waiting { + user_data: UserData, + timeout: Delay, + upgrade: Upgrade, + /// A waker to notify our [`FuturesUnordered`] that we have extracted the data. + /// + /// This will ensure that we will get polled again in the next iteration which allows us to + /// resolve with `Ok(())` and be removed from the [`FuturesUnordered`]. + extracted_waker: Option, + }, + Done, +} + +impl SubstreamRequested { + fn new(user_data: UserData, timeout: Duration, upgrade: Upgrade) -> Self { + Self::Waiting { + user_data, + timeout: Delay::new(timeout), + upgrade, + extracted_waker: None, + } + } + + fn extract(&mut self) -> (UserData, Delay, Upgrade) { + match mem::replace(self, Self::Done) { + SubstreamRequested::Waiting { + user_data, + timeout, + upgrade, + extracted_waker: waker, + } => { + if let Some(waker) = waker { + waker.wake(); + } + + (user_data, timeout, upgrade) + } + SubstreamRequested::Done => panic!("cannot extract twice"), + } + } +} + +impl Unpin for SubstreamRequested {} + +impl Future for SubstreamRequested { + type Output = Result<(), UserData>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + match mem::replace(this, Self::Done) { + SubstreamRequested::Waiting { + user_data, + upgrade, + mut timeout, + .. 
+ } => match timeout.poll_unpin(cx) { + Poll::Ready(()) => Poll::Ready(Err(user_data)), + Poll::Pending => { + *this = Self::Waiting { + user_data, + upgrade, + timeout, + extracted_waker: Some(cx.waker().clone()), + }; + Poll::Pending + } + }, + SubstreamRequested::Done => Poll::Ready(Ok(())), + } + } +} + +/// The options for a planned connection & handler shutdown. +/// +/// A shutdown is planned anew based on the the return value of +/// [`ConnectionHandler::connection_keep_alive`] of the underlying handler +/// after every invocation of [`ConnectionHandler::poll`]. +/// +/// A planned shutdown is always postponed for as long as there are ingoing +/// or outgoing substreams being negotiated, i.e. it is a graceful, "idle" +/// shutdown. +#[derive(Debug)] +enum Shutdown { + /// No shutdown is planned. + None, + /// A shut down is planned as soon as possible. + Asap, + /// A shut down is planned for when a `Delay` has elapsed. + Later(Delay, Instant), +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::keep_alive; + use futures::AsyncRead; + use futures::AsyncWrite; + use libp2p_core::upgrade::DeniedUpgrade; + use libp2p_core::StreamMuxer; + use quickcheck::*; + use std::sync::{Arc, Weak}; + use void::Void; + + #[test] + fn max_negotiating_inbound_streams() { + fn prop(max_negotiating_inbound_streams: u8) { + let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); + + let alive_substream_counter = Arc::new(()); + + let mut connection = Connection::new( + StreamMuxerBox::new(DummyStreamMuxer { + counter: alive_substream_counter.clone(), + }), + keep_alive::ConnectionHandler, + None, + max_negotiating_inbound_streams, + ); + + let result = Pin::new(&mut connection) + .poll(&mut Context::from_waker(futures::task::noop_waker_ref())); + + assert!(result.is_pending()); + assert_eq!( + Arc::weak_count(&alive_substream_counter), + max_negotiating_inbound_streams, + "Expect no more than the maximum number of allowed streams" + ); + } + + QuickCheck::new().quickcheck(prop as fn(_)); + } + + #[test] + fn outbound_stream_timeout_starts_on_request() { + let upgrade_timeout = Duration::from_secs(1); + let mut connection = Connection::new( + StreamMuxerBox::new(PendingStreamMuxer), + MockConnectionHandler::new(upgrade_timeout), + None, + 2, + ); + + connection.handler.open_new_outbound(); + let _ = Pin::new(&mut connection) + .poll(&mut Context::from_waker(futures::task::noop_waker_ref())); + + std::thread::sleep(upgrade_timeout + Duration::from_secs(1)); + + let _ = Pin::new(&mut connection) + .poll(&mut Context::from_waker(futures::task::noop_waker_ref())); + + assert!(matches!( + connection.handler.error.unwrap(), + ConnectionHandlerUpgrErr::Timeout + )) + } + + struct DummyStreamMuxer { + counter: Arc<()>, + } + + impl StreamMuxer for DummyStreamMuxer { + type Substream = PendingSubstream; + type Error = Void; + + fn poll_inbound( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(PendingSubstream(Arc::downgrade(&self.counter)))) + } + + fn poll_outbound( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + } + + /// A [`StreamMuxer`] which never returns a stream. 
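+    ///
+    /// Keeps a requested outbound stream pending forever, which lets
+    /// `outbound_stream_timeout_starts_on_request` verify that the upgrade timeout alone
+    /// resolves the request.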
+ struct PendingStreamMuxer; + + impl StreamMuxer for PendingStreamMuxer { + type Substream = PendingSubstream; + type Error = Void; + + fn poll_inbound( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + + fn poll_outbound( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Pending + } + + fn poll( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + } + + struct PendingSubstream(Weak<()>); + + impl AsyncRead for PendingSubstream { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &mut [u8], + ) -> Poll> { + Poll::Pending + } + } + + impl AsyncWrite for PendingSubstream { + fn poll_write( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &[u8], + ) -> Poll> { + Poll::Pending + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Pending + } + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Pending + } + } + + struct MockConnectionHandler { + outbound_requested: bool, + error: Option>, + upgrade_timeout: Duration, + } + + impl MockConnectionHandler { + fn new(upgrade_timeout: Duration) -> Self { + Self { + outbound_requested: false, + error: None, + upgrade_timeout, + } + } + + fn open_new_outbound(&mut self) { + self.outbound_requested = true; + } + } + + impl ConnectionHandler for MockConnectionHandler { + type InEvent = Void; + type OutEvent = Void; + type Error = Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = DeniedUpgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = (); + + fn listen_protocol( + &self, + ) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()).with_timeout(self.upgrade_timeout) + } + + fn inject_fully_negotiated_inbound( + &mut self, + protocol: ::Output, + _: Self::InboundOpenInfo, + ) { + void::unreachable(protocol) + } + + fn inject_fully_negotiated_outbound( + &mut self, + protocol: ::Output, + _: Self::OutboundOpenInfo, + ) { + void::unreachable(protocol) + } + + fn inject_event(&mut self, event: Self::InEvent) { + void::unreachable(event) + } + + fn inject_dial_upgrade_error( + &mut self, + _: Self::OutboundOpenInfo, + error: ConnectionHandlerUpgrErr<::Error>, + ) { + self.error = Some(error) + } + + fn connection_keep_alive(&self) -> KeepAlive { + KeepAlive::Yes + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + if self.outbound_requested { + self.outbound_requested = false; + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(DeniedUpgrade, ()) + .with_timeout(self.upgrade_timeout), + }); + } + + Poll::Pending + } + } +} diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs index 8a6d6bbbf00..db51ebca874 100644 --- a/swarm/src/connection/error.rs +++ b/swarm/src/connection/error.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use super::handler_wrapper; use crate::transport::TransportError; use crate::Multiaddr; use crate::{connection::ConnectionLimit, ConnectedPoint, PeerId}; @@ -66,15 +65,6 @@ where } } -impl From> for ConnectionError { - fn from(error: handler_wrapper::Error) -> Self { - match error { - handler_wrapper::Error::Handler(e) => Self::Handler(e), - handler_wrapper::Error::KeepAliveTimeout => Self::KeepAliveTimeout, - } - } -} - impl From for ConnectionError { fn from(error: io::Error) -> Self { ConnectionError::IO(error) diff --git a/swarm/src/connection/handler_wrapper.rs b/swarm/src/connection/handler_wrapper.rs deleted file mode 100644 index 03d09b3fbc1..00000000000 --- a/swarm/src/connection/handler_wrapper.rs +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::connection::SubstreamEndpoint; -use crate::handler::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, -}; -use crate::upgrade::SendWrapper; -use crate::IntoConnectionHandler; - -use futures::prelude::*; -use futures::stream::FuturesUnordered; -use futures_timer::Delay; -use instant::Instant; -use libp2p_core::{ - muxing::SubstreamBox, - upgrade::{self, InboundUpgradeApply, OutboundUpgradeApply, UpgradeError}, - Multiaddr, -}; -use libp2p_core::{ConnectedPoint, PeerId}; -use std::{error, fmt, pin::Pin, task::Context, task::Poll, time::Duration}; - -/// A wrapper for an underlying [`ConnectionHandler`]. -/// -/// It extends [`ConnectionHandler`] with: -/// - Enforced substream upgrade timeouts -/// - Driving substream upgrades -/// - Handling connection timeout -// TODO: add a caching system for protocols that are supported or not -pub struct HandlerWrapper -where - TConnectionHandler: ConnectionHandler, -{ - remote_peer_id: PeerId, - /// The underlying handler. - handler: TConnectionHandler, - /// Futures that upgrade incoming substreams. - negotiating_in: FuturesUnordered< - SubstreamUpgrade< - TConnectionHandler::InboundOpenInfo, - InboundUpgradeApply>, - >, - >, - /// Futures that upgrade outgoing substreams. - negotiating_out: FuturesUnordered< - SubstreamUpgrade< - TConnectionHandler::OutboundOpenInfo, - OutboundUpgradeApply>, - >, - >, - /// For each outbound substream request, how to upgrade it. The first element of the tuple - /// is the unique identifier (see `unique_dial_upgrade_id`). 
- queued_dial_upgrades: Vec<(u64, SendWrapper)>, - /// Unique identifier assigned to each queued dial upgrade. - unique_dial_upgrade_id: u64, - /// The currently planned connection & handler shutdown. - shutdown: Shutdown, - /// The substream upgrade protocol override, if any. - substream_upgrade_protocol_override: Option, - /// The maximum number of inbound streams concurrently negotiating on a - /// connection. New inbound streams exceeding the limit are dropped and thus - /// reset. - /// - /// Note: This only enforces a limit on the number of concurrently - /// negotiating inbound streams. The total number of inbound streams on a - /// connection is the sum of negotiating and negotiated streams. A limit on - /// the total number of streams can be enforced at the - /// [`StreamMuxerBox`](libp2p_core::muxing::StreamMuxerBox) level. - max_negotiating_inbound_streams: usize, -} - -impl std::fmt::Debug for HandlerWrapper { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("HandlerWrapper") - .field("negotiating_in", &self.negotiating_in) - .field("negotiating_out", &self.negotiating_out) - .field("unique_dial_upgrade_id", &self.unique_dial_upgrade_id) - .field("shutdown", &self.shutdown) - .field( - "substream_upgrade_protocol_override", - &self.substream_upgrade_protocol_override, - ) - .finish() - } -} - -impl HandlerWrapper { - pub(crate) fn new( - remote_peer_id: PeerId, - endpoint: ConnectedPoint, - handler: impl IntoConnectionHandler, - substream_upgrade_protocol_override: Option, - max_negotiating_inbound_streams: usize, - ) -> Self { - Self { - remote_peer_id, - handler: handler.into_handler(&remote_peer_id, &endpoint), - negotiating_in: Default::default(), - negotiating_out: Default::default(), - queued_dial_upgrades: Vec::new(), - unique_dial_upgrade_id: 0, - shutdown: Shutdown::None, - substream_upgrade_protocol_override, - max_negotiating_inbound_streams, - } - } - - pub(crate) fn into_connection_handler(self) -> TConnectionHandler { - self.handler - } -} - -struct SubstreamUpgrade { - user_data: Option, - timeout: Delay, - upgrade: Upgrade, -} - -impl Unpin for SubstreamUpgrade {} - -impl Future for SubstreamUpgrade -where - Upgrade: Future>> + Unpin, -{ - type Output = ( - UserData, - Result>, - ); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - match self.timeout.poll_unpin(cx) { - Poll::Ready(()) => { - return Poll::Ready(( - self.user_data - .take() - .expect("Future not to be polled again once ready."), - Err(ConnectionHandlerUpgrErr::Timeout), - )) - } - - Poll::Pending => {} - } - - match self.upgrade.poll_unpin(cx) { - Poll::Ready(Ok(upgrade)) => Poll::Ready(( - self.user_data - .take() - .expect("Future not to be polled again once ready."), - Ok(upgrade), - )), - Poll::Ready(Err(err)) => Poll::Ready(( - self.user_data - .take() - .expect("Future not to be polled again once ready."), - Err(ConnectionHandlerUpgrErr::Upgrade(err)), - )), - Poll::Pending => Poll::Pending, - } - } -} - -/// The options for a planned connection & handler shutdown. -/// -/// A shutdown is planned anew based on the the return value of -/// [`ConnectionHandler::connection_keep_alive`] of the underlying handler -/// after every invocation of [`ConnectionHandler::poll`]. -/// -/// A planned shutdown is always postponed for as long as there are ingoing -/// or outgoing substreams being negotiated, i.e. it is a graceful, "idle" -/// shutdown. -#[derive(Debug)] -enum Shutdown { - /// No shutdown is planned. 
- None, - /// A shut down is planned as soon as possible. - Asap, - /// A shut down is planned for when a `Delay` has elapsed. - Later(Delay, Instant), -} - -/// Error generated by the [`HandlerWrapper`]. -#[derive(Debug)] -pub enum Error { - /// The connection handler encountered an error. - Handler(TErr), - /// The connection keep-alive timeout expired. - KeepAliveTimeout, -} - -impl From for Error { - fn from(err: TErr) -> Error { - Error::Handler(err) - } -} - -impl fmt::Display for Error -where - TErr: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::Handler(err) => write!(f, "{}", err), - Error::KeepAliveTimeout => { - write!(f, "Connection closed due to expired keep-alive timeout.") - } - } - } -} - -impl error::Error for Error -where - TErr: error::Error + 'static, -{ - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - Error::Handler(err) => Some(err), - Error::KeepAliveTimeout => None, - } - } -} - -pub type OutboundOpenInfo = ( - u64, - ::OutboundOpenInfo, - Duration, -); - -impl HandlerWrapper -where - TConnectionHandler: ConnectionHandler, -{ - pub fn inject_substream( - &mut self, - substream: SubstreamBox, - // The first element of the tuple is the unique upgrade identifier - // (see `unique_dial_upgrade_id`). - endpoint: SubstreamEndpoint>, - ) { - match endpoint { - SubstreamEndpoint::Listener => { - if self.negotiating_in.len() == self.max_negotiating_inbound_streams { - log::warn!( - "Incoming substream from {} exceeding maximum number \ - of negotiating inbound streams {} on connection. \ - Dropping. See PoolConfig::with_max_negotiating_inbound_streams.", - self.remote_peer_id, - self.max_negotiating_inbound_streams, - ); - return; - } - - let protocol = self.handler.listen_protocol(); - let timeout = *protocol.timeout(); - let (upgrade, user_data) = protocol.into_upgrade(); - let upgrade = upgrade::apply_inbound(substream, SendWrapper(upgrade)); - let timeout = Delay::new(timeout); - self.negotiating_in.push(SubstreamUpgrade { - user_data: Some(user_data), - timeout, - upgrade, - }); - } - SubstreamEndpoint::Dialer((upgrade_id, user_data, timeout)) => { - let pos = match self - .queued_dial_upgrades - .iter() - .position(|(id, _)| id == &upgrade_id) - { - Some(p) => p, - None => { - debug_assert!(false, "Received an upgrade with an invalid upgrade ID"); - return; - } - }; - - let (_, upgrade) = self.queued_dial_upgrades.remove(pos); - let mut version = upgrade::Version::default(); - if let Some(v) = self.substream_upgrade_protocol_override { - if v != version { - log::debug!( - "Substream upgrade protocol override: {:?} -> {:?}", - version, - v - ); - version = v; - } - } - let upgrade = upgrade::apply_outbound(substream, upgrade, version); - let timeout = Delay::new(timeout); - self.negotiating_out.push(SubstreamUpgrade { - user_data: Some(user_data), - timeout, - upgrade, - }); - } - } - } - - pub fn inject_event(&mut self, event: TConnectionHandler::InEvent) { - self.handler.inject_event(event); - } - - pub fn inject_address_change(&mut self, new_address: &Multiaddr) { - self.handler.inject_address_change(new_address); - } - - fn handle_connection_handler_event( - &mut self, - handler_event: ConnectionHandlerEvent< - TConnectionHandler::OutboundProtocol, - TConnectionHandler::OutboundOpenInfo, - TConnectionHandler::OutEvent, - TConnectionHandler::Error, - >, - ) -> Result< - Event, TConnectionHandler::OutEvent>, - Error, - > { - match handler_event { - 
ConnectionHandlerEvent::Custom(event) => Ok(Event::Custom(event)), - ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { - let id = self.unique_dial_upgrade_id; - let timeout = *protocol.timeout(); - self.unique_dial_upgrade_id += 1; - let (upgrade, info) = protocol.into_upgrade(); - self.queued_dial_upgrades.push((id, SendWrapper(upgrade))); - Ok(Event::OutboundSubstreamRequest((id, info, timeout))) - } - ConnectionHandlerEvent::Close(err) => Err(err.into()), - } - } - - pub fn poll( - &mut self, - cx: &mut Context<'_>, - ) -> Poll< - Result< - Event, TConnectionHandler::OutEvent>, - Error, - >, - > { - loop { - // Poll the [`ConnectionHandler`]. - if let Poll::Ready(handler_event) = self.handler.poll(cx) { - let wrapper_event = self.handle_connection_handler_event(handler_event)?; - return Poll::Ready(Ok(wrapper_event)); - } - - // In case the [`ConnectionHandler`] can not make any more progress, poll the negotiating outbound streams. - if let Poll::Ready(Some((user_data, res))) = self.negotiating_out.poll_next_unpin(cx) { - match res { - Ok(upgrade) => self - .handler - .inject_fully_negotiated_outbound(upgrade, user_data), - Err(err) => self.handler.inject_dial_upgrade_error(user_data, err), - } - - // After the `inject_*` calls, the [`ConnectionHandler`] might be able to make progress. - continue; - } - - // In case both the [`ConnectionHandler`] and the negotiating outbound streams can not - // make any more progress, poll the negotiating inbound streams. - if let Poll::Ready(Some((user_data, res))) = self.negotiating_in.poll_next_unpin(cx) { - match res { - Ok(upgrade) => self - .handler - .inject_fully_negotiated_inbound(upgrade, user_data), - Err(err) => self.handler.inject_listen_upgrade_error(user_data, err), - } - - // After the `inject_*` calls, the [`ConnectionHandler`] might be able to make progress. - continue; - } - - // None of the three can make any more progress, thus breaking the loop. - break; - } - - // Ask the handler whether it wants the connection (and the handler itself) - // to be kept alive, which determines the planned shutdown, if any. - match (&mut self.shutdown, self.handler.connection_keep_alive()) { - (Shutdown::Later(timer, deadline), KeepAlive::Until(t)) => { - if *deadline != t { - *deadline = t; - if let Some(dur) = deadline.checked_duration_since(Instant::now()) { - timer.reset(dur) - } - } - } - (_, KeepAlive::Until(t)) => { - if let Some(dur) = t.checked_duration_since(Instant::now()) { - self.shutdown = Shutdown::Later(Delay::new(dur), t) - } - } - (_, KeepAlive::No) => self.shutdown = Shutdown::Asap, - (_, KeepAlive::Yes) => self.shutdown = Shutdown::None, - }; - - // Check if the connection (and handler) should be shut down. - // As long as we're still negotiating substreams, shutdown is always postponed. - if self.negotiating_in.is_empty() && self.negotiating_out.is_empty() { - match self.shutdown { - Shutdown::None => {} - Shutdown::Asap => return Poll::Ready(Err(Error::KeepAliveTimeout)), - Shutdown::Later(ref mut delay, _) => match Future::poll(Pin::new(delay), cx) { - Poll::Ready(_) => return Poll::Ready(Err(Error::KeepAliveTimeout)), - Poll::Pending => {} - }, - } - } - - Poll::Pending - } -} - -/// Event produced by a [`HandlerWrapper`]. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum Event { - /// Require a new outbound substream to be opened with the remote. - OutboundSubstreamRequest(TOutboundOpenInfo), - - /// Other event. 
- Custom(TCustom), -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::handler::PendingConnectionHandler; - use quickcheck::*; - use std::sync::Arc; - - #[test] - fn max_negotiating_inbound_streams() { - fn prop(max_negotiating_inbound_streams: u8) { - let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); - let mut wrapper = HandlerWrapper::new( - PeerId::random(), - ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }, - PendingConnectionHandler::new("test".to_string()), - None, - max_negotiating_inbound_streams, - ); - let alive_substreams_counter = Arc::new(()); - - for _ in 0..max_negotiating_inbound_streams { - let substream = - SubstreamBox::new(PendingSubstream(alive_substreams_counter.clone())); - wrapper.inject_substream(substream, SubstreamEndpoint::Listener); - } - - assert_eq!( - Arc::strong_count(&alive_substreams_counter), - max_negotiating_inbound_streams + 1, - "Expect none of the substreams up to the limit to be dropped." - ); - - let substream = SubstreamBox::new(PendingSubstream(alive_substreams_counter.clone())); - wrapper.inject_substream(substream, SubstreamEndpoint::Listener); - - assert_eq!( - Arc::strong_count(&alive_substreams_counter), - max_negotiating_inbound_streams + 1, - "Expect substream exceeding the limit to be dropped." - ); - } - - QuickCheck::new().quickcheck(prop as fn(_)); - } - - struct PendingSubstream(Arc<()>); - - impl AsyncRead for PendingSubstream { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - _buf: &mut [u8], - ) -> Poll> { - Poll::Pending - } - } - - impl AsyncWrite for PendingSubstream { - fn poll_write( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - _buf: &[u8], - ) -> Poll> { - Poll::Pending - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Pending - } - - fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Pending - } - } -} diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index 62e931e9510..c43d5efb61e 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -19,6 +19,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use crate::connection::Connection; use crate::{ behaviour::{THandlerInEvent, THandlerOutEvent}, connection::{ @@ -89,7 +90,7 @@ where /// The maximum number of inbound streams concurrently negotiating on a connection. /// - /// See [`super::handler_wrapper::HandlerWrapper::max_negotiating_inbound_streams`]. + /// See [`Connection::max_negotiating_inbound_streams`]. max_negotiating_inbound_streams: usize, /// The executor to use for running the background tasks. If `None`, @@ -746,11 +747,9 @@ where }, ); - let connection = super::Connection::new( - obtained_peer_id, - endpoint, + let connection = Connection::new( muxer, - handler, + handler.into_handler(&obtained_peer_id, &endpoint), self.substream_upgrade_protocol_override, self.max_negotiating_inbound_streams, ); @@ -1165,7 +1164,7 @@ pub struct PoolConfig { /// The maximum number of inbound streams concurrently negotiating on a connection. /// - /// See [super::handler_wrapper::HandlerWrapper::max_negotiating_inbound_streams]. + /// See [`Connection::max_negotiating_inbound_streams`]. max_negotiating_inbound_streams: usize, } @@ -1240,7 +1239,7 @@ impl PoolConfig { /// The maximum number of inbound streams concurrently negotiating on a connection. 
     ///
-    /// See [`super::handler_wrapper::HandlerWrapper::max_negotiating_inbound_streams`].
+    /// See [`Connection::max_negotiating_inbound_streams`].
     pub fn with_max_negotiating_inbound_streams(mut self, v: usize) -> Self {
         self.max_negotiating_inbound_streams = v;
         self
diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs
index ac6b771ced8..edc69484b68 100644
--- a/swarm/src/dial_opts.rs
+++ b/swarm/src/dial_opts.rs
@@ -122,7 +122,7 @@ impl WithPeerId {
     }
 
     /// Override
-    /// [`PoolConfig::with_dial_concurrency_factor`](crate::connection::pool::PoolConfig::with_dial_concurrency_factor).
+    /// the number of addresses concurrently dialed for a single outbound connection attempt.
     pub fn override_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self {
         self.dial_concurrency_factor_override = Some(factor);
         self
@@ -196,7 +196,7 @@ impl WithPeerIdWithAddresses {
     }
 
     /// Override
-    /// [`PoolConfig::with_dial_concurrency_factor`](crate::connection::pool::PoolConfig::with_dial_concurrency_factor).
+    /// the number of addresses concurrently dialed for a single outbound connection attempt.
    pub fn override_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self {
        self.dial_concurrency_factor_override = Some(factor);
        self
diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs
new file mode 100644
index 00000000000..61f055915b3
--- /dev/null
+++ b/swarm/src/dummy.rs
@@ -0,0 +1,104 @@
+use crate::behaviour::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
+use crate::handler::{InboundUpgradeSend, OutboundUpgradeSend};
+use crate::{ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, SubstreamProtocol};
+use libp2p_core::connection::ConnectionId;
+use libp2p_core::upgrade::DeniedUpgrade;
+use libp2p_core::PeerId;
+use libp2p_core::UpgradeError;
+use std::task::{Context, Poll};
+use void::Void;
+
+/// Implementation of [`NetworkBehaviour`] that doesn't do anything.
+pub struct Behaviour;
+
+impl NetworkBehaviour for Behaviour {
+    type ConnectionHandler = ConnectionHandler;
+    type OutEvent = Void;
+
+    fn new_handler(&mut self) -> Self::ConnectionHandler {
+        ConnectionHandler
+    }
+
+    fn inject_event(&mut self, _: PeerId, _: ConnectionId, event: Void) {
+        void::unreachable(event)
+    }
+
+    fn poll(
+        &mut self,
+        _: &mut Context<'_>,
+        _: &mut impl PollParameters,
+    ) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> {
+        Poll::Pending
+    }
+}
+
+/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor keeps the connection alive.
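+///
+/// A minimal sketch of what this amounts to in practice (the import paths are assumptions
+/// based on this crate's public module layout):
+///
+/// ```ignore
+/// use libp2p_swarm::dummy;
+/// use libp2p_swarm::handler::{ConnectionHandler as _, KeepAlive};
+///
+/// // The handler supports no protocols and does not ask for the connection
+/// // to be kept alive, so an otherwise idle connection driven by it is closed.
+/// assert_eq!(dummy::ConnectionHandler.connection_keep_alive(), KeepAlive::No);
+/// ```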
+#[derive(Clone)] +pub struct ConnectionHandler; + +impl crate::handler::ConnectionHandler for ConnectionHandler { + type InEvent = Void; + type OutEvent = Void; + type Error = Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = DeniedUpgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = Void; + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + protocol: ::Output, + _: Self::InboundOpenInfo, + ) { + void::unreachable(protocol) + } + + fn inject_fully_negotiated_outbound( + &mut self, + protocol: ::Output, + _: Self::OutboundOpenInfo, + ) { + void::unreachable(protocol) + } + + fn inject_event(&mut self, event: Self::InEvent) { + void::unreachable(event) + } + + fn inject_dial_upgrade_error( + &mut self, + _: Self::OutboundOpenInfo, + error: ConnectionHandlerUpgrErr<::Error>, + ) { + match error { + ConnectionHandlerUpgrErr::Timeout => unreachable!(), + ConnectionHandlerUpgrErr::Timer => unreachable!(), + ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => void::unreachable(e), + ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(_)) => { + unreachable!("Denied upgrade does not support any protocols") + } + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + KeepAlive::No + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + Poll::Pending + } +} diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index c6125f277b1..5c60f2bf24a 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -38,7 +38,6 @@ //! > the network as a whole, see the //! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait. -mod dummy; pub mod either; mod map_in; mod map_out; @@ -53,7 +52,6 @@ use instant::Instant; use libp2p_core::{upgrade::UpgradeError, ConnectedPoint, Multiaddr, PeerId}; use std::{cmp::Ordering, error, fmt, task::Context, task::Poll, time::Duration}; -pub use dummy::DummyConnectionHandler; pub use map_in::MapInEvent; pub use map_out::MapOutEvent; pub use one_shot::{OneShotHandler, OneShotHandlerConfig}; diff --git a/swarm/src/handler/dummy.rs b/swarm/src/handler/dummy.rs deleted file mode 100644 index 67594686674..00000000000 --- a/swarm/src/handler/dummy.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::handler::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, - SubstreamProtocol, -}; -use crate::NegotiatedSubstream; -use libp2p_core::{ - upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}, - Multiaddr, -}; -use std::task::{Context, Poll}; -use void::Void; - -/// Implementation of [`ConnectionHandler`] that doesn't handle anything. -#[derive(Clone, Debug)] -pub struct DummyConnectionHandler { - pub keep_alive: KeepAlive, -} - -impl Default for DummyConnectionHandler { - fn default() -> Self { - DummyConnectionHandler { - keep_alive: KeepAlive::No, - } - } -} - -impl ConnectionHandler for DummyConnectionHandler { - type InEvent = Void; - type OutEvent = Void; - type Error = Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = Void; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - _: >::Output, - _: Self::InboundOpenInfo, - ) { - unreachable!("`DeniedUpgrade` is never successful."); - } - - fn inject_fully_negotiated_outbound( - &mut self, - _: >::Output, - v: Self::OutboundOpenInfo, - ) { - void::unreachable(v) - } - - fn inject_event(&mut self, v: Self::InEvent) { - void::unreachable(v) - } - - fn inject_address_change(&mut self, _: &Multiaddr) {} - - fn inject_dial_upgrade_error( - &mut self, - _: Self::OutboundOpenInfo, - _: ConnectionHandlerUpgrErr< - >::Error, - >, - ) { - } - - fn inject_listen_upgrade_error( - &mut self, - _: Self::InboundOpenInfo, - _: ConnectionHandlerUpgrErr< - >::Error, - >, - ) { - } - - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - - fn poll( - &mut self, - _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, - > { - Poll::Pending - } -} diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index f7e4d7ed1b4..07c1168b132 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -295,7 +295,7 @@ where } // Not always polling handlers in the same order should give anyone the chance to make progress. 
-        let pos = rand::thread_rng().gen_range(0, self.handlers.len());
+        let pos = rand::thread_rng().gen_range(0..self.handlers.len());
 
         for (k, h) in self.handlers.iter_mut().skip(pos) {
             if let Poll::Ready(e) = h.poll(cx) {
diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs
index 5db6b4d10a6..c599ff801e1 100644
--- a/swarm/src/handler/one_shot.rs
+++ b/swarm/src/handler/one_shot.rs
@@ -252,7 +252,7 @@ mod tests {
         );
 
         block_on(poll_fn(|cx| loop {
-            if let Poll::Pending = handler.poll(cx) {
+            if handler.poll(cx).is_pending() {
                 return Poll::Ready(());
             }
         }));
diff --git a/swarm/src/keep_alive.rs b/swarm/src/keep_alive.rs
new file mode 100644
index 00000000000..ea5d5ee6399
--- /dev/null
+++ b/swarm/src/keep_alive.rs
@@ -0,0 +1,119 @@
+use crate::behaviour::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
+use crate::handler::{
+    ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, SubstreamProtocol,
+};
+use crate::NegotiatedSubstream;
+use libp2p_core::connection::ConnectionId;
+use libp2p_core::PeerId;
+use libp2p_core::{
+    upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade},
+    Multiaddr,
+};
+use std::task::{Context, Poll};
+use void::Void;
+
+/// Implementation of [`NetworkBehaviour`] that doesn't do anything other than keep all connections alive.
+///
+/// This is primarily useful for test code. It can, however, occasionally be useful for production code too.
+/// The caveat is that open connections consume system resources and should thus be shut down when
+/// they are not in use. Connections can also fail at any time, so your application should be
+/// designed to establish them when necessary, making the use of this behaviour likely redundant.
+#[derive(Default)]
+pub struct Behaviour;
+
+impl NetworkBehaviour for Behaviour {
+    type ConnectionHandler = ConnectionHandler;
+    type OutEvent = Void;
+
+    fn new_handler(&mut self) -> Self::ConnectionHandler {
+        ConnectionHandler
+    }
+
+    fn inject_event(&mut self, _: PeerId, _: ConnectionId, event: Void) {
+        void::unreachable(event)
+    }
+
+    fn poll(
+        &mut self,
+        _: &mut Context<'_>,
+        _: &mut impl PollParameters,
+    ) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> {
+        Poll::Pending
+    }
+}
+
+/// Implementation of [`ConnectionHandler`] that doesn't handle anything but keeps the connection alive.
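+///
+/// A minimal sketch of the contrast with [`crate::dummy::ConnectionHandler`] (the import
+/// paths are assumptions based on this crate's public module layout):
+///
+/// ```ignore
+/// use libp2p_swarm::handler::{ConnectionHandler as _, KeepAlive};
+/// use libp2p_swarm::keep_alive;
+///
+/// // This handler always asks for the connection to be kept alive,
+/// // so idle connections driven by it stay open indefinitely.
+/// assert_eq!(keep_alive::ConnectionHandler.connection_keep_alive(), KeepAlive::Yes);
+/// ```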
+#[derive(Clone, Debug)] +pub struct ConnectionHandler; + +impl crate::handler::ConnectionHandler for ConnectionHandler { + type InEvent = Void; + type OutEvent = Void; + type Error = Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = DeniedUpgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = Void; + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + protocol: >::Output, + _: Self::InboundOpenInfo, + ) { + void::unreachable(protocol); + } + + fn inject_fully_negotiated_outbound( + &mut self, + protocol: >::Output, + _: Self::OutboundOpenInfo, + ) { + void::unreachable(protocol) + } + + fn inject_event(&mut self, v: Self::InEvent) { + void::unreachable(v) + } + + fn inject_address_change(&mut self, _: &Multiaddr) {} + + fn inject_dial_upgrade_error( + &mut self, + _: Self::OutboundOpenInfo, + _: ConnectionHandlerUpgrErr< + >::Error, + >, + ) { + } + + fn inject_listen_upgrade_error( + &mut self, + _: Self::InboundOpenInfo, + _: ConnectionHandlerUpgrErr< + >::Error, + >, + ) { + } + + fn connection_keep_alive(&self) -> KeepAlive { + KeepAlive::Yes + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + Poll::Pending + } +} diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index b8cfcf9368d..e708c27784d 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -61,7 +61,9 @@ mod upgrade; pub mod behaviour; pub mod dial_opts; +pub mod dummy; pub mod handler; +pub mod keep_alive; use behaviour::FromSwarm; pub use behaviour::{ @@ -350,11 +352,11 @@ where /// # use libp2p_swarm::dial_opts::{DialOpts, PeerCondition}; /// # use libp2p_core::{Multiaddr, PeerId, Transport}; /// # use libp2p_core::transport::dummy::DummyTransport; - /// # use libp2p_swarm::DummyBehaviour; + /// # use libp2p_swarm::dummy; /// # /// let mut swarm = Swarm::new( /// DummyTransport::new().boxed(), - /// DummyBehaviour::default(), + /// dummy::Behaviour, /// PeerId::random(), /// ); /// @@ -1389,9 +1391,15 @@ where self } - /// The maximum number of inbound streams concurrently negotiating on a connection. + /// The maximum number of inbound streams concurrently negotiating on a + /// connection. New inbound streams exceeding the limit are dropped and thus + /// reset. /// - /// See [`PoolConfig::with_max_negotiating_inbound_streams`]. + /// Note: This only enforces a limit on the number of concurrently + /// negotiating inbound streams. The total number of inbound streams on a + /// connection is the sum of negotiating and negotiated streams. A limit on + /// the total number of streams can be enforced at the + /// [`StreamMuxerBox`](libp2p_core::muxing::StreamMuxerBox) level. pub fn max_negotiating_inbound_streams(mut self, v: usize) -> Self { self.pool_config = self.pool_config.with_max_negotiating_inbound_streams(v); self @@ -1634,7 +1642,6 @@ fn p2p_addr(peer: Option, addr: Multiaddr) -> Result Disconnecting => Connecting. @@ -1732,9 +1738,7 @@ mod tests { fn test_connect_disconnect_ban() { // Since the test does not try to open any substreams, we can // use the dummy protocols handler. 
- let handler_proto = DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }; + let handler_proto = keep_alive::ConnectionHandler; let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); @@ -1742,7 +1746,7 @@ mod tests { let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); - swarm1.listen_on(addr1.clone()).unwrap(); + swarm1.listen_on(addr1).unwrap(); swarm2.listen_on(addr2.clone()).unwrap(); let swarm1_id = *swarm1.local_peer_id(); @@ -1852,9 +1856,7 @@ mod tests { fn test_swarm_disconnect() { // Since the test does not try to open any substreams, we can // use the dummy protocols handler. - let handler_proto = DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }; + let handler_proto = keep_alive::ConnectionHandler; let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); @@ -1920,9 +1922,7 @@ mod tests { fn test_behaviour_disconnect_all() { // Since the test does not try to open any substreams, we can // use the dummy protocols handler. - let handler_proto = DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }; + let handler_proto = keep_alive::ConnectionHandler; let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); @@ -1990,9 +1990,7 @@ mod tests { fn test_behaviour_disconnect_one() { // Since the test does not try to open any substreams, we can // use the dummy protocols handler. - let handler_proto = DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }; + let handler_proto = keep_alive::ConnectionHandler; let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); @@ -2000,7 +1998,7 @@ mod tests { let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); - swarm1.listen_on(addr1.clone()).unwrap(); + swarm1.listen_on(addr1).unwrap(); swarm2.listen_on(addr2.clone()).unwrap(); let swarm1_id = *swarm1.local_peer_id(); @@ -2070,18 +2068,16 @@ mod tests { struct DialConcurrencyFactor(NonZeroU8); impl Arbitrary for DialConcurrencyFactor { - fn arbitrary(g: &mut G) -> Self { - Self(NonZeroU8::new(g.gen_range(1, 11)).unwrap()) + fn arbitrary(g: &mut Gen) -> Self { + Self(NonZeroU8::new(g.gen_range(1..11)).unwrap()) } } fn prop(concurrency_factor: DialConcurrencyFactor) { block_on(async { - let mut swarm = new_test_swarm::<_, ()>(DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }) - .dial_concurrency_factor(concurrency_factor.0) - .build(); + let mut swarm = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) + .dial_concurrency_factor(concurrency_factor.0) + .build(); // Listen on `concurrency_factor + 1` addresses. 
// @@ -2108,7 +2104,7 @@ mod tests { swarm .dial( DialOpts::peer_id(PeerId::random()) - .addresses(listen_addresses.into()) + .addresses(listen_addresses) .build(), ) .unwrap(); @@ -2144,14 +2140,12 @@ mod tests { fn max_outgoing() { use rand::Rng; - let outgoing_limit = rand::thread_rng().gen_range(1, 10); + let outgoing_limit = rand::thread_rng().gen_range(1..10); let limits = ConnectionLimits::default().with_max_pending_outgoing(Some(outgoing_limit)); - let mut network = new_test_swarm::<_, ()>(DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }) - .connection_limits(limits) - .build(); + let mut network = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) + .connection_limits(limits) + .build(); let addr: Multiaddr = "/memory/1234".parse().unwrap(); @@ -2163,16 +2157,11 @@ mod tests { .addresses(vec![addr.clone()]) .build(), ) - .ok() .expect("Unexpected connection limit."); } match network - .dial( - DialOpts::peer_id(target) - .addresses(vec![addr.clone()]) - .build(), - ) + .dial(DialOpts::peer_id(target).addresses(vec![addr]).build()) .expect_err("Unexpected dialing success.") { DialError::ConnectionLimit(limit) => { @@ -2192,14 +2181,12 @@ mod tests { #[test] fn max_established_incoming() { - use rand::Rng; - #[derive(Debug, Clone)] struct Limit(u32); impl Arbitrary for Limit { - fn arbitrary(g: &mut G) -> Self { - Self(g.gen_range(1, 10)) + fn arbitrary(g: &mut Gen) -> Self { + Self(g.gen_range(1..10)) } } @@ -2210,16 +2197,12 @@ mod tests { fn prop(limit: Limit) { let limit = limit.0; - let mut network1 = new_test_swarm::<_, ()>(DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }) - .connection_limits(limits(limit)) - .build(); - let mut network2 = new_test_swarm::<_, ()>(DummyConnectionHandler { - keep_alive: KeepAlive::Yes, - }) - .connection_limits(limits(limit)) - .build(); + let mut network1 = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) + .connection_limits(limits(limit)) + .build(); + let mut network2 = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) + .connection_limits(limits(limit)) + .build(); let _ = network1.listen_on(multiaddr![Memory(0u64)]).unwrap(); let listen_addr = async_std::task::block_on(poll_fn(|cx| { @@ -2232,7 +2215,7 @@ mod tests { // Spawn and block on the dialer. async_std::task::block_on({ let mut n = 0; - let _ = network2.dial(listen_addr.clone()).unwrap(); + network2.dial(listen_addr.clone()).unwrap(); let mut expected_closed = false; let mut network_1_established = false; @@ -2324,8 +2307,8 @@ mod tests { // Checks whether dialing an address containing the wrong peer id raises an error // for the expected peer id instead of the obtained peer id. - let mut swarm1 = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); + let mut swarm1 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut swarm2 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); swarm1.listen_on("/memory/0".parse().unwrap()).unwrap(); @@ -2384,7 +2367,7 @@ mod tests { // // The last two can happen in any order. - let mut swarm = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); + let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); swarm.listen_on("/memory/0".parse().unwrap()).unwrap(); let local_address = @@ -2442,7 +2425,7 @@ mod tests { fn dial_self_by_id() { // Trying to dial self by passing the same `PeerId` shouldn't even be possible in the first // place. 
- let swarm = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); + let swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); let peer_id = *swarm.local_peer_id(); assert!(!swarm.is_connected(&peer_id)); } @@ -2453,7 +2436,7 @@ mod tests { let target = PeerId::random(); - let mut swarm = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); + let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); let addresses = HashSet::from([ multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())], @@ -2498,8 +2481,8 @@ mod tests { fn aborting_pending_connection_surfaces_error() { let _ = env_logger::try_init(); - let mut dialer = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); - let mut listener = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); + let mut dialer = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut listener = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); let listener_peer_id = *listener.local_peer_id(); listener.listen_on(multiaddr![Memory(0u64)]).unwrap(); diff --git a/swarm/src/registry.rs b/swarm/src/registry.rs index 76892edf981..04204930580 100644 --- a/swarm/src/registry.rs +++ b/swarm/src/registry.rs @@ -338,23 +338,22 @@ mod tests { use super::*; use libp2p_core::multiaddr::{Multiaddr, Protocol}; use quickcheck::*; - use rand::Rng; use std::num::{NonZeroU8, NonZeroUsize}; impl Arbitrary for AddressScore { - fn arbitrary(g: &mut G) -> AddressScore { - if g.gen_range(0, 10) == 0 { + fn arbitrary(g: &mut Gen) -> AddressScore { + if g.gen_range(0..10u8) == 0 { // ~10% "Infinitely" scored addresses AddressScore::Infinite } else { - AddressScore::Finite(g.gen()) + AddressScore::Finite(Arbitrary::arbitrary(g)) } } } impl Arbitrary for AddressRecord { - fn arbitrary(g: &mut G) -> Self { - let addr = Protocol::Tcp(g.gen::() % 256).into(); + fn arbitrary(g: &mut Gen) -> Self { + let addr = Protocol::Tcp(g.gen_range(0..256)).into(); let score = AddressScore::arbitrary(g); AddressRecord::new(addr, score) } @@ -381,7 +380,7 @@ mod tests { #[test] fn score_retention() { fn prop(first: AddressRecord, other: AddressRecord) -> TestResult { - if first.addr == other.addr { + if first.addr == other.addr || first.score.is_zero() { return TestResult::discard(); } @@ -389,7 +388,7 @@ mod tests { // Add the first address. addresses.add(first.addr.clone(), first.score); - assert!(addresses.iter().any(|a| &a.addr == &first.addr)); + assert!(addresses.iter().any(|a| a.addr == first.addr)); // Add another address so often that the initial report of // the first address may be purged and, since it was the @@ -398,7 +397,7 @@ mod tests { addresses.add(other.addr.clone(), other.score); } - let exists = addresses.iter().any(|a| &a.addr == &first.addr); + let exists = addresses.iter().any(|a| a.addr == first.addr); match (first.score, other.score) { // Only finite scores push out other finite scores. @@ -412,6 +411,33 @@ mod tests { quickcheck(prop as fn(_, _) -> _); } + #[test] + fn score_retention_finite_0() { + let first = { + let addr = Protocol::Tcp(42).into(); + let score = AddressScore::Finite(0); + AddressRecord::new(addr, score) + }; + let other = { + let addr = Protocol::Udp(42).into(); + let score = AddressScore::Finite(42); + AddressRecord::new(addr, score) + }; + + let mut addresses = Addresses::default(); + + // Add the first address. 
+ addresses.add(first.addr.clone(), first.score); + assert!(addresses.iter().any(|a| a.addr == first.addr)); + + // Add another address so that the first address will be purged, + // because its score is Finite(0) + addresses.add(other.addr.clone(), other.score); + + assert!(addresses.iter().any(|a| a.addr == other.addr)); + assert!(!addresses.iter().any(|a| a.addr == first.addr)); + } + #[test] fn finitely_scored_address_limit() { fn prop(reports: Vec, limit: NonZeroU8) { @@ -425,12 +451,14 @@ mod tests { // Count the finitely scored addresses. let num_finite = addresses .iter() - .filter(|r| match r { - AddressRecord { - score: AddressScore::Finite(_), - .. - } => true, - _ => false, + .filter(|r| { + matches!( + r, + AddressRecord { + score: AddressScore::Finite(_), + .. + } + ) }) .count(); @@ -450,13 +478,13 @@ mod tests { // Add all address reports to the collection. for r in records.iter() { - addresses.add(r.addr.clone(), r.score.clone()); + addresses.add(r.addr.clone(), r.score); } // Check that each address in the registry has the expected score. for r in &addresses.registry { let expected_score = records.iter().fold(None::, |sum, rec| { - if &rec.addr == &r.addr { + if rec.addr == r.addr { sum.map_or(Some(rec.score), |s| Some(s + rec.score)) } else { sum diff --git a/swarm/src/test.rs b/swarm/src/test.rs index 166e9185a47..093ee420cb5 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -232,7 +232,7 @@ where } fn addresses_of_peer(&mut self, p: &PeerId) -> Vec { - self.addresses_of_peer.push(p.clone()); + self.addresses_of_peer.push(*p); self.inner.addresses_of_peer(p) } @@ -271,12 +271,8 @@ where } else { assert_eq!(other_established, 0) } - self.inject_connection_established.push(( - p.clone(), - c.clone(), - e.clone(), - other_established, - )); + self.inject_connection_established + .push((*p, *c, e.clone(), other_established)); self.inner .inject_connection_established(p, c, e, errors, other_established); } @@ -349,7 +345,7 @@ where "`inject_event` is never called for closed connections." ); - self.inject_event.push((p.clone(), c.clone(), e.clone())); + self.inject_event.push((p, c, e.clone())); self.inner.inject_event(p, c, e); } @@ -389,7 +385,7 @@ where } fn inject_listener_error(&mut self, l: ListenerId, e: &(dyn std::error::Error + 'static)) { - self.inject_listener_error.push(l.clone()); + self.inject_listener_error.push(l); self.inner.inject_listener_error(l, e); } diff --git a/transports/deflate/CHANGELOG.md b/transports/deflate/CHANGELOG.md index ead0f9cb68f..d7c58a6e4a9 100644 --- a/transports/deflate/CHANGELOG.md +++ b/transports/deflate/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.37.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + # 0.36.0 - Update to `libp2p-core` `v0.36.0`.
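The swarm and registry test changes above follow the quickcheck 1.0 migration: `Arbitrary::arbitrary` now takes a concrete `&mut Gen` instead of a generic `G: Gen`, and ranges are written with Rust range syntax. A minimal sketch of the new-style impl, mirroring the `Limit` type from the diff; the `gen_range` method on `Gen` is assumed to come from the workspace's `quickcheck-ext` wrapper rather than upstream quickcheck:

```rust
use quickcheck::{Arbitrary, Gen};

#[derive(Debug, Clone)]
struct Limit(u32);

impl Arbitrary for Limit {
    fn arbitrary(g: &mut Gen) -> Self {
        // quickcheck 1.0 hands out a concrete `Gen`, and ranges use `1..10` instead of
        // the old two-argument form. `gen_range` here is assumed to be provided by the
        // workspace's `quickcheck-ext` wrapper rather than upstream quickcheck.
        Self(g.gen_range(1..10))
    }
}
```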
diff --git a/transports/deflate/Cargo.toml b/transports/deflate/Cargo.toml index 904500c1cd0..bbb1e124771 100644 --- a/transports/deflate/Cargo.toml +++ b/transports/deflate/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-deflate" edition = "2021" rust-version = "1.56.1" description = "Deflate encryption protocol for libp2p" -version = "0.36.0" +version = "0.37.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,11 +12,11 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } flate2 = "1.0" [dev-dependencies] async-std = "1.6.2" -libp2p-tcp = { path = "../../transports/tcp" } -quickcheck = "0.9" -rand = "0.7" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } +rand = "0.8" diff --git a/transports/deflate/tests/test.rs b/transports/deflate/tests/test.rs index 06d91ddd808..726e562d8a2 100644 --- a/transports/deflate/tests/test.rs +++ b/transports/deflate/tests/test.rs @@ -19,10 +19,11 @@ // DEALINGS IN THE SOFTWARE. use futures::{future, prelude::*}; -use libp2p_core::{transport::Transport, upgrade}; -use libp2p_deflate::DeflateConfig; -use libp2p_tcp::TcpTransport; -use quickcheck::{QuickCheck, RngCore, TestResult}; +use libp2p::core::{transport::Transport, upgrade}; +use libp2p::deflate::DeflateConfig; +use libp2p::tcp::TcpTransport; +use quickcheck::{QuickCheck, TestResult}; +use rand::RngCore; #[test] fn deflate() { diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index a6c46fa4191..e20c92f66e0 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.37.0 [unreleased] + +- Remove default features. If you previously depended on `async-std` you need to enable this explicitly now. See [PR 2918]. + +- Update to `libp2p-core` `v0.37.0`. + +[PR 2918]: https://github.com/libp2p/rust-libp2p/pull/2918 + # 0.36.0 - Update to `libp2p-core` `v0.36.0`. 
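As the `libp2p-dns` changelog entry above notes, the default features are gone and a runtime now has to be named explicitly in downstream manifests. A hedged sketch of that opt-in; version numbers are illustrative, and `tokio` is the alternative feature on both crates:

```toml
[dependencies]
# Runtime support is no longer enabled by default and must be requested explicitly.
libp2p-dns = { version = "0.37.0", features = ["async-std"] }   # or: features = ["tokio"]
libp2p-tcp = { version = "0.37.0", features = ["async-io"] }    # or: features = ["tokio"]
```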
diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 51c9b688c73..a17b64b705c 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dns" edition = "2021" rust-version = "1.56.1" description = "DNS transport implementation for libp2p" -version = "0.36.0" +version = "0.37.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,12 +11,12 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } log = "0.4.1" futures = "0.3.1" -async-std-resolver = { version = "0.21", optional = true } +async-std-resolver = { version = "0.22", optional = true } parking_lot = "0.12.0" -trust-dns-resolver = { version = "0.21", default-features = false, features = ["system-config"] } +trust-dns-resolver = { version = "0.22", default-features = false, features = ["system-config"] } smallvec = "1.6.1" [dev-dependencies] @@ -25,7 +25,6 @@ tokio-crate = { package = "tokio", version = "1.0", default-features = false, fe async-std-crate = { package = "async-std", version = "1.6" } [features] -default = ["async-std"] async-std = ["async-std-resolver"] tokio = ["trust-dns-resolver/tokio-runtime"] # The `tokio-` prefix and feature dependency is just to be explicit, diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 0ee89f78373..7f76a378990 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -607,13 +607,10 @@ mod tests { fn dial(&mut self, addr: Multiaddr) -> Result> { // Check that all DNS components have been resolved, i.e. replaced. - assert!(!addr.iter().any(|p| match p { - Protocol::Dns(_) - | Protocol::Dns4(_) - | Protocol::Dns6(_) - | Protocol::Dnsaddr(_) => true, - _ => false, - })); + assert!(!addr.iter().any(|p| matches!( + p, + Protocol::Dns(_) | Protocol::Dns4(_) | Protocol::Dns6(_) | Protocol::Dnsaddr(_) + ))); Ok(Box::pin(future::ready(Ok(())))) } diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index 1416aab4e30..bae8fd51edb 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,9 +1,13 @@ -# 0.39.1 [unreleased] +# 0.40.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. - Introduce `NoiseAuthenticated::xx` constructor, assuming a X25519 DH key exchange. An XX key exchange and X25519 keys are the most common way of using noise in libp2p and thus deserve a convenience constructor. See [PR 2887]. +- Add `NoiseConfig::with_prologue` which allows users to set the noise prologue of the handshake. See [PR 2903]. [PR 2887]: https://github.com/libp2p/rust-libp2p/pull/2887 +[PR 2903]: https://github.com/libp2p/rust-libp2p/pull/2903 # 0.39.0 diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index 8fef520cb9a..3f2c69900b5 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-noise" edition = "2021" rust-version = "1.56.1" description = "Cryptographic handshake protocol using the noise framework." 
-version = "0.39.1" +version = "0.40.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,7 +13,7 @@ bytes = "1" curve25519-dalek = "3.0.0" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } log = "0.4" prost = "0.11" rand = "0.8.3" @@ -31,8 +31,8 @@ snow = { version = "0.9.0", features = ["default-resolver"], default-features = [dev-dependencies] async-io = "1.2.0" env_logger = "0.9.0" -libp2p-tcp = { path = "../../transports/tcp" } -quickcheck = "0.9.0" +libp2p = { path = "../..", features = ["full"] } +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } libsodium-sys-stable = { version = "1.19.22", features = ["fetch-latest"] } ed25519-compact = "1.0.11" diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index 1712176d7ef..c1b55d2076b 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -40,8 +40,8 @@ //! //! ``` //! use libp2p_core::{identity, Transport, upgrade}; -//! use libp2p_tcp::TcpTransport; -//! use libp2p_noise::{Keypair, X25519Spec, NoiseAuthenticated}; +//! use libp2p::tcp::TcpTransport; +//! use libp2p::noise::{Keypair, X25519Spec, NoiseAuthenticated}; //! //! # fn main() { //! let id_keys = identity::Keypair::generate_ed25519(); @@ -67,6 +67,7 @@ pub use protocol::{Protocol, ProtocolParams, IK, IX, XX}; use futures::prelude::*; use libp2p_core::{identity, InboundUpgrade, OutboundUpgrade, PeerId, UpgradeInfo}; +use snow::HandshakeState; use std::pin::Pin; use zeroize::Zeroize; @@ -78,6 +79,14 @@ pub struct NoiseConfig { legacy: LegacyConfig, remote: R, _marker: std::marker::PhantomData

, + + /// Prologue to use in the noise handshake. + /// + /// The prologue can contain arbitrary data that will be hashed into the noise handshake. + /// For the handshake to succeed, both parties must set the same prologue. + /// + /// For further information, see . + prologue: Vec, } impl NoiseConfig { @@ -87,6 +96,11 @@ impl NoiseConfig { NoiseAuthenticated { config: self } } + /// Set the noise prologue. + pub fn with_prologue(self, prologue: Vec) -> Self { + Self { prologue, ..self } + } + /// Sets the legacy configuration options to use, if any. pub fn set_legacy_config(&mut self, cfg: LegacyConfig) -> &mut Self { self.legacy = cfg; @@ -94,6 +108,35 @@ impl NoiseConfig { } } +impl NoiseConfig +where + C: Zeroize + AsRef<[u8]>, +{ + fn into_responder(self) -> Result { + let state = self + .params + .into_builder() + .prologue(self.prologue.as_ref()) + .local_private_key(self.dh_keys.secret().as_ref()) + .build_responder() + .map_err(NoiseError::from)?; + + Ok(state) + } + + fn into_initiator(self) -> Result { + let state = self + .params + .into_builder() + .prologue(self.prologue.as_ref()) + .local_private_key(self.dh_keys.secret().as_ref()) + .build_initiator() + .map_err(NoiseError::from)?; + + Ok(state) + } +} + impl NoiseConfig where C: Protocol + Zeroize, @@ -106,6 +149,7 @@ where legacy: LegacyConfig::default(), remote: (), _marker: std::marker::PhantomData, + prologue: Vec::default(), } } } @@ -122,6 +166,7 @@ where legacy: LegacyConfig::default(), remote: (), _marker: std::marker::PhantomData, + prologue: Vec::default(), } } } @@ -141,6 +186,7 @@ where legacy: LegacyConfig::default(), remote: (), _marker: std::marker::PhantomData, + prologue: Vec::default(), } } } @@ -164,6 +210,7 @@ where legacy: LegacyConfig::default(), remote: (remote_dh, remote_id), _marker: std::marker::PhantomData, + prologue: Vec::default(), } } } @@ -174,25 +221,22 @@ impl InboundUpgrade for NoiseConfig where NoiseConfig: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, + C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { type Output = (RemoteIdentity, NoiseOutput); type Error = NoiseError; type Future = Handshake; fn upgrade_inbound(self, socket: T, _: Self::Info) -> Self::Future { - let session = self - .params - .into_builder() - .local_private_key(self.dh_keys.secret().as_ref()) - .build_responder() - .map_err(NoiseError::from); + let config = self.legacy; + let identity = self.dh_keys.clone().into_identity(); + handshake::rt1_responder( socket, - session, - self.dh_keys.into_identity(), + self.into_responder(), + identity, IdentityExchange::Mutual, - self.legacy, + config, ) } } @@ -201,25 +245,22 @@ impl OutboundUpgrade for NoiseConfig where NoiseConfig: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, + C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { type Output = (RemoteIdentity, NoiseOutput); type Error = NoiseError; type Future = Handshake; fn upgrade_outbound(self, socket: T, _: Self::Info) -> Self::Future { - let session = self - .params - .into_builder() - .local_private_key(self.dh_keys.secret().as_ref()) - .build_initiator() - .map_err(NoiseError::from); + let legacy = self.legacy; + let identity = self.dh_keys.clone().into_identity(); + handshake::rt1_initiator( socket, - session, - self.dh_keys.into_identity(), + self.into_initiator(), + identity, IdentityExchange::Mutual, - self.legacy, + legacy, ) } } @@ -230,25 
+271,22 @@ impl InboundUpgrade for NoiseConfig where NoiseConfig: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, + C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { type Output = (RemoteIdentity, NoiseOutput); type Error = NoiseError; type Future = Handshake; fn upgrade_inbound(self, socket: T, _: Self::Info) -> Self::Future { - let session = self - .params - .into_builder() - .local_private_key(self.dh_keys.secret().as_ref()) - .build_responder() - .map_err(NoiseError::from); + let legacy = self.legacy; + let identity = self.dh_keys.clone().into_identity(); + handshake::rt15_responder( socket, - session, - self.dh_keys.into_identity(), + self.into_responder(), + identity, IdentityExchange::Mutual, - self.legacy, + legacy, ) } } @@ -257,25 +295,22 @@ impl OutboundUpgrade for NoiseConfig where NoiseConfig: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, + C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { type Output = (RemoteIdentity, NoiseOutput); type Error = NoiseError; type Future = Handshake; fn upgrade_outbound(self, socket: T, _: Self::Info) -> Self::Future { - let session = self - .params - .into_builder() - .local_private_key(self.dh_keys.secret().as_ref()) - .build_initiator() - .map_err(NoiseError::from); + let legacy = self.legacy; + let identity = self.dh_keys.clone().into_identity(); + handshake::rt15_initiator( socket, - session, - self.dh_keys.into_identity(), + self.into_initiator(), + identity, IdentityExchange::Mutual, - self.legacy, + legacy, ) } } @@ -286,25 +321,22 @@ impl InboundUpgrade for NoiseConfig where NoiseConfig: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, + C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { type Output = (RemoteIdentity, NoiseOutput); type Error = NoiseError; type Future = Handshake; fn upgrade_inbound(self, socket: T, _: Self::Info) -> Self::Future { - let session = self - .params - .into_builder() - .local_private_key(self.dh_keys.secret().as_ref()) - .build_responder() - .map_err(NoiseError::from); + let legacy = self.legacy; + let identity = self.dh_keys.clone().into_identity(); + handshake::rt1_responder( socket, - session, - self.dh_keys.into_identity(), + self.into_responder(), + identity, IdentityExchange::Receive, - self.legacy, + legacy, ) } } @@ -313,7 +345,7 @@ impl OutboundUpgrade for NoiseConfig, identity::Pu where NoiseConfig, identity::PublicKey)>: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, + C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { type Output = (RemoteIdentity, NoiseOutput); type Error = NoiseError; @@ -323,10 +355,12 @@ where let session = self .params .into_builder() + .prologue(self.prologue.as_ref()) .local_private_key(self.dh_keys.secret().as_ref()) .remote_public_key(self.remote.0.as_ref()) .build_initiator() .map_err(NoiseError::from); + handshake::rt1_initiator( socket, session, @@ -356,12 +390,12 @@ pub struct NoiseAuthenticated { config: NoiseConfig, } -impl NoiseAuthenticated { +impl NoiseAuthenticated { /// Create a new [`NoiseAuthenticated`] for the `XX` handshake pattern using X25519 DH keys. /// /// For now, this is the only combination that is guaranteed to be compatible with other libp2p implementations. 
pub fn xx(id_keys: &identity::Keypair) -> Result { - let dh_keys = Keypair::::new(); + let dh_keys = Keypair::::new(); let noise_keys = dh_keys.into_authentic(id_keys)?; let config = NoiseConfig::xx(noise_keys); @@ -432,7 +466,7 @@ where } /// Legacy configuration options. -#[derive(Clone, Default)] +#[derive(Clone, Copy, Default)] pub struct LegacyConfig { /// Whether to continue sending legacy handshake payloads, /// i.e. length-prefixed protobuf payloads inside a length-prefixed @@ -445,3 +479,51 @@ pub struct LegacyConfig { /// libp2p implementations. pub recv_legacy_handshake: bool, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn handshake_hashes_disagree_if_prologue_differs() { + let alice = new_xx_config() + .with_prologue(b"alice prologue".to_vec()) + .into_initiator() + .unwrap(); + let bob = new_xx_config() + .with_prologue(b"bob prologue".to_vec()) + .into_responder() + .unwrap(); + + let alice_handshake_hash = alice.get_handshake_hash(); + let bob_handshake_hash = bob.get_handshake_hash(); + + assert_ne!(alice_handshake_hash, bob_handshake_hash) + } + + #[test] + fn handshake_hashes_agree_if_prologue_is_the_same() { + let alice = new_xx_config() + .with_prologue(b"shared knowledge".to_vec()) + .into_initiator() + .unwrap(); + let bob = new_xx_config() + .with_prologue(b"shared knowledge".to_vec()) + .into_responder() + .unwrap(); + + let alice_handshake_hash = alice.get_handshake_hash(); + let bob_handshake_hash = bob.get_handshake_hash(); + + assert_eq!(alice_handshake_hash, bob_handshake_hash) + } + + fn new_xx_config() -> NoiseConfig { + let dh_keys = Keypair::::new(); + let noise_keys = dh_keys + .into_authentic(&identity::Keypair::generate_ed25519()) + .unwrap(); + + NoiseConfig::xx(noise_keys) + } +} diff --git a/transports/noise/src/protocol/x25519.rs b/transports/noise/src/protocol/x25519.rs index 0ffa9991ae6..482f20245a6 100644 --- a/transports/noise/src/protocol/x25519.rs +++ b/transports/noise/src/protocol/x25519.rs @@ -300,7 +300,7 @@ mod tests { let sodium_sec = ed25519_sk_to_curve25519(&ed25519_compact::SecretKey::new(ed25519.encode())); let sodium_pub = ed25519_pk_to_curve25519(&ed25519_compact::PublicKey::new( - ed25519.public().encode().clone(), + ed25519.public().encode(), )); let our_pub = x25519.public.0; diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index 14d09621dd9..16dcf4383d1 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -23,16 +23,16 @@ use futures::{ future::{self, Either}, prelude::*, }; -use libp2p_core::identity; -use libp2p_core::transport::{self, Transport}; -use libp2p_core::upgrade::{self, apply_inbound, apply_outbound, Negotiated}; -use libp2p_noise::{ +use libp2p::core::identity; +use libp2p::core::transport::{self, Transport}; +use libp2p::core::upgrade::{self, apply_inbound, apply_outbound, Negotiated}; +use libp2p::noise::{ Keypair, NoiseAuthenticated, NoiseConfig, NoiseError, NoiseOutput, RemoteIdentity, X25519Spec, X25519, }; -use libp2p_tcp::TcpTransport; +use libp2p::tcp::TcpTransport; use log::info; -use quickcheck::QuickCheck; +use quickcheck::*; use std::{convert::TryInto, io, net::TcpStream}; #[allow(dead_code)] @@ -324,11 +324,13 @@ fn expect_identity( #[derive(Debug, Clone, PartialEq, Eq)] struct Message(Vec); -impl quickcheck::Arbitrary for Message { - fn arbitrary(g: &mut G) -> Self { - let s = 1 + g.next_u32() % (128 * 1024); - let mut v = vec![0; s.try_into().unwrap()]; - g.fill_bytes(&mut v); +impl Arbitrary for Message { + fn 
arbitrary(g: &mut Gen) -> Self { + let s = g.gen_range(1..128 * 1024); + let mut v = vec![0; s]; + for b in &mut v { + *b = u8::arbitrary(g); + } Message(v) } } diff --git a/transports/plaintext/CHANGELOG.md b/transports/plaintext/CHANGELOG.md index bb070580986..a0d21b8d1f3 100644 --- a/transports/plaintext/CHANGELOG.md +++ b/transports/plaintext/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.37.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + # 0.36.0 - Update to `libp2p-core` `v0.36.0`. diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index f250c2a4287..1af9894d371 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-plaintext" edition = "2021" rust-version = "1.56.1" description = "Plaintext encryption dummy protocol for libp2p" -version = "0.36.0" +version = "0.37.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,7 +14,7 @@ categories = ["network-programming", "asynchronous"] bytes = "1" futures = "0.3.1" asynchronous-codec = "0.6" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } log = "0.4.8" prost = "0.11" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } @@ -22,8 +22,8 @@ void = "1.0.2" [dev-dependencies] env_logger = "0.9.0" -quickcheck = "0.9.0" -rand = "0.7" +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } +rand = "0.8" [build-dependencies] prost-build = "0.11" diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index ea62f0a9dfa..d1316309c64 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -32,7 +32,7 @@ fn variable_msg_length() { let _ = env_logger::try_init(); fn prop(msg: Vec) { - let mut msg_to_send = msg.clone(); + let msg_to_send = msg.clone(); let msg_to_receive = msg; let server_id = identity::Keypair::generate_ed25519(); @@ -91,7 +91,7 @@ fn variable_msg_length() { debug!("Client: writing message."); client_channel - .write_all(&mut msg_to_send) + .write_all(&msg_to_send) .await .expect("no error"); debug!("Client: flushing channel."); diff --git a/transports/pnet/CHANGELOG.md b/transports/pnet/CHANGELOG.md index 4b764219f4f..f2e40c03d8f 100644 --- a/transports/pnet/CHANGELOG.md +++ b/transports/pnet/CHANGELOG.md @@ -1,3 +1,15 @@ +# 0.22.1 [unreleased] + +- Bump rand to 0.8 and quickcheck to 1. See [PR 2857]. + +- Bump async-std-resolver and trust-dns-resolver from 0.21 to 0.22. See [PR 2988]. + +- Bump salsa20 to 0.10. See [PR 2989]. + +[PR 2857]: https://github.com/libp2p/rust-libp2p/pull/2857 +[PR 2988]: https://github.com/libp2p/rust-libp2p/pull/2988 +[PR 2989]: https://github.com/libp2p/rust-libp2p/pull/2989 + # 0.22.0 [2021-11-01] - Update dependencies. 
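The `libp2p-noise` changes earlier in this diff add `NoiseConfig::with_prologue`. The prologue bytes are mixed into the handshake hash, so both peers must supply identical bytes for the handshake to succeed, which is exactly what the two new unit tests assert. A minimal usage sketch along the lines of those tests; the umbrella-crate import path and the spelled-out type parameters are assumptions:

```rust
use libp2p::core::identity;
use libp2p::noise::{Keypair, NoiseConfig, X25519Spec, XX};

fn noise_xx_with_prologue(id_keys: &identity::Keypair) -> NoiseConfig<XX, X25519Spec> {
    // Static DH keys, signed with the libp2p identity key as usual.
    let dh_keys = Keypair::<X25519Spec>::new();
    let noise_keys = dh_keys
        .into_authentic(id_keys)
        .expect("signing the DH public key with the identity keypair");

    // Both peers must pass the same prologue bytes; otherwise the handshake
    // hashes diverge and the handshake fails.
    NoiseConfig::xx(noise_keys).with_prologue(b"shared knowledge".to_vec())
}
```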
diff --git a/transports/pnet/Cargo.toml b/transports/pnet/Cargo.toml index b674f18ab97..3bd3f3689df 100644 --- a/transports/pnet/Cargo.toml +++ b/transports/pnet/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-pnet" edition = "2021" rust-version = "1.56.1" description = "Private swarm support for libp2p" -version = "0.22.0" +version = "0.22.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,10 +13,10 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" log = "0.4.8" -salsa20 = "0.9" +salsa20 = "0.10" sha3 = "0.10" -rand = "0.7" +rand = "0.8" pin-project = "1.0.2" [dev-dependencies] -quickcheck = "0.9.0" +quickcheck = { package = "quickcheck-ext", path = "../../misc/quickcheck-ext" } diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index efd27b14667..8560171682e 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -30,7 +30,7 @@ use log::trace; use pin_project::pin_project; use rand::RngCore; use salsa20::{ - cipher::{NewCipher, StreamCipher}, + cipher::{KeyIvInit, StreamCipher}, Salsa20, XSalsa20, }; use sha3::{digest::ExtendableOutput, Shake128}; @@ -319,9 +319,8 @@ mod tests { use quickcheck::*; impl Arbitrary for PreSharedKey { - fn arbitrary(g: &mut G) -> PreSharedKey { - let mut key = [0; KEY_SIZE]; - g.fill_bytes(&mut key); + fn arbitrary(g: &mut Gen) -> PreSharedKey { + let key = core::array::from_fn(|_| u8::arbitrary(g)); PreSharedKey(key) } } diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md index ff34ae49407..83ad2d3c1a3 100644 --- a/transports/tcp/CHANGELOG.md +++ b/transports/tcp/CHANGELOG.md @@ -3,7 +3,16 @@ - Update to `if-watch` `v2.0.0`. Simplify `IfWatcher` integration. Use `if_watch::IfWatcher` for all runtimes. See [PR 2813]. +- Update to `libp2p-core` `v0.37.0`. + +- Remove default features. If you previously depended on `async-std` you need to enable this explicitly now. See [PR 2918]. + +- Return `None` in `GenTcpTransport::address_translation` if listen- or observed address are not tcp addresses. + See [PR 2970]. + [PR 2813]: https://github.com/libp2p/rust-libp2p/pull/2813 +[PR 2918]: https://github.com/libp2p/rust-libp2p/pull/2918 +[PR 2970]: https://github.com/libp2p/rust-libp2p/pull/2970 # 0.36.0 diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 948d9507f0a..06517a84f67 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -16,13 +16,12 @@ futures = "0.3.8" futures-timer = "3.0" if-watch = "2.0.0" libc = "0.2.80" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } log = "0.4.11" socket2 = { version = "0.4.0", features = ["all"] } tokio-crate = { package = "tokio", version = "1.19.0", default-features = false, features = ["net"], optional = true } [features] -default = ["async-io"] tokio = ["tokio-crate"] async-io = ["async-io-crate"] diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index f7b897c0d47..bf75107982b 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -520,6 +520,9 @@ where /// `None` is returned if one of the given addresses is not a TCP/IP /// address. 
fn address_translation(&self, listen: &Multiaddr, observed: &Multiaddr) -> Option { + if !is_tcp_addr(listen) || !is_tcp_addr(observed) { + return None; + } match &self.port_reuse { PortReuse::Disabled => address_translation(listen, observed), PortReuse::Enabled { .. } => Some(observed.clone()), @@ -829,6 +832,23 @@ fn ip_to_multiaddr(ip: IpAddr, port: u16) -> Multiaddr { Multiaddr::empty().with(ip.into()).with(Protocol::Tcp(port)) } +fn is_tcp_addr(addr: &Multiaddr) -> bool { + use Protocol::*; + + let mut iter = addr.iter(); + + let first = match iter.next() { + None => return false, + Some(p) => p, + }; + let second = match iter.next() { + None => return false, + Some(p) => p, + }; + + matches!(first, Ip4(_) | Ip6(_) | Dns(_) | Dns4(_) | Dns6(_)) && matches!(second, Tcp(_)) +} + #[cfg(test)] mod tests { use super::*; @@ -836,6 +856,7 @@ mod tests { channel::{mpsc, oneshot}, future::poll_fn, }; + use libp2p_core::PeerId; #[test] fn multiaddr_to_tcp_conversion() { @@ -1240,4 +1261,43 @@ mod tests { test("/ip4/127.0.0.1/tcp/12345/tcp/12345".parse().unwrap()); } + + #[cfg(any(feature = "async-io", feature = "tcp"))] + #[test] + fn test_address_translation() { + #[cfg(feature = "async-io")] + let transport = TcpTransport::new(GenTcpConfig::new()); + #[cfg(all(feature = "tokio", not(feature = "async-io")))] + let transport = TokioTcpTransport::new(GenTcpConfig::new()); + + let port = 42; + let tcp_listen_addr = Multiaddr::empty() + .with(Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .with(Protocol::Tcp(port)); + let observed_ip = Ipv4Addr::new(123, 45, 67, 8); + let tcp_observed_addr = Multiaddr::empty() + .with(Protocol::Ip4(observed_ip)) + .with(Protocol::Tcp(1)) + .with(Protocol::P2p(PeerId::random().into())); + + let translated = transport + .address_translation(&tcp_listen_addr, &tcp_observed_addr) + .unwrap(); + let mut iter = translated.iter(); + assert_eq!(iter.next(), Some(Protocol::Ip4(observed_ip))); + assert_eq!(iter.next(), Some(Protocol::Tcp(port))); + assert_eq!(iter.next(), None); + + let quic_addr = Multiaddr::empty() + .with(Protocol::Ip4(Ipv4Addr::new(87, 65, 43, 21))) + .with(Protocol::Udp(1)) + .with(Protocol::Quic); + + assert!(transport + .address_translation(&tcp_listen_addr, &quic_addr) + .is_none()); + assert!(transport + .address_translation(&quic_addr, &tcp_observed_addr) + .is_none()); + } } diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index 78f759803ee..6d7581890c9 100644 --- a/transports/uds/CHANGELOG.md +++ b/transports/uds/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.36.0 [unreleased] + +- Remove default features. If you previously depended on `async-std` you need to enable this explicitly now. See [PR 2918]. + +- Update to `libp2p-core` `v0.37.0`. + +[PR 2918]: https://github.com/libp2p/rust-libp2p/pull/2918 + # 0.35.0 - Update to `libp2p-core` `v0.36.0`. 
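The `address_translation` guard added above only translates when both multiaddrs start with an IP or DNS component followed by `/tcp`; any other pair, for example a QUIC address, now yields `None`. Since `is_tcp_addr` is private to `libp2p-tcp`, the following standalone sketch mirrors its logic for illustration:

```rust
use libp2p_core::multiaddr::{Multiaddr, Protocol};

/// Mirror of the private `is_tcp_addr` helper added in this diff: the first
/// component must be an IP or DNS protocol and the second must be `Tcp`.
fn looks_like_tcp(addr: &Multiaddr) -> bool {
    use Protocol::*;

    let mut iter = addr.iter();
    let first = match iter.next() {
        Some(p) => p,
        None => return false,
    };
    let second = match iter.next() {
        Some(p) => p,
        None => return false,
    };

    matches!(first, Ip4(_) | Ip6(_) | Dns(_) | Dns4(_) | Dns6(_)) && matches!(second, Tcp(_))
}

fn main() {
    let tcp: Multiaddr = "/ip4/127.0.0.1/tcp/4001".parse().unwrap();
    let quic: Multiaddr = "/ip4/127.0.0.1/udp/4001/quic".parse().unwrap();

    assert!(looks_like_tcp(&tcp));
    assert!(!looks_like_tcp(&quic)); // `address_translation` would return `None` here
}
```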
diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index 3b7f1eb0a3f..e9483f503d1 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-uds" edition = "2021" rust-version = "1.56.1" description = "Unix domain sockets transport for libp2p" -version = "0.35.0" +version = "0.36.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,13 +12,10 @@ categories = ["network-programming", "asynchronous"] [target.'cfg(all(unix, not(target_os = "emscripten")))'.dependencies] async-std = { version = "1.6.2", optional = true } -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } log = "0.4.1" futures = "0.3.1" tokio = { version = "1.15", default-features = false, features = ["net"], optional = true } [target.'cfg(all(unix, not(target_os = "emscripten")))'.dev-dependencies] tempfile = "3.0" - -[features] -default = ["async-std"] diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 492f1afb029..71e6c094013 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -264,7 +264,6 @@ mod tests { Transport, }; use std::{self, borrow::Cow, path::Path}; - use tempfile; #[test] fn multiaddr_to_path_conversion() { @@ -320,7 +319,7 @@ mod tests { let mut uds = UdsConfig::new(); let addr = rx.await.unwrap(); let mut socket = uds.dial(addr).unwrap().await.unwrap(); - socket.write(&[1, 2, 3]).await.unwrap(); + let _ = socket.write(&[1, 2, 3]).await.unwrap(); }); } diff --git a/transports/wasm-ext/CHANGELOG.md b/transports/wasm-ext/CHANGELOG.md index 0b696f40cce..b778fb9a7ca 100644 --- a/transports/wasm-ext/CHANGELOG.md +++ b/transports/wasm-ext/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.37.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + # 0.36.0 - Update to `libp2p-core` `v0.36.0`. diff --git a/transports/wasm-ext/Cargo.toml b/transports/wasm-ext/Cargo.toml index 6c6a645c8c2..b4f290e7676 100644 --- a/transports/wasm-ext/Cargo.toml +++ b/transports/wasm-ext/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-wasm-ext" edition = "2021" rust-version = "1.56.1" description = "Allows passing in an external transport in a WASM environment" -version = "0.36.0" +version = "0.37.0" authors = ["Pierre Krieger "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" js-sys = "0.3.50" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } parity-send-wrapper = "0.1.0" wasm-bindgen = "0.2.42" wasm-bindgen-futures = "0.4.4" diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index 00ee342d01f..10fcf02e183 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.39.0 [unreleased] + +- Update to `libp2p-core` `v0.37.0`. + # 0.38.0 - Update to `libp2p-core` `v0.36.0`. 
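The uds test change above discards the return value of `AsyncWriteExt::write` with `let _ =`; the count matters in general because `write` may perform a short write, whereas `write_all` keeps writing until the whole buffer is flushed. A small hedged sketch of the distinction (function and buffer contents are illustrative):

```rust
use futures::{AsyncWrite, AsyncWriteExt};

async fn send_greeting<S>(socket: &mut S) -> std::io::Result<()>
where
    S: AsyncWrite + Unpin,
{
    // `write` reports how many bytes were actually written and may write fewer than
    // requested; the uds test deliberately discards that count with `let _ =`.
    let _ = socket.write(&[1, 2, 3]).await?;

    // `write_all` keeps writing until the entire buffer has been written.
    socket.write_all(&[4, 5, 6]).await?;

    Ok(())
}
```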
diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index b470864a959..5402af48c6f 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket" edition = "2021" rust-version = "1.56.1" description = "WebSocket transport for libp2p" -version = "0.38.0" +version = "0.39.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,7 +14,7 @@ categories = ["network-programming", "asynchronous"] futures-rustls = "0.22" either = "1.5.3" futures = "0.3.1" -libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.37.0", path = "../../core" } log = "0.4.8" parking_lot = "0.12.0" quicksink = "0.1" @@ -24,4 +24,4 @@ url = "2.1" webpki-roots = "0.22" [dev-dependencies] -libp2p-tcp = { path = "../tcp" } +libp2p = { path = "../..", features = ["full"] } diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index bf75e389648..721f9c92848 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -219,8 +219,8 @@ where mod tests { use super::WsConfig; use futures::prelude::*; - use libp2p_core::{multiaddr::Protocol, Multiaddr, PeerId, Transport}; - use libp2p_tcp as tcp; + use libp2p::core::{multiaddr::Protocol, Multiaddr, PeerId, Transport}; + use libp2p::tcp; #[test] fn dialer_connects_to_listener_ipv4() {