• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tari-project / tari / 15491056829

06 Jun 2025 12:55PM UTC coverage: 72.363% (+0.3%) from 72.04%
15491056829

push

github

web-flow
chore: fix tests (#7196)

Description
---
Fixes the tests and improves performance by not duplicating transactions

<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit

- **Bug Fixes**
- Improved detection and removal of duplicate unconfirmed transactions,
ensuring users see each transaction only once in the transaction list.
- Enhanced handling of transaction mining status, providing more
accurate display of unconfirmed transactions.

- **Tests**
- Updated test scenarios to better simulate real-world transaction
mining and status changes, increasing reliability of transaction status
updates.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->

19 of 19 new or added lines in 1 file covered. (100.0%)

48 existing lines in 18 files now uncovered.

81253 of 112285 relevant lines covered (72.36%)

243660.94 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

91.17
/comms/core/src/peer_manager/peer_storage_sql.rs
1
//  Copyright 2019 The Tari Project
2
//
3
//  Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
4
//  following conditions are met:
5
//
6
//  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
7
//  disclaimer.
8
//
9
//  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
10
//  following disclaimer in the documentation and/or other materials provided with the distribution.
11
//
12
//  3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
13
//  products derived from this software without specific prior written permission.
14
//
15
//  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
16
//  INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
//  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18
//  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
19
//  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
20
//  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
21
//  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22

23
use std::{cmp::min, time::Duration};
24

25
use log::*;
26
use multiaddr::Multiaddr;
27

28
use crate::{
29
    net_address::PeerAddressSource,
30
    peer_manager::{
31
        database::{PeerDatabaseSql, ThisPeerIdentity},
32
        peer::Peer,
33
        peer_id::PeerId,
34
        NodeDistance,
35
        NodeId,
36
        PeerFeatures,
37
        PeerManagerError,
38
    },
39
    types::{CommsDatabase, CommsPublicKey},
40
};
41

42
/// Log target for this module.
const LOG_TARGET: &str = "comms::peer_manager::peer_storage_sql";
/// The maximum number of peers to return from peer-manager sync queries.
const PEER_MANAGER_SYNC_PEERS: usize = 100;
/// The maximum amount of time a peer can be inactive before being considered stale:
/// (5 days * 24h * 60m * 60s) / 2 = 2.5 days, expressed in seconds.
pub const STALE_PEER_THRESHOLD_DURATION: Duration = Duration::from_secs(5 * 24 * 60 * 60 / 2);

49
/// `PeerStorageSql` provides a mechanism to keep a datastore and a local copy of all peers in sync and allow fast
/// searches using the node_id, public key or net_address of a peer.
#[derive(Clone)]
pub struct PeerStorageSql {
    // Backing SQL database holding all peer records, including this node's own identity.
    peer_db: PeerDatabaseSql,
}
55

56
impl PeerStorageSql {
57
    /// Constructs a new PeerStorageSql, with indexes populated from the given datastore
58
    pub fn new_indexed(database: PeerDatabaseSql) -> Result<PeerStorageSql, PeerManagerError> {
242✔
59
        trace!(
242✔
60
            target: LOG_TARGET,
×
61
            "Peer storage is initialized. {} total entries.",
×
62
            database.size(),
×
63
        );
64

65
        Ok(PeerStorageSql { peer_db: database })
242✔
66
    }
242✔
67

68
    /// Get this peer's identity
69
    pub fn this_peer_identity(&self) -> ThisPeerIdentity {
1,517✔
70
        self.peer_db.this_peer_identity()
1,517✔
71
    }
1,517✔
72

73
    /// Get the size of the database
74
    pub fn count(&self) -> usize {
56,961✔
75
        self.peer_db.size()
56,961✔
76
    }
56,961✔
77

78
    /// Adds or updates a peer and sets the last connection as successful.
79
    /// If the peer is marked as offline, it will be unmarked.
80
    pub fn add_or_update_peer(&self, peer: Peer) -> Result<PeerId, PeerManagerError> {
55,482✔
81
        Ok(self.peer_db.add_or_update_peer(peer)?)
55,482✔
82
    }
55,482✔
83

84
    /// Adds a peer an online peer if the peer does not already exist. When a peer already
85
    /// exists, the stored version will be replaced with the newly provided peer.
86
    pub fn add_or_update_online_peer(
1✔
87
        &self,
1✔
88
        pubkey: &CommsPublicKey,
1✔
89
        node_id: &NodeId,
1✔
90
        addresses: &[Multiaddr],
1✔
91
        peer_features: &PeerFeatures,
1✔
92
        source: &PeerAddressSource,
1✔
93
    ) -> Result<Peer, PeerManagerError> {
1✔
94
        Ok(self
1✔
95
            .peer_db
1✔
96
            .add_or_update_online_peer(pubkey, node_id, addresses, peer_features, source)?)
1✔
97
    }
1✔
98

99
    /// The peer with the specified node id will be soft deleted (marked as deleted)
100
    pub fn soft_delete_peer(&self, node_id: &NodeId) -> Result<(), PeerManagerError> {
1✔
101
        self.peer_db.soft_delete_peer(node_id)?;
1✔
102
        Ok(())
1✔
103
    }
1✔
104

105
    /// Find the peer with the provided NodeID
106
    pub fn get_peer_by_node_id(&self, node_id: &NodeId) -> Result<Option<Peer>, PeerManagerError> {
157,817✔
107
        Ok(self.peer_db.get_peer_by_node_id(node_id)?)
157,817✔
108
    }
157,817✔
109

110
    /// Get all peers based on a list of their node_ids
111
    pub fn get_peers_by_node_ids(&self, node_ids: &[NodeId]) -> Result<Vec<Peer>, PeerManagerError> {
1✔
112
        Ok(self.peer_db.get_peers_by_node_ids(node_ids)?)
1✔
113
    }
1✔
114

115
    /// Get all peers based on a list of their node_ids
116
    pub fn get_peer_public_keys_by_node_ids(
×
117
        &self,
×
118
        node_ids: &[NodeId],
×
119
    ) -> Result<Vec<CommsPublicKey>, PeerManagerError> {
×
120
        Ok(self.peer_db.get_peer_public_keys_by_node_ids(node_ids)?)
×
121
    }
×
122

123
    /// Get all banned peers
124
    pub fn get_banned_peers(&self) -> Result<Vec<Peer>, PeerManagerError> {
×
125
        Ok(self.peer_db.get_banned_peers()?)
×
126
    }
×
127

128
    pub fn find_all_starts_with(&self, partial: &[u8]) -> Result<Vec<Peer>, PeerManagerError> {
×
129
        Ok(self.peer_db.find_all_peers_match_partial_key(partial)?)
×
130
    }
×
131

132
    /// Find the peer with the provided PublicKey
133
    pub fn find_by_public_key(&self, public_key: &CommsPublicKey) -> Result<Option<Peer>, PeerManagerError> {
51,911✔
134
        Ok(self.peer_db.get_peer_by_public_key(public_key)?)
51,911✔
135
    }
51,911✔
136

137
    /// Check if a peer exist using the specified public_key
138
    pub fn exists_public_key(&self, public_key: &CommsPublicKey) -> Result<bool, PeerManagerError> {
17✔
139
        if let Ok(val) = self.peer_db.peer_exists_by_public_key(public_key) {
17✔
140
            Ok(val.is_some())
17✔
141
        } else {
142
            Ok(false)
×
143
        }
144
    }
17✔
145

146
    /// Check if a peer exist using the specified node_id
147
    pub fn exists_node_id(&self, node_id: &NodeId) -> Result<bool, PeerManagerError> {
×
148
        if let Ok(val) = self.peer_db.peer_exists_by_node_id(node_id) {
×
149
            Ok(val.is_some())
×
150
        } else {
151
            Ok(false)
×
152
        }
153
    }
×
154

155
    /// Return the peer by corresponding to the provided NodeId if it is not banned
156
    pub fn direct_identity_node_id(&self, node_id: &NodeId) -> Result<Peer, PeerManagerError> {
51,486✔
157
        let peer = self
51,486✔
158
            .get_peer_by_node_id(node_id)?
51,486✔
159
            .ok_or(PeerManagerError::peer_not_found(node_id))?;
51,486✔
160

161
        if peer.is_banned() {
51,485✔
UNCOV
162
            Err(PeerManagerError::BannedPeer)
×
163
        } else {
164
            Ok(peer)
51,485✔
165
        }
166
    }
51,486✔
167

168
    /// Return the peer by corresponding to the provided public key if it is not banned
169
    pub fn direct_identity_public_key(&self, public_key: &CommsPublicKey) -> Result<Peer, PeerManagerError> {
51,508✔
170
        let peer = self
51,508✔
171
            .find_by_public_key(public_key)?
51,508✔
172
            .ok_or(PeerManagerError::peer_not_found(&NodeId::from_public_key(public_key)))?;
51,508✔
173

174
        if peer.is_banned() {
51,502✔
175
            Err(PeerManagerError::BannedPeer)
×
176
        } else {
177
            Ok(peer)
51,502✔
178
        }
179
    }
51,508✔
180

181
    /// Return all peers, optionally filtering on supplied feature
182
    pub fn all(&self, features: Option<PeerFeatures>) -> Result<Vec<Peer>, PeerManagerError> {
1✔
183
        Ok(self.peer_db.get_all_peers(features)?)
1✔
184
    }
1✔
185

186
    /// Return "good" peers for syncing
187
    /// Criteria:
188
    ///  - Peer is not banned
189
    ///  - Peer has been seen within a defined time span (within the threshold)
190
    ///  - Only returns a maximum number of syncable peers (corresponds with the max possible number of requestable
191
    ///    peers to sync)
192
    ///  - Uses 0 as max PEER_MANAGER_SYNC_PEERS
193
    pub fn discovery_syncing(
5✔
194
        &self,
5✔
195
        mut n: usize,
5✔
196
        excluded_peers: &[NodeId],
5✔
197
        features: Option<PeerFeatures>,
5✔
198
    ) -> Result<Vec<Peer>, PeerManagerError> {
5✔
199
        if n == 0 {
5✔
200
            n = PEER_MANAGER_SYNC_PEERS;
×
201
        } else {
5✔
202
            n = min(n, PEER_MANAGER_SYNC_PEERS);
5✔
203
        }
5✔
204

205
        Ok(self
5✔
206
            .peer_db
5✔
207
            .get_n_random_active_peers(n, excluded_peers, features, Some(STALE_PEER_THRESHOLD_DURATION))?)
5✔
208
    }
5✔
209

210
    /// Compile a list of all known peers
211
    pub fn get_not_banned_or_deleted_peers(&self) -> Result<Vec<Peer>, PeerManagerError> {
1✔
212
        Ok(self
1✔
213
            .peer_db
1✔
214
            .get_n_not_banned_or_deleted_peers(PEER_MANAGER_SYNC_PEERS)?)
1✔
215
    }
1✔
216

217
    /// Compile a list of closest `n` active peers
218
    pub fn closest_n_active_peers(
2,858✔
219
        &self,
2,858✔
220
        region_node_id: &NodeId,
2,858✔
221
        n: usize,
2,858✔
222
        excluded_peers: &[NodeId],
2,858✔
223
        features: Option<PeerFeatures>,
2,858✔
224
        stale_peer_threshold: Option<Duration>,
2,858✔
225
        exclude_if_all_address_failed: bool,
2,858✔
226
        exclusion_distance: Option<NodeDistance>,
2,858✔
227
    ) -> Result<Vec<Peer>, PeerManagerError> {
2,858✔
228
        Ok(self.peer_db.get_closest_n_active_peers(
2,858✔
229
            region_node_id,
2,858✔
230
            n,
2,858✔
231
            excluded_peers,
2,858✔
232
            features,
2,858✔
233
            stale_peer_threshold,
2,858✔
234
            exclude_if_all_address_failed,
2,858✔
235
            exclusion_distance,
2,858✔
236
        )?)
2,858✔
237
    }
2,858✔
238

239
    pub fn get_seed_peers(&self) -> Result<Vec<Peer>, PeerManagerError> {
1✔
240
        Ok(self.peer_db.get_seed_peers()?)
1✔
241
    }
1✔
242

243
    /// Compile a random list of communication node peers of size _n_ that are not banned or offline
244
    pub fn random_peers(&self, n: usize, exclude_peers: &[NodeId]) -> Result<Vec<Peer>, PeerManagerError> {
825✔
245
        Ok(self.peer_db.get_n_random_peers(n, exclude_peers)?)
825✔
246
    }
825✔
247

248
    /// Get the closest `n` not failed, banned or deleted peers, ordered by their distance to the given node ID.
249
    pub fn get_closest_n_good_standing_peers(
2✔
250
        &self,
2✔
251
        n: usize,
2✔
252
        features: PeerFeatures,
2✔
253
    ) -> Result<Vec<Peer>, PeerManagerError> {
2✔
254
        Ok(self.peer_db.get_closest_n_good_standing_peers(n, features)?)
2✔
255
    }
2✔
256

257
    /// Check if a specific node_id is in the network region of the N nearest neighbours of the region specified by
258
    /// region_node_id. If there are less than N known peers, this will _always_ return true
259
    pub fn in_network_region(&self, node_id: &NodeId, n: usize) -> Result<bool, PeerManagerError> {
4✔
260
        let region_node_id = self.this_peer_identity().node_id;
4✔
261
        let region_node_distance = region_node_id.distance(node_id);
4✔
262
        let node_threshold = self.calc_region_threshold(n, PeerFeatures::COMMUNICATION_NODE)?;
4✔
263
        // Is node ID in the base node threshold?
264
        if region_node_distance <= node_threshold {
4✔
265
            return Ok(true);
3✔
266
        }
1✔
267
        let client_threshold = self.calc_region_threshold(n, PeerFeatures::COMMUNICATION_CLIENT)?; // Is node ID in the base client threshold?
1✔
268
        Ok(region_node_distance <= client_threshold)
1✔
269
    }
4✔
270

271
    /// Calculate the threshold for the region specified by region_node_id.
272
    pub fn calc_region_threshold(&self, n: usize, features: PeerFeatures) -> Result<NodeDistance, PeerManagerError> {
9✔
273
        let region_node_id = self.this_peer_identity().node_id;
9✔
274
        if n == 0 {
9✔
275
            return Ok(NodeDistance::max_distance());
×
276
        }
9✔
277

278
        let closest_peers = self.peer_db.get_closest_n_good_standing_peer_node_ids(n, features)?;
9✔
279
        let mut dists = Vec::new();
9✔
280
        for node_id in closest_peers {
42✔
281
            dists.push(region_node_id.distance(&node_id));
33✔
282
        }
33✔
283

284
        if dists.is_empty() {
9✔
285
            return Ok(NodeDistance::max_distance());
×
286
        }
9✔
287

9✔
288
        // If we have less than `n` matching peers in our threshold group, the threshold should be max
9✔
289
        if dists.len() < n {
9✔
290
            return Ok(NodeDistance::max_distance());
1✔
291
        }
8✔
292

8✔
293
        Ok(dists.pop().expect("dists cannot be empty at this point"))
8✔
294
    }
9✔
295

296
    /// Unban the peer
297
    pub fn unban_peer(&self, node_id: &NodeId) -> Result<(), PeerManagerError> {
×
298
        let _node_id = self.peer_db.reset_banned(node_id)?;
×
299
        Ok(())
×
300
    }
×
301

302
    /// Unban the peer
303
    pub fn unban_all_peers(&self) -> Result<usize, PeerManagerError> {
×
304
        let number_unbanned = self.peer_db.reset_all_banned()?;
×
305
        Ok(number_unbanned)
×
306
    }
×
307

308
    pub fn reset_offline_non_wallet_peers(&self) -> Result<usize, PeerManagerError> {
×
309
        let number_offline = self.peer_db.reset_offline_non_wallet_peers()?;
×
310
        Ok(number_offline)
×
311
    }
×
312

313
    /// Ban the peer for the given duration
314
    pub fn ban_peer(
×
315
        &self,
×
316
        public_key: &CommsPublicKey,
×
317
        duration: Duration,
×
318
        reason: String,
×
319
    ) -> Result<NodeId, PeerManagerError> {
×
320
        let node_id = NodeId::from_key(public_key);
×
321
        self.peer_db
×
322
            .set_banned(&node_id, duration, reason)?
×
323
            .ok_or(PeerManagerError::peer_not_found(&NodeId::from_public_key(public_key)))
×
324
    }
×
325

326
    /// Ban the peer for the given duration
327
    pub fn ban_peer_by_node_id(
30✔
328
        &self,
30✔
329
        node_id: &NodeId,
30✔
330
        duration: Duration,
30✔
331
        reason: String,
30✔
332
    ) -> Result<NodeId, PeerManagerError> {
30✔
333
        self.peer_db
30✔
334
            .set_banned(node_id, duration, reason)?
30✔
335
            .ok_or(PeerManagerError::peer_not_found(node_id))
30✔
336
    }
30✔
337

338
    pub fn is_peer_banned(&self, node_id: &NodeId) -> Result<bool, PeerManagerError> {
2,820✔
339
        let peer = self
2,820✔
340
            .get_peer_by_node_id(node_id)?
2,820✔
341
            .ok_or(PeerManagerError::peer_not_found(node_id))?;
2,818✔
342
        Ok(peer.is_banned())
2,818✔
343
    }
2,820✔
344

345
    /// This will store metadata inside of the metadata field in the peer provided by the nodeID.
346
    /// It will return None if the value was empty and the old value if the value was updated
347
    pub fn set_peer_metadata(
2,361✔
348
        &self,
2,361✔
349
        node_id: &NodeId,
2,361✔
350
        key: u8,
2,361✔
351
        data: Vec<u8>,
2,361✔
352
    ) -> Result<Option<Vec<u8>>, PeerManagerError> {
2,361✔
353
        Ok(self.peer_db.set_metadata(node_id, key, data)?)
2,361✔
354
    }
2,361✔
355
}
356

357
// NOTE(review): `From<PeerStorageSql> for CommsDatabase` would be the idiomatic direction, but the
// original deliberately implements `Into` (hence the allow) — presumably due to how the
// `CommsDatabase` alias is declared elsewhere; confirm before changing.
#[allow(clippy::from_over_into)]
impl Into<CommsDatabase> for PeerStorageSql {
    // Unwraps the storage into its backing database.
    fn into(self) -> CommsDatabase {
        self.peer_db
    }
}
363

364
#[cfg(test)]
365
mod test {
366
    use std::{borrow::BorrowMut, iter::repeat_with};
367

368
    use chrono::{DateTime, Utc};
369
    use multiaddr::Multiaddr;
370
    use rand::Rng;
371
    use tari_common_sqlite::connection::DbConnection;
372

373
    use super::*;
374
    use crate::{
375
        net_address::{MultiaddrWithStats, MultiaddressesWithStats, PeerAddressSource},
376
        peer_manager::{database::MIGRATIONS, peer::PeerFlags},
377
    };
378

379
    fn get_peer_db_sql_test_db() -> Result<PeerDatabaseSql, PeerManagerError> {
5✔
380
        let db_connection = DbConnection::connect_temp_file_and_migrate(MIGRATIONS).unwrap();
5✔
381
        Ok(PeerDatabaseSql::new(
5✔
382
            db_connection,
5✔
383
            &create_test_peer(PeerFeatures::COMMUNICATION_NODE, false),
5✔
384
        )?)
5✔
385
    }
5✔
386

387
    fn get_peer_storage_sql_test_db() -> Result<PeerStorageSql, PeerManagerError> {
4✔
388
        PeerStorageSql::new_indexed(get_peer_db_sql_test_db()?)
4✔
389
    }
4✔
390

391
    #[test]
    fn test_restore() {
        // Builds a peer keyed by a fresh random keypair carrying the given multiaddresses.
        // Extracted to remove the three near-identical hand-rolled constructions.
        fn make_peer(addresses: &[&str]) -> Peer {
            let mut rng = rand::rngs::OsRng;
            let (_sk, pk) = CommsPublicKey::random_keypair(&mut rng);
            let node_id = NodeId::from_key(&pk);
            let mut net_addresses =
                MultiaddressesWithStats::from_addresses_with_source(vec![], &PeerAddressSource::Config);
            for address in addresses {
                let address = address.parse::<Multiaddr>().unwrap();
                net_addresses.add_address(&address, &PeerAddressSource::Config);
            }
            Peer::new(
                pk,
                node_id,
                net_addresses,
                PeerFlags::default(),
                PeerFeatures::empty(),
                Default::default(),
                Default::default(),
            )
        }

        // Create peers
        let peer1 = make_peer(&["/ip4/1.2.3.4/tcp/8000", "/ip4/5.6.7.8/tcp/8000", "/ip4/5.6.7.8/tcp/7000"]);
        let peer2 = make_peer(&["/ip4/9.10.11.12/tcp/7000"]);
        let peer3 = make_peer(&["/ip4/13.14.15.16/tcp/6000", "/ip4/17.18.19.20/tcp/8000"]);

        // Create new datastore with a peer database
        let mut db = Some(get_peer_db_sql_test_db().unwrap());
        {
            let peer_storage = db.take().unwrap();

            // Test adding and searching for peers
            assert!(peer_storage.add_or_update_peer(peer1.clone()).is_ok());
            assert!(peer_storage.add_or_update_peer(peer2.clone()).is_ok());
            assert!(peer_storage.add_or_update_peer(peer3.clone()).is_ok());

            assert_eq!(peer_storage.size(), 3);
            assert!(peer_storage.get_peer_by_public_key(&peer1.public_key).is_ok());
            assert!(peer_storage.get_peer_by_public_key(&peer2.public_key).is_ok());
            assert!(peer_storage.get_peer_by_public_key(&peer3.public_key).is_ok());
            db = Some(peer_storage);
        }
        // Restore from the existing database; all peers must survive the round trip
        let peer_storage = PeerStorageSql::new_indexed(db.take().unwrap()).unwrap();

        assert_eq!(peer_storage.peer_db.size(), 3);
        assert!(peer_storage.find_by_public_key(&peer1.public_key).is_ok());
        assert!(peer_storage.find_by_public_key(&peer2.public_key).is_ok());
        assert!(peer_storage.find_by_public_key(&peer3.public_key).is_ok());
    }
470

471
    #[test]
    fn test_add_delete_find_peer() {
        // Builds a peer keyed by a fresh random keypair carrying the given multiaddresses.
        // Extracted to remove the three near-identical hand-rolled constructions.
        fn make_peer(addresses: &[&str]) -> Peer {
            let mut rng = rand::rngs::OsRng;
            let (_sk, pk) = CommsPublicKey::random_keypair(&mut rng);
            let node_id = NodeId::from_key(&pk);
            let mut net_addresses =
                MultiaddressesWithStats::from_addresses_with_source(vec![], &PeerAddressSource::Config);
            for address in addresses {
                let address = address.parse::<Multiaddr>().unwrap();
                net_addresses.add_address(&address, &PeerAddressSource::Config);
            }
            Peer::new(
                pk,
                node_id,
                net_addresses,
                PeerFlags::default(),
                PeerFeatures::empty(),
                Default::default(),
                Default::default(),
            )
        }

        let peer_storage = get_peer_storage_sql_test_db().unwrap();

        // Create peers
        let peer1 = make_peer(&["/ip4/1.2.3.4/tcp/8000", "/ip4/5.6.7.8/tcp/8000", "/ip4/5.6.7.8/tcp/7000"]);
        let peer2 = make_peer(&["/ip4/9.10.11.12/tcp/7000"]);
        let peer3 = make_peer(&["/ip4/13.14.15.16/tcp/6000", "/ip4/17.18.19.20/tcp/8000"]);

        // Test adding and searching for peers
        peer_storage.add_or_update_peer(peer1.clone()).unwrap();
        assert!(peer_storage.add_or_update_peer(peer2.clone()).is_ok());
        assert!(peer_storage.add_or_update_peer(peer3.clone()).is_ok());

        assert_eq!(peer_storage.peer_db.size(), 3);

        // All three peers are retrievable by public key and by node id
        for peer in [&peer1, &peer2, &peer3] {
            assert_eq!(
                peer_storage
                    .find_by_public_key(&peer.public_key)
                    .unwrap()
                    .unwrap()
                    .public_key,
                peer.public_key
            );
            assert_eq!(
                peer_storage
                    .get_peer_by_node_id(&peer.node_id)
                    .unwrap()
                    .unwrap()
                    .node_id,
                peer.node_id
            );
        }

        // Test delete of border case peer
        assert!(peer_storage.soft_delete_peer(&peer3.node_id).is_ok());

        // It is a logical delete, so there should still be 3 peers in the db
        assert_eq!(peer_storage.peer_db.size(), 3);

        // The undeleted peers are still retrievable exactly as before
        for peer in [&peer1, &peer2] {
            assert_eq!(
                peer_storage
                    .find_by_public_key(&peer.public_key)
                    .unwrap()
                    .unwrap()
                    .public_key,
                peer.public_key
            );
            assert_eq!(
                peer_storage
                    .get_peer_by_node_id(&peer.node_id)
                    .unwrap()
                    .unwrap()
                    .node_id,
                peer.node_id
            );
        }
        // The soft-deleted peer is still returned, but now carries a deleted_at timestamp
        assert!(peer_storage
            .find_by_public_key(&peer3.public_key)
            .unwrap()
            .unwrap()
            .deleted_at
            .is_some());
        assert!(peer_storage
            .get_peer_by_node_id(&peer3.node_id)
            .unwrap()
            .unwrap()
            .deleted_at
            .is_some());
    }
641

642
    fn create_test_peer(features: PeerFeatures, ban: bool) -> Peer {
28✔
643
        let mut rng = rand::rngs::OsRng;
28✔
644

28✔
645
        let (_sk, pk) = CommsPublicKey::random_keypair(&mut rng);
28✔
646
        let node_id = NodeId::from_key(&pk);
28✔
647

28✔
648
        let mut net_addresses = MultiaddressesWithStats::from_addresses_with_source(vec![], &PeerAddressSource::Config);
28✔
649

650
        // Create 1 to 4 random addresses
651
        for _i in 1..=rand::thread_rng().gen_range(1..4) {
55✔
652
            let n = [
55✔
653
                rand::thread_rng().gen_range(1..255),
55✔
654
                rand::thread_rng().gen_range(1..255),
55✔
655
                rand::thread_rng().gen_range(1..255),
55✔
656
                rand::thread_rng().gen_range(1..255),
55✔
657
                rand::thread_rng().gen_range(5000..9000),
55✔
658
            ];
55✔
659
            let net_address = format!("/ip4/{}.{}.{}.{}/tcp/{}", n[0], n[1], n[2], n[3], n[4])
55✔
660
                .parse::<Multiaddr>()
55✔
661
                .unwrap();
55✔
662
            net_addresses.add_address(&net_address, &PeerAddressSource::Config);
55✔
663
        }
55✔
664

665
        let mut peer = Peer::new(
28✔
666
            pk,
28✔
667
            node_id,
28✔
668
            net_addresses,
28✔
669
            PeerFlags::default(),
28✔
670
            features,
28✔
671
            Default::default(),
28✔
672
            Default::default(),
28✔
673
        );
28✔
674
        if ban {
28✔
675
            peer.ban_for(Duration::from_secs(600), "".to_string());
1✔
676
        }
27✔
677
        peer
28✔
678
    }
28✔
679

680
    #[test]
    fn test_in_network_region() {
        let peer_storage = get_peer_storage_sql_test_db().unwrap();

        // Five communication nodes followed by four clients
        let mut nodes: Vec<Peer> = (0..5)
            .map(|_| create_test_peer(PeerFeatures::COMMUNICATION_NODE, false))
            .chain((0..4).map(|_| create_test_peer(PeerFeatures::COMMUNICATION_CLIENT, false)))
            .collect();

        for peer in &nodes {
            peer_storage.add_or_update_peer(peer.clone()).unwrap();
        }

        let main_peer_node_id = peer_storage.this_peer_identity().node_id;

        // Order peers from nearest to farthest relative to this node
        nodes.sort_by_key(|peer| peer.node_id.distance(&main_peer_node_id));

        let stored = peer_storage.peer_db.get_all_peers(None).unwrap();
        assert_eq!(stored.len(), 9);

        let close_node = &nodes.first().unwrap().node_id;
        let far_node = &nodes.last().unwrap().node_id;

        // This node is always inside its own region
        assert!(peer_storage.in_network_region(&main_peer_node_id, 1).unwrap());

        // The nearest peer falls inside a region of size 1
        assert!(peer_storage.in_network_region(close_node, 1).unwrap());

        // With all 9 peers counted, even the farthest is inside the region
        assert!(peer_storage.in_network_region(far_node, 9).unwrap());

        // A small region excludes the farthest peer
        assert!(!peer_storage.in_network_region(far_node, 3).unwrap());
    }
719

720
    #[test]
    fn get_just_seeds() {
        let peer_storage = get_peer_storage_sql_test_db().unwrap();

        // Insert 5 peers flagged as seeds; keep them around to verify retrieval.
        let seed_peers: Vec<_> = (0..5)
            .map(|_| {
                let mut seed = create_test_peer(PeerFeatures::COMMUNICATION_NODE, false);
                seed.add_flags(PeerFlags::SEED);
                seed
            })
            .collect();

        for seed in &seed_peers {
            peer_storage.add_or_update_peer(seed.clone()).unwrap();
        }

        // Insert 5 ordinary (non-seed) peers as noise.
        for _ in 0..5 {
            let regular = create_test_peer(PeerFeatures::COMMUNICATION_NODE, false);
            peer_storage.add_or_update_peer(regular).unwrap();
        }

        // Only the seed-flagged peers must come back, and each seed must be present.
        let retrieved_seeds = peer_storage.get_seed_peers().unwrap();
        assert_eq!(retrieved_seeds.len(), seed_peers.len());
        for seed in seed_peers {
            assert!(retrieved_seeds.iter().any(|p| p.node_id == seed.node_id));
        }
    }
749

750
    #[test]
    fn discovery_syncing_returns_correct_peers() {
        let peer_storage = get_peer_storage_sql_test_db().unwrap();

        // A timestamp older than the stale-peer threshold by a full minute.
        #[allow(clippy::cast_possible_wrap)] // Won't wrap around, numbers are static
        let stale_timestamp = Utc::now().timestamp() - (STALE_PEER_THRESHOLD_DURATION.as_secs() + 60) as i64;

        // Peer with no recorded connection history.
        let never_seen_peer = create_test_peer(PeerFeatures::COMMUNICATION_NODE, false);
        // Peer that is currently banned.
        let banned_peer = create_test_peer(PeerFeatures::COMMUNICATION_NODE, true);

        // Peer whose last connection attempt is past the staleness threshold.
        let mut stale_peer = create_test_peer(PeerFeatures::COMMUNICATION_NODE, false);
        let best_addr = stale_peer.addresses.best().unwrap().address().clone();
        let mut stale_addr = MultiaddrWithStats::new(best_addr, PeerAddressSource::Config);
        stale_addr.mark_last_attempted(DateTime::from_timestamp(stale_timestamp, 0).unwrap().naive_utc());
        stale_peer
            .addresses
            .merge(&MultiaddressesWithStats::from(vec![stale_addr]));

        // Peer seen just now — expected to be the only one eligible for syncing.
        let mut recent_peer = create_test_peer(PeerFeatures::COMMUNICATION_NODE, false);
        let recent_addr = recent_peer.addresses.addresses()[0].address().clone();
        recent_peer.addresses.mark_last_seen_now(&recent_addr);

        // Persist all four peers (same insertion order as before).
        for peer in [never_seen_peer, stale_peer, banned_peer, recent_peer] {
            assert!(peer_storage.add_or_update_peer(peer).is_ok());
        }

        assert_eq!(peer_storage.all(None).unwrap().len(), 4);

        // discovery_syncing must return only the recently-seen, unbanned node peer.
        let synced = peer_storage
            .discovery_syncing(100, &[], Some(PeerFeatures::COMMUNICATION_NODE))
            .unwrap();
        assert_eq!(synced.len(), 1);
    }
788
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc