perf: cache the peer and atom counts instead of recomputing them (#4431)

Charles Kerr 2022-12-21 12:07:40 -06:00 committed by GitHub
parent cc9bc748b0
commit c74ac4f4ed
2 changed files with 33 additions and 20 deletions
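In brief, the change swaps an O(number-of-torrents) std::accumulate walk for a cached count: each tr_peer and peer_atom bumps a static std::atomic<size_t> in its constructor and drops it in its destructor, so the current totals become a single atomic load. A minimal standalone sketch of that pattern follows; Counted, n_instances_, and instance_count are illustrative names, not the commit's identifiers.

#include <atomic>
#include <cassert>
#include <cstddef>

// Sketch: every live instance is tracked by a static atomic counter that is
// incremented on construction and decremented on destruction.
class Counted
{
public:
    Counted() noexcept
    {
        ++n_instances_; // atomic increment, no lock needed
    }

    // a copy would skip the increment but still run the destructor, so forbid copies
    Counted(Counted const&) = delete;
    Counted& operator=(Counted const&) = delete;

    ~Counted()
    {
        [[maybe_unused]] auto const n_prev = n_instances_--; // post-decrement returns the old value
        assert(n_prev > 0U); // underflow would mean the bookkeeping is broken
    }

    // O(1) atomic load, replacing a walk over every torrent's swarm
    [[nodiscard]] static auto instance_count() noexcept
    {
        return n_instances_.load();
    }

private:
    static inline auto n_instances_ = std::atomic<size_t>{};
};

int main()
{
    [[maybe_unused]] auto const outer = Counted{};
    assert(Counted::instance_count() == 1U);
    {
        [[maybe_unused]] auto const inner = Counted{};
        assert(Counted::instance_count() == 2U);
    }
    assert(Counted::instance_count() == 1U);
}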


@@ -10,6 +10,7 @@
#endif
#include <array>
#include <atomic>
#include <cstdint> // uint8_t, uint32_t, uint64_t
#include "transmission.h"
@@ -213,6 +214,12 @@ public:
virtual void requestBlocks(tr_block_span_t const* block_spans, size_t n_spans) = 0;
[[nodiscard]] static auto peer_count() noexcept
{
// the number of currently-connected peers
return n_peers_.load();
}
struct RequestLimit
{
// How many blocks we could request.
@@ -254,6 +261,9 @@ public:
// how many requests we made to this peer and then canceled
tr_recentHistory<uint16_t> cancels_sent_to_peer;
private:
static inline auto n_peers_ = std::atomic<size_t>{};
};
/***


@@ -15,7 +15,6 @@
#include <map>
#include <iterator> // std::back_inserter
#include <memory>
#include <numeric> // std::accumulate
#include <optional>
#include <tuple> // std::tie
#include <utility>
@@ -159,6 +158,18 @@ struct peer_atom
, fromBest{ from }
, flags{ flags_in }
{
++n_atoms_;
}
~peer_atom()
{
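// post-decrement on a std::atomic is fetch_sub(1) and returns the prior value,
// so asserting n_prev > 0 guards against counter underflow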
[[maybe_unused]] auto const n_prev = n_atoms_--;
TR_ASSERT(n_prev > 0U);
}
[[nodiscard]] static auto atom_count() noexcept
{
return n_atoms_.load();
}
[[nodiscard]] constexpr auto isSeed() const noexcept
@@ -287,6 +298,8 @@ private:
// the minimum we'll wait before attempting to reconnect to a peer
static auto constexpr MinimumReconnectIntervalSecs = int{ 5 };
static auto inline n_atoms_ = std::atomic<size_t>{};
};
using Handshakes = std::map<tr_address, tr_handshake>;
@@ -587,6 +600,7 @@ tr_peer::tr_peer(tr_torrent const* tor, peer_atom* atom_in)
, atom{ atom_in }
, blame{ tor->blockCount() }
{
++n_peers_;
}
tr_peer::~tr_peer()
@@ -600,6 +614,9 @@ tr_peer::~tr_peer()
{
atom->is_connected = false;
}
[[maybe_unused]] auto const n_prev = n_peers_--;
TR_ASSERT(n_prev > 0U);
}
/**
@@ -2371,13 +2388,8 @@ void enforceTorrentPeerLimit(tr_swarm* swarm)
void enforceSessionPeerLimit(tr_session* session)
{
// do we have too many peers?
auto const& torrents = session->torrents();
size_t const n_peers = std::accumulate(
std::begin(torrents),
std::end(torrents),
size_t{},
[](size_t sum, tr_torrent const* tor) { return sum + tor->swarm->peerCount(); });
size_t const max = session->peerLimit();
auto const n_peers = tr_peer::peer_count();
auto const max = session->peerLimit();
if (n_peers <= max)
{
return;
@@ -2656,24 +2668,15 @@ struct peer_candidate
// leave 5% of connection slots for incoming connections -- ticket #2609
auto const max_candidates = static_cast<size_t>(session->peerLimit() * 0.95);
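// e.g. a session peer limit of 200 yields 190 candidate slots, holding 10 back for incoming peers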
/* count how many peers and atoms we've got */
auto atom_count = size_t{};
auto peer_count = size_t{};
for (auto const* const tor : session->torrents())
{
auto const* const swarm = tor->swarm;
atom_count += std::size(swarm->pool);
peer_count += swarm->peerCount();
}
/* don't start any new handshakes if we're full up */
// don't start any new handshakes if we're full up
auto const peer_count = tr_peer::peer_count();
if (max_candidates <= peer_count)
{
return {};
}
auto candidates = std::vector<peer_candidate>{};
candidates.reserve(atom_count);
candidates.reserve(peer_atom::atom_count());
/* populate the candidate array */
auto salter = tr_salt_shaker{};