
perf: cache the peer, atom count instead of always recomputing it (#4431)

Author: Charles Kerr, 2022-12-21 12:07:40 -06:00 (committed by GitHub)
parent cc9bc748b0
commit c74ac4f4ed
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
2 changed files with 33 additions and 20 deletions
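
The change replaces on-demand recomputation of the session-wide peer and atom counts with cached counters. Below is a minimal standalone sketch of the pattern, using a hypothetical Counted class rather than Transmission's own tr_peer / peer_atom types: each constructor bumps a static std::atomic<size_t>, each destructor decrements it, and the current count is read back with a single atomic load instead of walking any containers.

#include <atomic>
#include <cstddef>

// Hypothetical illustration of the counting pattern this commit applies;
// tr_peer and peer_atom do the same thing with n_peers_ and n_atoms_.
class Counted
{
public:
    Counted() noexcept
    {
        ++n_instances_; // one more live object
    }

    ~Counted()
    {
        --n_instances_; // one fewer live object
    }

    // note: copy/move construction is not counted in this sketch

    [[nodiscard]] static auto count() noexcept
    {
        return n_instances_.load(); // O(1): no per-torrent accumulation needed
    }

private:
    static inline auto n_instances_ = std::atomic<std::size_t>{};
};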


@@ -10,6 +10,7 @@
 #endif
 
 #include <array>
+#include <atomic>
 #include <cstdint> // uint8_t, uint32_t, uint64_t
 
 #include "transmission.h"
@@ -213,6 +214,12 @@ public:
     virtual void requestBlocks(tr_block_span_t const* block_spans, size_t n_spans) = 0;
 
+    [[nodiscard]] static auto peer_count() noexcept
+    {
+        // the number of currently-connected peers
+        return n_peers_.load();
+    }
+
     struct RequestLimit
     {
         // How many blocks we could request.
@@ -254,6 +261,9 @@ public:
     // how many requests we made to this peer and then canceled
     tr_recentHistory<uint16_t> cancels_sent_to_peer;
+
+private:
+    static inline auto n_peers_ = std::atomic<size_t>{};
 };
 
 /***
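
Both counters are declared as static inline data members, so the declaration in the header doubles as the definition and no separate out-of-class definition in a .cc file is needed (a C++17 inline-variable feature). A tiny sketch of that declaration style, with a hypothetical name:

#include <atomic>
#include <cstddef>

struct Example
{
    // C++17 inline variable: this is both the declaration and the definition,
    // so no 'std::atomic<std::size_t> Example::counter_;' is needed elsewhere.
    static inline auto counter_ = std::atomic<std::size_t>{};
};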


@@ -15,7 +15,6 @@
 #include <map>
 #include <iterator> // std::back_inserter
 #include <memory>
-#include <numeric> // std::accumulate
 #include <optional>
 #include <tuple> // std::tie
 #include <utility>
@@ -159,6 +158,18 @@ struct peer_atom
         , fromBest{ from }
         , flags{ flags_in }
     {
+        ++n_atoms_;
+    }
+
+    ~peer_atom()
+    {
+        [[maybe_unused]] auto const n_prev = n_atoms_--;
+        TR_ASSERT(n_prev > 0U);
+    }
+
+    [[nodiscard]] static auto atom_count() noexcept
+    {
+        return n_atoms_.load();
     }
 
     [[nodiscard]] constexpr auto isSeed() const noexcept
@@ -287,6 +298,8 @@ private:
     // the minimum we'll wait before attempting to reconnect to a peer
     static auto constexpr MinimumReconnectIntervalSecs = int{ 5 };
 
+    static auto inline n_atoms_ = std::atomic<size_t>{};
+
 };
 
 using Handshakes = std::map<tr_address, tr_handshake>;
@@ -587,6 +600,7 @@ tr_peer::tr_peer(tr_torrent const* tor, peer_atom* atom_in)
     , atom{ atom_in }
     , blame{ tor->blockCount() }
 {
+    ++n_peers_;
 }
 
 tr_peer::~tr_peer()
@@ -600,6 +614,9 @@ tr_peer::~tr_peer()
     {
         atom->is_connected = false;
     }
+
+    [[maybe_unused]] auto const n_prev = n_peers_--;
+    TR_ASSERT(n_prev > 0U);
 }
 
 /**
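
The destructors capture the value returned by the atomic post-decrement so they can assert the counter was non-zero before this object was removed; [[maybe_unused]] keeps builds where TR_ASSERT compiles to nothing warning-free. A sketch of the same idiom with a plain assert and hypothetical names, not Transmission code:

#include <atomic>
#include <cassert>
#include <cstddef>

static auto counter = std::atomic<std::size_t>{ 1 };

void decrement_checked()
{
    // atomic post-decrement returns the value held *before* the decrement
    [[maybe_unused]] auto const n_prev = counter--;
    assert(n_prev > 0U); // underflow here would mean unbalanced increments/decrements
}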
@@ -2371,13 +2388,8 @@ void enforceTorrentPeerLimit(tr_swarm* swarm)
 void enforceSessionPeerLimit(tr_session* session)
 {
     // do we have too many peers?
-    auto const& torrents = session->torrents();
-    size_t const n_peers = std::accumulate(
-        std::begin(torrents),
-        std::end(torrents),
-        size_t{},
-        [](size_t sum, tr_torrent const* tor) { return sum + tor->swarm->peerCount(); });
-    size_t const max = session->peerLimit();
+    auto const n_peers = tr_peer::peer_count();
+    auto const max = session->peerLimit();
     if (n_peers <= max)
     {
         return;
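
For context on the perf win: the removed code recomputed the session-wide peer total with std::accumulate over every torrent's swarm each time the limit was enforced, whereas the cached counter is a single atomic load. A self-contained sketch of the before/after cost, with a hypothetical Swarm type standing in for tr_swarm:

#include <cstddef>
#include <numeric>
#include <vector>

struct Swarm
{
    std::size_t peer_count = 0;
};

// before: O(number of torrents) on every call
std::size_t recomputed_total(std::vector<Swarm> const& swarms)
{
    return std::accumulate(
        std::begin(swarms),
        std::end(swarms),
        std::size_t{},
        [](std::size_t sum, Swarm const& swarm) { return sum + swarm.peer_count; });
}

// after: the total is maintained incrementally in the ctor/dtor,
// so tr_peer::peer_count() is just one atomic load -- O(1) per call.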
@@ -2656,24 +2668,15 @@ struct peer_candidate
     // leave 5% of connection slots for incoming connections -- ticket #2609
     auto const max_candidates = static_cast<size_t>(session->peerLimit() * 0.95);
 
-    /* count how many peers and atoms we've got */
-    auto atom_count = size_t{};
-    auto peer_count = size_t{};
-    for (auto const* const tor : session->torrents())
-    {
-        auto const* const swarm = tor->swarm;
-        atom_count += std::size(swarm->pool);
-        peer_count += swarm->peerCount();
-    }
-
-    /* don't start any new handshakes if we're full up */
+    // don't start any new handshakes if we're full up
+    auto const peer_count = tr_peer::peer_count();
     if (max_candidates <= peer_count)
     {
         return {};
     }
 
     auto candidates = std::vector<peer_candidate>{};
-    candidates.reserve(atom_count);
+    candidates.reserve(peer_atom::atom_count());
 
     /* populate the candidate array */
     auto salter = tr_salt_shaker{};
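
As a closing usage note, here is a tiny hypothetical check of the invariant guarded by TR_ASSERT(n_prev > 0U), reusing the Counted sketch from the top of this page: the count rises and falls with object lifetimes and ends back at zero when construction and destruction stay balanced.

#include <cassert>

int main()
{
    assert(Counted::count() == 0);
    {
        [[maybe_unused]] auto const a = Counted{};
        [[maybe_unused]] auto const b = Counted{};
        assert(Counted::count() == 2); // two live instances
    }
    assert(Counted::count() == 0); // destructors brought the shared count back down
    return 0;
}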