refactor: swarm (#2103)

* refactor: encapsulate request tracking in a class

Introduces a new class to peer-mgr, `ClientRequests`, which tracks what
active requests we've got pending: which blocks, when the requests were
sent, and who they were sent to.

This shouldn't change peer-mgr behavior. Its goal is to carve out some
of peer-mgr's data structures and encapsulate them behind an API that's
simpler to understand.

* refactor: move ActiveRequests to its own file

* perf: avoid duplicate call to tr_cpMissingBlocksInPiece
This commit is contained in:
Charles Kerr 2021-11-19 12:37:38 -06:00 committed by GitHub
parent 0b58e7afa7
commit 073c6af1d6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 1323 additions and 834 deletions

View File

@ -361,6 +361,10 @@
C1FEE57B1C3223CC00D62832 /* watchdir.h in Headers */ = {isa = PBXBuildFile; fileRef = C1FEE5761C3223CC00D62832 /* watchdir.h */; };
CAB35C64252F6F5E00552A55 /* mime-types.h in Headers */ = {isa = PBXBuildFile; fileRef = CAB35C62252F6F5E00552A55 /* mime-types.h */; };
E138A9780C04D88F00C5426C /* ProgressGradients.mm in Sources */ = {isa = PBXBuildFile; fileRef = E138A9760C04D88F00C5426C /* ProgressGradients.mm */; };
ED8A163F2735A8AA000D61F9 /* peer-mgr-active-requests.h in Headers */ = {isa = PBXBuildFile; fileRef = ED8A163B2735A8AA000D61F9 /* peer-mgr-active-requests.h */; };
ED8A16402735A8AA000D61F9 /* peer-mgr-active-requests.cc in Sources */ = {isa = PBXBuildFile; fileRef = ED8A163C2735A8AA000D61F9 /* peer-mgr-active-requests.cc */; };
ED8A16412735A8AA000D61F9 /* peer-mgr-wishlist.h in Headers */ = {isa = PBXBuildFile; fileRef = ED8A163D2735A8AA000D61F9 /* peer-mgr-wishlist.h */; };
ED8A16422735A8AA000D61F9 /* peer-mgr-wishlist.cc in Sources */ = {isa = PBXBuildFile; fileRef = ED8A163E2735A8AA000D61F9 /* peer-mgr-wishlist.cc */; };
EDBDFA9E25AFCCA60093D9C1 /* evutil_time.c in Sources */ = {isa = PBXBuildFile; fileRef = EDBDFA9D25AFCCA60093D9C1 /* evutil_time.c */; };
F63480631E1D7274005B9E09 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = F63480621E1D7274005B9E09 /* Images.xcassets */; };
/* End PBXBuildFile section */
@ -514,6 +518,10 @@
4D36BA660CA2F00800A63CA5 /* peer-io.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "peer-io.h"; sourceTree = "<group>"; };
4D36BA680CA2F00800A63CA5 /* peer-mgr.cc */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = "peer-mgr.cc"; sourceTree = "<group>"; };
4D36BA690CA2F00800A63CA5 /* peer-mgr.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "peer-mgr.h"; sourceTree = "<group>"; };
ED8A163B2735A8AA000D61F9 /* peer-mgr-active-requests.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "peer-mgr-active-requests.h"; sourceTree = "<group>"; };
ED8A163C2735A8AA000D61F9 /* peer-mgr-active-requests.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "peer-mgr-active-requests.cc"; sourceTree = "<group>"; };
ED8A163D2735A8AA000D61F9 /* peer-mgr-wishlist.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "peer-mgr-wishlist.h"; sourceTree = "<group>"; };
ED8A163E2735A8AA000D61F9 /* peer-mgr-wishlist.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "peer-mgr-wishlist.cc"; sourceTree = "<group>"; };
4D36BA6A0CA2F00800A63CA5 /* peer-msgs.cc */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = "peer-msgs.cc"; sourceTree = "<group>"; };
4D36BA6B0CA2F00800A63CA5 /* peer-msgs.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "peer-msgs.h"; sourceTree = "<group>"; };
4D36BA6C0CA2F00800A63CA5 /* ptrarray.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ptrarray.h; sourceTree = "<group>"; };
@ -1413,6 +1421,10 @@
4D36BA660CA2F00800A63CA5 /* peer-io.h */,
4D36BA680CA2F00800A63CA5 /* peer-mgr.cc */,
4D36BA690CA2F00800A63CA5 /* peer-mgr.h */,
ED8A163C2735A8AA000D61F9 /* peer-mgr-active-requests.cc */,
ED8A163B2735A8AA000D61F9 /* peer-mgr-active-requests.h */,
ED8A163E2735A8AA000D61F9 /* peer-mgr-wishlist.cc */,
ED8A163D2735A8AA000D61F9 /* peer-mgr-wishlist.h */,
4D36BA6A0CA2F00800A63CA5 /* peer-msgs.cc */,
4D36BA6B0CA2F00800A63CA5 /* peer-msgs.h */,
A292A6E40DFB45E5004B9C0A /* peer-common.h */,
@ -1838,6 +1850,7 @@
C1FEE5771C3223CC00D62832 /* watchdir-common.h in Headers */,
BEFC1E4E0C07861A00B0BB3C /* inout.h in Headers */,
BEFC1E520C07861A00B0BB3C /* fdlimit.h in Headers */,
ED8A163F2735A8AA000D61F9 /* peer-mgr-active-requests.h in Headers */,
BEFC1E550C07861A00B0BB3C /* completion.h in Headers */,
BEFC1E570C07861A00B0BB3C /* clients.h in Headers */,
A2BE9C530C1E4AF7002D16E6 /* makemeta.h in Headers */,
@ -1865,6 +1878,7 @@
A22CFCA90FC24ED80009BD3E /* tr-dht.h in Headers */,
0A6169A80FE5C9A200C66CE6 /* bitfield.h in Headers */,
A25964A7106D73A800453B31 /* announcer.h in Headers */,
ED8A16412735A8AA000D61F9 /* peer-mgr-wishlist.h in Headers */,
4D8017EB10BBC073008A4AF2 /* torrent-magnet.h in Headers */,
4D80185A10BBC0B0008A4AF2 /* magnet-metainfo.h in Headers */,
A209EE5D1144B51E002B02D1 /* history.h in Headers */,
@ -2415,6 +2429,7 @@
A2AAB65F0DE0CF6200E04DDA /* rpcimpl.cc in Sources */,
BEFC1E2D0C07861A00B0BB3C /* upnp.cc in Sources */,
A2AAB65C0DE0CF6200E04DDA /* rpc-server.cc in Sources */,
ED8A16402735A8AA000D61F9 /* peer-mgr-active-requests.cc in Sources */,
BEFC1E2F0C07861A00B0BB3C /* session.cc in Sources */,
BEFC1E320C07861A00B0BB3C /* torrent.cc in Sources */,
BEFC1E360C07861A00B0BB3C /* port-forwarding.cc in Sources */,
@ -2440,6 +2455,7 @@
C10C644D1D9AF328003C1B4C /* session-id.cc in Sources */,
4D36BA770CA2F00800A63CA5 /* peer-mgr.cc in Sources */,
C1077A50183EB29600634C22 /* file-posix.cc in Sources */,
ED8A16422735A8AA000D61F9 /* peer-mgr-wishlist.cc in Sources */,
4D36BA790CA2F00800A63CA5 /* peer-msgs.cc in Sources */,
A25D2CBD0CF4C73E0096A262 /* stats.cc in Sources */,
A201527E0D1C270F0081714F /* torrent-ctor.cc in Sources */,

View File

@ -37,6 +37,8 @@ set(PROJECT_FILES
net.cc
peer-io.cc
peer-mgr.cc
peer-mgr-active-requests.cc
peer-mgr-wishlist.cc
peer-msgs.cc
platform.cc
platform-quota.cc
@ -163,6 +165,8 @@ set(${PROJECT_NAME}_PRIVATE_HEADERS
peer-common.h
peer-io.h
peer-mgr.h
peer-mgr-active-requests.h
peer-mgr-wishlist.h
peer-msgs.h
peer-socket.h
platform-quota.h

View File

@ -17,6 +17,7 @@
#include <optional>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "transmission.h"

View File

@ -326,6 +326,11 @@ tr_dh_secret_t tr_dh_agree(tr_dh_ctx_t raw_handle, uint8_t const* other_public_k
bool tr_rand_buffer(void* buffer, size_t length)
{
if (length == 0)
{
return true;
}
TR_ASSERT(buffer != nullptr);
return check_result(CCRandomGenerateBytes(buffer, length));

View File

@ -284,6 +284,11 @@ tr_dh_secret_t tr_dh_agree(tr_dh_ctx_t raw_handle, uint8_t const* other_public_k
bool tr_rand_buffer(void* buffer, size_t length)
{
if (length == 0)
{
return true;
}
TR_ASSERT(buffer != nullptr);
tr_lock* rng_lock = get_rng_lock();

View File

@ -398,6 +398,11 @@ void tr_x509_cert_free(tr_x509_cert_t handle)
bool tr_rand_buffer(void* buffer, size_t length)
{
if (length == 0)
{
return true;
}
TR_ASSERT(buffer != nullptr);
return check_result(RAND_bytes(static_cast<unsigned char*>(buffer), (int)length));

View File

@ -281,6 +281,11 @@ tr_dh_secret_t tr_dh_agree(tr_dh_ctx_t raw_handle, uint8_t const* other_public_k
bool tr_rand_buffer(void* buffer, size_t length)
{
if (length == 0)
{
return true;
}
TR_ASSERT(buffer != nullptr);
tr_lock* rng_lock = get_rng_lock();

View File

@ -90,9 +90,6 @@ public:
/* how many requests the peer has made that we haven't responded to yet */
int pendingReqsToClient = 0;
/* how many requests we've made and are currently awaiting a response for */
int pendingReqsToPeer = 0;
tr_session* const session;
/* Hook to private peer-mgr information */

View File

@ -0,0 +1,243 @@
/*
* This file Copyright (C) 2021 Mnemosyne LLC
*
* It may be used under the GNU GPL versions 2 or 3
* or any future license endorsed by Mnemosyne LLC.
*
*/
#include <algorithm>
#include <functional>
#include <memory>
#include <numeric>
#include <utility>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#define LIBTRANSMISSION_PEER_MODULE
#include "peer-mgr-active-requests.h"
namespace
{

// A (peer, timestamp) entry for a block's request set.
// Identity is the peer alone: `when` records when the request was
// sent but takes no part in equality, ordering, or hashing, so a
// set lookup can find an entry knowing only the peer.
struct peer_at
{
    tr_peer* peer;
    time_t when;

    peer_at(tr_peer* p, time_t w)
        : peer{ p }
        , when{ w }
    {
    }

    int compare(peer_at const& that) const // <=>
    {
        if (peer < that.peer)
        {
            return -1;
        }

        if (that.peer < peer)
        {
            return 1;
        }

        return 0;
    }

    bool operator==(peer_at const& that) const
    {
        return compare(that) == 0;
    }

    bool operator<(peer_at const& that) const
    {
        return compare(that) < 0;
    }
};

// hashes on the peer pointer only, consistent with peer_at equality
struct PeerAtHash
{
    std::size_t operator()(peer_at const& pa) const noexcept
    {
        return std::hash<tr_peer*>{}(pa.peer);
    }
};

} // namespace
// Private implementation for ActiveRequests (pimpl).
// Maintains two views of the same data: `blocks_` maps each block to
// the set of (peer, sent-time) requests in flight for it, and `count_`
// caches a per-peer request count so per-peer queries are O(1).
class ActiveRequests::Impl
{
public:
    // total number of active requests across all peers and blocks
    size_t size() const
    {
        return size_;
    }

    // number of active requests sent to `peer`
    size_t count(tr_peer const* peer) const
    {
        auto const it = count_.find(peer);
        return it != std::end(count_) ? it->second : size_t{};
    }

    // bump the per-peer and total counters when a request is added
    void incCount(tr_peer const* peer)
    {
        ++count_[peer];
        ++size_;
    }

    // Decrement the per-peer and total counters when a request is removed.
    // The per-peer entry is erased when it drops to zero so that `count_`
    // only ever holds peers with outstanding requests.
    void decCount(tr_peer const* peer)
    {
        auto it = count_.find(peer);

        TR_ASSERT(it != std::end(count_));
        TR_ASSERT(it->second > 0);
        TR_ASSERT(size_ > 0);

        // defensive re-check so release builds (asserts disabled)
        // can't underflow the counters on a bogus peer
        if (it != std::end(count_))
        {
            if (--it->second == 0)
            {
                count_.erase(it);
            }

            --size_;
        }
    }

    // per-peer request counts; invariant: values sum to `size_`
    std::unordered_map<tr_peer const*, size_t> count_;

    // block -> set of requests (peer + send time) currently in flight
    std::unordered_map<tr_block_index_t, std::unordered_set<peer_at, PeerAtHash>> blocks_;

private:
    size_t size_ = 0;
};
ActiveRequests::ActiveRequests()
    : impl_{ std::make_unique<Impl>() }
{
}

// defined out-of-line so unique_ptr<Impl>'s deleter is instantiated
// here, where Impl is a complete type
ActiveRequests::~ActiveRequests() = default;
// Record that we've requested `block` from `peer` at time `when`.
// Returns false if that (block, peer) request was already tracked;
// duplicates are rejected because requests are keyed on (block, peer)
// alone, regardless of timestamp.
bool ActiveRequests::add(tr_block_index_t block, tr_peer* peer, time_t when)
{
    auto& requests_for_block = impl_->blocks_[block];

    if (!requests_for_block.emplace(peer, when).second)
    {
        return false; // already had an active request to this peer
    }

    impl_->incCount(peer);
    return true;
}
// Remove the request to `peer` for `block`, if one is tracked.
// Returns true if a request was actually removed. When a block's
// request set becomes empty its map entry is dropped entirely.
bool ActiveRequests::remove(tr_block_index_t block, tr_peer const* peer)
{
    auto const block_it = impl_->blocks_.find(block);
    if (block_it == std::end(impl_->blocks_))
    {
        return false;
    }

    // lookups are keyed on the peer; the timestamp field is ignored
    auto const key = peer_at{ const_cast<tr_peer*>(peer), 0 };
    if (block_it->second.erase(key) == 0)
    {
        return false;
    }

    impl_->decCount(peer);

    if (std::empty(block_it->second))
    {
        impl_->blocks_.erase(block_it);
    }

    return true;
}
// Remove every request we've sent to `peer` and return the blocks
// those requests were for.
std::vector<tr_block_index_t> ActiveRequests::remove(tr_peer const* peer)
{
    auto const key = peer_at{ const_cast<tr_peer*>(peer), 0 };

    // first pass: collect the blocks `peer` has requests for.
    // (collected up front so we don't mutate blocks_ mid-iteration)
    auto blocks = std::vector<tr_block_index_t>{};
    blocks.reserve(std::size(impl_->blocks_));
    for (auto const& [block, requests] : impl_->blocks_)
    {
        if (requests.count(key) != 0)
        {
            blocks.push_back(block);
        }
    }

    // second pass: drop each of those requests
    for (auto const block : blocks)
    {
        remove(block, peer);
    }

    return blocks;
}
// Remove every request for `block` and return the peers those
// requests had been sent to.
std::vector<tr_peer*> ActiveRequests::remove(tr_block_index_t block)
{
    auto peers = std::vector<tr_peer*>{};

    auto const it = impl_->blocks_.find(block);
    if (it != std::end(impl_->blocks_))
    {
        // copy the peers out before erasing the block's entry
        peers.reserve(std::size(it->second));
        for (auto const& sent : it->second)
        {
            peers.push_back(sent.peer);
        }

        impl_->blocks_.erase(it);
    }

    // keep the per-peer counters in sync with the erased requests
    for (auto* const peer : peers)
    {
        impl_->decCount(peer);
    }

    return peers;
}
// return true if there's an active request to `peer` for `block`
bool ActiveRequests::has(tr_block_index_t block, tr_peer const* peer) const
{
    auto const it = impl_->blocks_.find(block);
    if (it == std::end(impl_->blocks_))
    {
        return false;
    }

    // lookups are keyed on the peer; the timestamp field is ignored
    return it->second.count(peer_at{ const_cast<tr_peer*>(peer), 0 }) != 0;
}
// count how many peers we're asking for `block`
size_t ActiveRequests::count(tr_block_index_t block) const
{
    // NB: use find() rather than operator[] here. operator[] would
    // default-insert an empty request set for every absent block
    // queried -- mutating state from a logically-const accessor and
    // leaking empty entries into blocks_ over time.
    auto const it = impl_->blocks_.find(block);
    return it != std::end(impl_->blocks_) ? std::size(it->second) : size_t{};
}
// count how many active block requests we have to `peer`
// (O(1); delegates to the per-peer counter maintained by Impl)
size_t ActiveRequests::count(tr_peer const* peer) const
{
    return impl_->count(peer);
}
// return the total number of active requests
// (O(1); the running total is maintained on every add/remove)
size_t ActiveRequests::size() const
{
    return impl_->size();
}
// Return all (block, peer) requests whose send time is strictly
// earlier than `when` -- i.e. requests old enough to be considered
// timed out by the caller.
std::vector<std::pair<tr_block_index_t, tr_peer*>> ActiveRequests::sentBefore(time_t when) const
{
    auto ret = std::vector<std::pair<tr_block_index_t, tr_peer*>>{};
    ret.reserve(std::size(impl_->blocks_));

    for (auto const& [block, requests] : impl_->blocks_)
    {
        for (auto const& sent : requests)
        {
            if (sent.when < when)
            {
                ret.emplace_back(block, sent.peer);
            }
        }
    }

    return ret;
}

View File

@ -0,0 +1,64 @@
/*
* This file Copyright (C) 2021 Mnemosyne LLC
*
* It may be used under the GNU GPL versions 2 or 3
* or any future license endorsed by Mnemosyne LLC.
*
*/
#pragma once
#ifndef LIBTRANSMISSION_PEER_MODULE
#error only the libtransmission peer module should #include this header.
#endif
#include <cstddef> // size_t
#include <ctime> // time_t
#include <memory>
#include <utility>
#include <vector>
#include "transmission.h" // tr_block_index_t
#include "peer-common.h" // tr_peer*
/**
 * Bookkeeping for the active requests we have --
 * e.g. the requests we've sent and are awaiting a response.
 *
 * Requests are keyed on (block, peer); the send time is recorded so
 * stale requests can be found with sentBefore(). Implementation
 * details live behind a pimpl (see peer-mgr-active-requests.cc).
 */
class ActiveRequests
{
public:
    ActiveRequests();
    ~ActiveRequests();

    // record that we've requested `block` from `peer`
    // (returns false if that request was already being tracked)
    bool add(tr_block_index_t block, tr_peer* peer, time_t when);

    // erase any record of a request for `block` from `peer`
    // (returns true if a request was actually removed)
    bool remove(tr_block_index_t block, tr_peer const* peer);

    // erase any record of requests to `peer` and return the previously-associated blocks
    std::vector<tr_block_index_t> remove(tr_peer const* peer);

    // erase any record of requests to `block` and return the previously-associated peers
    std::vector<tr_peer*> remove(tr_block_index_t block);

    // return true if there's a record of a request for `block` from `peer`
    [[nodiscard]] bool has(tr_block_index_t block, tr_peer const* peer) const;

    // count how many peers we're asking for `block`
    [[nodiscard]] size_t count(tr_block_index_t block) const;

    // count how many active block requests we have to `peer`
    [[nodiscard]] size_t count(tr_peer const* peer) const;

    // return the total number of active requests
    [[nodiscard]] size_t size() const;

    // returns the active requests sent before `when`
    [[nodiscard]] std::vector<std::pair<tr_block_index_t, tr_peer*>> sentBefore(time_t when) const;

private:
    class Impl;
    std::unique_ptr<Impl> const impl_;
};

View File

@ -0,0 +1,199 @@
/*
* This file Copyright (C) 2021 Mnemosyne LLC
*
* It may be used under the GNU GPL versions 2 or 3
* or any future license endorsed by Mnemosyne LLC.
*
*/
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <numeric>
#include <utility>
#include <vector>
#define LIBTRANSMISSION_PEER_MODULE
#include "transmission.h"
#include "crypto-utils.h" // tr_rand_buffer()
#include "peer-mgr-wishlist.h"
namespace
{

// a piece we'd like to request blocks from, annotated with the
// criteria used to rank it against the other candidate pieces
struct Candidate
{
    tr_piece_index_t piece;
    size_t n_blocks_missing;
    tr_priority_t priority;
    uint8_t salt;

    Candidate(tr_piece_index_t piece_in, size_t missing_in, tr_priority_t priority_in, uint8_t salt_in)
        : piece{ piece_in }
        , n_blocks_missing{ missing_in }
        , priority{ priority_in }
        , salt{ salt_in }
    {
    }

    int compare(Candidate const& that) const // <=>
    {
        // prefer pieces closer to completion
        if (n_blocks_missing != that.n_blocks_missing)
        {
            return n_blocks_missing < that.n_blocks_missing ? -1 : 1;
        }

        // prefer higher priority
        if (priority != that.priority)
        {
            return priority > that.priority ? -1 : 1;
        }

        // random salt as a tiebreaker, so equally-ranked pieces
        // aren't always requested in the same order
        if (salt != that.salt)
        {
            return salt < that.salt ? -1 : 1;
        }

        return 0;
    }

    bool operator<(Candidate const& that) const // less than
    {
        return compare(that) < 0;
    }
};

// build the list of candidate pieces: every piece we're allowed to
// request that still has missing blocks, each tagged with a random salt
std::vector<Candidate> getCandidates(Wishlist::PeerInfo const& peer_info)
{
    // count up the pieces that we still want
    auto wanted_pieces = std::vector<std::pair<tr_piece_index_t, size_t>>{};
    auto const n_pieces = peer_info.countAllPieces();
    wanted_pieces.reserve(n_pieces);
    for (tr_piece_index_t i = 0; i < n_pieces; ++i)
    {
        if (!peer_info.clientCanRequestPiece(i))
        {
            continue;
        }

        size_t const n_missing = peer_info.countMissingBlocks(i);
        if (n_missing == 0)
        {
            continue;
        }

        wanted_pieces.emplace_back(i, n_missing);
    }

    // transform them into candidates
    auto const n = std::size(wanted_pieces);
    auto saltbuf = std::vector<char>(n);
    tr_rand_buffer(std::data(saltbuf), n);
    auto candidates = std::vector<Candidate>{};
    candidates.reserve(n);
    for (size_t i = 0; i < n; ++i)
    {
        auto const [piece, n_missing] = wanted_pieces[i];
        candidates.emplace_back(piece, n_missing, peer_info.priority(piece), saltbuf[i]);
    }

    return candidates;
}

// Collapse a sorted, duplicate-free list of block indices into a
// minimal set of contiguous [first..last] ranges.
// NB: no `static` keyword needed; the enclosing anonymous namespace
// already gives this internal linkage (and matches getCandidates above).
std::vector<tr_block_range_t> makeRanges(tr_block_index_t const* sorted_blocks, size_t n_blocks)
{
    if (n_blocks == 0)
    {
        return {};
    }

    auto ranges = std::vector<tr_block_range_t>{};
    auto cur = tr_block_range_t{ sorted_blocks[0], sorted_blocks[0] };
    for (size_t i = 1; i < n_blocks; ++i)
    {
        if (cur.last + 1 == sorted_blocks[i])
        {
            cur.last = sorted_blocks[i];
        }
        else
        {
            ranges.push_back(cur);
            cur = tr_block_range_t{ sorted_blocks[i], sorted_blocks[i] };
        }
    }
    ranges.push_back(cur);

    return ranges;
}

} // namespace
// Pick up to `n_wanted_blocks` blocks to request from a peer, walking
// candidate pieces in rank order (closest-to-complete, then priority,
// then random salt) and returning the chosen blocks as contiguous ranges.
std::vector<tr_block_range_t> Wishlist::next(Wishlist::PeerInfo const& peer_info, size_t n_wanted_blocks)
{
    size_t n_blocks = 0;
    auto ranges = std::vector<tr_block_range_t>{};

    // sanity clause
    TR_ASSERT(n_wanted_blocks > 0);

    // We usually won't need all the candidates until endgame, so don't
    // waste cycles sorting all of them here. partial sort is enough.
    // NOTE(review): the loop below can run past `middle` if the sorted
    // prefix doesn't yield enough blocks; candidates beyond `middle`
    // are then visited in unspecified order -- presumably acceptable
    // since that's the endgame case. Confirm intent.
    auto candidates = getCandidates(peer_info);
    auto constexpr MaxSortedPieces = size_t{ 30 };
    auto const middle = std::min(std::size(candidates), MaxSortedPieces);
    std::partial_sort(std::begin(candidates), std::begin(candidates) + middle, std::end(candidates));

    for (auto const& candidate : candidates)
    {
        // do we have enough?
        if (n_blocks >= n_wanted_blocks)
        {
            break;
        }

        // walk the blocks in this piece
        auto const [first, last] = peer_info.blockRange(candidate.piece);
        auto blocks = std::vector<tr_block_index_t>{};
        blocks.reserve(last + 1 - first);
        for (tr_block_index_t block = first; block <= last && n_blocks + std::size(blocks) < n_wanted_blocks; ++block)
        {
            // don't request blocks we've already got
            if (!peer_info.clientCanRequestBlock(block))
            {
                continue;
            }

            // don't request from too many peers
            // (two peers per block are allowed during endgame)
            size_t const n_peers = peer_info.countActiveRequests(block);
            size_t const max_peers = peer_info.isEndgame() ? 2 : 1;
            if (n_peers >= max_peers)
            {
                continue;
            }

            blocks.push_back(block);
        }

        if (std::empty(blocks))
        {
            continue;
        }

        // copy the ranges into `ranges`
        // (`blocks` is ascending and duplicate-free, as makeRanges requires)
        auto const tmp = makeRanges(std::data(blocks), std::size(blocks));
        std::copy(std::begin(tmp), std::end(tmp), std::back_inserter(ranges));

        // tally how many blocks the new ranges cover
        n_blocks += std::accumulate(
            std::begin(tmp),
            std::end(tmp),
            size_t{},
            [](size_t sum, auto range) { return sum + range.last + 1 - range.first; });

        if (n_blocks >= n_wanted_blocks)
        {
            break;
        }
    }

    return ranges;
}

View File

@ -0,0 +1,38 @@
/*
* This file Copyright (C) 2021 Mnemosyne LLC
*
* It may be used under the GNU GPL versions 2 or 3
* or any future license endorsed by Mnemosyne LLC.
*
*/
#pragma once
#ifndef LIBTRANSMISSION_PEER_MODULE
#error only the libtransmission peer module should #include this header.
#endif
#include "transmission.h"
#include "torrent.h"
/**
 * Figures out what blocks we want to request next.
 */
class Wishlist
{
public:
    // Abstract view of the piece/block state that the wishlist needs,
    // so the selection logic can be exercised in unit tests without a
    // live torrent behind it.
    struct PeerInfo
    {
        // virtual dtor: implementations are used polymorphically, so
        // destroying one through a PeerInfo pointer must be safe
        virtual ~PeerInfo() = default;

        virtual bool clientCanRequestBlock(tr_block_index_t block) const = 0;
        virtual bool clientCanRequestPiece(tr_piece_index_t piece) const = 0;
        virtual bool isEndgame() const = 0;
        virtual size_t countActiveRequests(tr_block_index_t block) const = 0;
        virtual size_t countMissingBlocks(tr_piece_index_t piece) const = 0;
        virtual tr_block_range_t blockRange(tr_piece_index_t) const = 0;
        virtual tr_piece_index_t countAllPieces() const = 0;
        virtual tr_priority_t priority(tr_piece_index_t) const = 0;
    };

    // get a list of the next blocks that we should request from a peer
    std::vector<tr_block_range_t> next(PeerInfo const& peer_info, size_t n_wanted_blocks);
};

File diff suppressed because it is too large Load Diff

View File

@ -79,17 +79,13 @@ void tr_peerMgrSetUtpSupported(tr_torrent* tor, tr_address const* addr);
void tr_peerMgrSetUtpFailed(tr_torrent* tor, tr_address const* addr, bool failed);
void tr_peerMgrGetNextRequests(
tr_torrent* torrent,
tr_peer* peer,
int numwant,
tr_block_index_t* setme,
int* numgot,
bool get_intervals);
std::vector<tr_block_range_t> tr_peerMgrGetNextRequests(tr_torrent* torrent, tr_peer* peer, size_t numwant);
bool tr_peerMgrDidPeerRequest(tr_torrent const* torrent, tr_peer const* peer, tr_block_index_t block);
void tr_peerMgrRebuildRequests(tr_torrent* torrent);
void tr_peerMgrClientSentRequests(tr_torrent* torrent, tr_peer* peer, tr_block_range_t range);
size_t tr_peerMgrCountActiveRequestsToPeer(tr_torrent const* torrent, tr_peer const* peer);
void tr_peerMgrAddIncoming(tr_peerMgr* manager, tr_address* addr, tr_port port, struct tr_peer_socket const socket);

View File

@ -11,7 +11,9 @@
#include <cstdarg>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory> // std::unique_ptr
#include <optional>
#include <event2/buffer.h>
#include <event2/bufferevent.h>
@ -581,7 +583,7 @@ public:
bool clientSentLtepHandshake = false;
bool peerSentLtepHandshake = false;
int desiredRequestCount = 0;
size_t desired_request_count = 0;
int prefetchCount = 0;
@ -630,9 +632,8 @@ public:
struct tr_incoming incoming = {};
/* if the peer supports the Extension Protocol in BEP 10 and
supplied a reqq argument, it's stored here. Otherwise, the
value is zero and should be ignored. */
int64_t reqq = 0;
supplied a reqq argument, it's stored here. */
std::optional<size_t> reqq;
UniqueTimer pex_timer;
@ -2001,16 +2002,13 @@ static void updateDesiredRequestCount(tr_peerMsgsImpl* msgs)
/* there are lots of reasons we might not want to request any blocks... */
if (tr_torrentIsSeed(torrent) || !tr_torrentHasMetadata(torrent) || msgs->client_is_choked_ || !msgs->client_is_interested_)
{
msgs->desiredRequestCount = 0;
msgs->desired_request_count = 0;
}
else
{
int const floor = 4;
int const seconds = RequestBufSecs;
uint64_t const now = tr_time_msec();
/* Get the rate limit we should use.
* FIXME: this needs to consider all the other peers as well... */
* TODO: this needs to consider all the other peers as well... */
uint64_t const now = tr_time_msec();
auto rate_Bps = tr_peerGetPieceSpeed_Bps(msgs, now, TR_PEER_TO_CLIENT);
if (tr_torrentUsesSpeedLimit(torrent, TR_PEER_TO_CLIENT))
{
@ -2027,14 +2025,11 @@ static void updateDesiredRequestCount(tr_peerMsgsImpl* msgs)
/* use this desired rate to figure out how
* many requests we should send to this peer */
int const estimatedBlocksInPeriod = (rate_Bps * seconds) / torrent->blockSize;
msgs->desiredRequestCount = std::max(floor, estimatedBlocksInPeriod);
/* honor the peer's maximum request count, if specified */
if ((msgs->reqq > 0) && (msgs->desiredRequestCount > msgs->reqq))
{
msgs->desiredRequestCount = msgs->reqq;
}
size_t constexpr Floor = 32;
size_t constexpr Seconds = RequestBufSecs;
size_t const estimated_blocks_in_period = (rate_Bps * Seconds) / torrent->blockSize;
size_t const ceil = msgs->reqq ? *msgs->reqq : 250;
msgs->desired_request_count = std::clamp(estimated_blocks_in_period, Floor, ceil);
}
}
@ -2070,24 +2065,36 @@ static void updateMetadataRequests(tr_peerMsgsImpl* msgs, time_t now)
static void updateBlockRequests(tr_peerMsgsImpl* msgs)
{
if (tr_torrentIsPieceTransferAllowed(msgs->torrent, TR_PEER_TO_CLIENT) && msgs->desiredRequestCount > 0 &&
msgs->pendingReqsToPeer <= msgs->desiredRequestCount * 0.66)
if (!tr_torrentIsPieceTransferAllowed(msgs->torrent, TR_PEER_TO_CLIENT))
{
TR_ASSERT(msgs->is_client_interested());
TR_ASSERT(!msgs->is_client_choked());
return;
}
int const numwant = msgs->desiredRequestCount - msgs->pendingReqsToPeer;
auto const n_active = tr_peerMgrCountActiveRequestsToPeer(msgs->torrent, msgs);
if (n_active >= msgs->desired_request_count)
{
return;
}
auto* const blocks = tr_new(tr_block_index_t, numwant);
auto n = int{};
tr_peerMgrGetNextRequests(msgs->torrent, msgs, numwant, blocks, &n, false);
auto const n_wanted = msgs->desired_request_count - n_active;
if (n_wanted == 0)
{
return;
}
for (int i = 0; i < n; ++i)
TR_ASSERT(msgs->is_client_interested());
TR_ASSERT(!msgs->is_client_choked());
// std::cout << __FILE__ << ':' << __LINE__ << " wants " << n_wanted << " blocks to request" << std::endl;
for (auto const range : tr_peerMgrGetNextRequests(msgs->torrent, msgs, n_wanted))
{
for (tr_block_index_t block = range.first; block <= range.last; ++block)
{
protocolSendRequest(msgs, blockToReq(msgs->torrent, blocks[i]));
protocolSendRequest(msgs, blockToReq(msgs->torrent, block));
}
tr_free(blocks);
// std::cout << __FILE__ << ':' << __LINE__ << " peer " << (void*)msgs << " requested " << range.last + 1 - range.first << " blocks" << std::endl;
tr_peerMgrClientSentRequests(msgs->torrent, msgs, range);
}
}

View File

@ -2220,7 +2220,6 @@ void tr_torrentSetFilePriorities(
}
tr_torrentSetDirty(tor);
tr_peerMgrRebuildRequests(tor);
tr_torrentUnlock(tor);
}
@ -2333,7 +2332,6 @@ void tr_torrentSetFileDLs(tr_torrent* tor, tr_file_index_t const* files, tr_file
tr_torrentInitFileDLs(tor, files, fileCount, doDownload);
tr_torrentSetDirty(tor);
tr_torrentRecheckCompleteness(tor);
tr_peerMgrRebuildRequests(tor);
tr_torrentUnlock(tor);
}
@ -2495,7 +2493,7 @@ uint64_t tr_pieceOffset(tr_torrent const* tor, tr_piece_index_t index, uint32_t
return ret;
}
tr_block_range tr_torGetFileBlockRange(tr_torrent const* tor, tr_file_index_t const file)
tr_block_range_t tr_torGetFileBlockRange(tr_torrent const* tor, tr_file_index_t const file)
{
tr_file const* f = &tor->info.files[file];
@ -2511,7 +2509,7 @@ tr_block_range tr_torGetFileBlockRange(tr_torrent const* tor, tr_file_index_t co
return { first, last };
}
tr_block_range tr_torGetPieceBlockRange(tr_torrent const* tor, tr_piece_index_t const piece)
tr_block_range_t tr_torGetPieceBlockRange(tr_torrent const* tor, tr_piece_index_t const piece)
{
uint64_t offset = tor->info.pieceSize;
offset *= piece;

View File

@ -91,15 +91,9 @@ void tr_torrentGetBlockLocation(
uint32_t* offset,
uint32_t* length);
struct tr_block_range
{
tr_block_index_t first;
tr_block_index_t last;
};
tr_block_range_t tr_torGetFileBlockRange(tr_torrent const* tor, tr_file_index_t const file);
tr_block_range tr_torGetFileBlockRange(tr_torrent const* tor, tr_file_index_t const file);
tr_block_range tr_torGetPieceBlockRange(tr_torrent const* tor, tr_piece_index_t const piece);
tr_block_range_t tr_torGetPieceBlockRange(tr_torrent const* tor, tr_piece_index_t const piece);
void tr_torrentInitFilePriority(tr_torrent* tor, tr_file_index_t fileIndex, tr_priority_t priority);

View File

@ -35,6 +35,12 @@ using tr_block_index_t = uint32_t;
using tr_port = uint16_t;
using tr_tracker_tier_t = uint32_t;
struct tr_block_range_t
{
tr_block_index_t first;
tr_block_index_t last;
};
struct tr_ctor;
struct tr_error;
struct tr_info;

View File

@ -365,29 +365,18 @@ static void on_idle(tr_webseed* w)
if (tor != nullptr && tor->isRunning && !tr_torrentIsSeed(tor) && want > 0)
{
tr_block_index_t* const blocks = tr_new(tr_block_index_t, want * 2);
auto got = int{};
tr_peerMgrGetNextRequests(tor, w, want, blocks, &got, true);
auto n_tasks = size_t{};
w->idle_connections -= std::min(w->idle_connections, got);
if (w->retry_tickcount >= FAILURE_RETRY_INTERVAL && got == want)
for (auto const range : tr_peerMgrGetNextRequests(tor, w, want))
{
w->retry_tickcount = 0;
}
for (int i = 0; i < got; ++i)
{
tr_block_index_t const b = blocks[i * 2];
tr_block_index_t const be = blocks[i * 2 + 1];
auto* const task = tr_new0(struct tr_webseed_task, 1);
auto const [first, last] = range;
auto* const task = tr_new0(tr_webseed_task, 1);
task->session = tor->session;
task->webseed = w;
task->block = b;
task->piece_index = tr_torBlockPiece(tor, b);
task->piece_offset = tor->blockSize * b - tor->info.pieceSize * task->piece_index;
task->length = (be - b) * tor->blockSize + tr_torBlockCountBytes(tor, be);
task->block = first;
task->piece_index = tr_torBlockPiece(tor, first);
task->piece_offset = tor->blockSize * first - tor->info.pieceSize * task->piece_index;
task->length = (last - first) * tor->blockSize + tr_torBlockCountBytes(tor, last);
task->blocks_done = 0;
task->response_code = 0;
task->block_size = tor->blockSize;
@ -395,9 +384,16 @@ static void on_idle(tr_webseed* w)
evbuffer_add_cb(task->content, on_content_changed, task);
w->tasks.insert(task);
task_request_next_chunk(task);
--w->idle_connections;
++n_tasks;
tr_peerMgrClientSentRequests(tor, w, range);
}
tr_free(blocks);
if (w->retry_tickcount >= FAILURE_RETRY_INTERVAL && n_tasks > 0)
{
w->retry_tickcount = 0;
}
}
}

View File

@ -15,6 +15,8 @@ add_executable(libtransmission-test
metainfo-test.cc
move-test.cc
peer-msgs-test.cc
peer-mgr-wishlist-test.cc
peer-mgr-active-requests-test.cc
quark-test.cc
rename-test.cc
rpc-test.cc

View File

@ -0,0 +1,191 @@
/*
* This file Copyright (C) Mnemosyne LLC
*
* It may be used under the GNU GPL versions 2 or 3
* or any future license endorsed by Mnemosyne LLC.
*
*/
#define LIBTRANSMISSION_PEER_MODULE
#include <algorithm>
#include <type_traits>
#include "transmission.h"
#include "peer-mgr-active-requests.h"
#include "gtest/gtest.h"
// Test fixture providing distinct fake peer pointers.
// ActiveRequests only uses tr_peer* values as identities (compare/hash);
// it never dereferences them, so opaque non-null sentinels are enough.
class PeerMgrActiveRequestsTest : public ::testing::Test
{
protected:
    tr_peer* peer_a_ = reinterpret_cast<tr_peer*>(0xCAFE);
    tr_peer* peer_b_ = reinterpret_cast<tr_peer*>(0xDEAD);
    tr_peer* peer_c_ = reinterpret_cast<tr_peer*>(0xBEEF);
};
// consider: making it a templated class so that tr_peer can be replaced with X
TEST_F(PeerMgrActiveRequestsTest, requestsAreNotAddedTwice)
{
    auto requests = ActiveRequests{};
    auto const block = tr_block_index_t{ 100 };
    auto const peer = static_cast<tr_peer*>(nullptr);
    auto const when = time_t(0);

    // the first add succeeds...
    EXPECT_TRUE(requests.add(block, peer, when));

    // ...but repeats are rejected -- even with a different timestamp,
    // since requests are keyed on (block, peer) alone
    EXPECT_FALSE(requests.add(block, peer, when));
    EXPECT_FALSE(requests.add(block, peer, when));
    EXPECT_FALSE(requests.add(block, peer, when + 1));
}
TEST_F(PeerMgrActiveRequestsTest, requestsMadeAreCounted)
{
    auto requests = ActiveRequests{};
    auto const block = tr_block_index_t{ 100 };
    auto const peer = static_cast<tr_peer*>(nullptr);
    auto const when = time_t(0);

    // a fresh container reports zero everywhere
    EXPECT_EQ(0, requests.count(block));
    EXPECT_EQ(0, requests.count(peer));
    EXPECT_EQ(0, requests.size());

    // adding one request bumps the per-block, per-peer, and total counts
    EXPECT_TRUE(requests.add(block, peer, when));

    EXPECT_EQ(1, requests.count(block));
    EXPECT_EQ(1, requests.count(peer));
    EXPECT_EQ(1, requests.size());
}
TEST_F(PeerMgrActiveRequestsTest, requestsAreRemoved)
{
    // setup: a single active request
    auto requests = ActiveRequests{};
    auto const block = tr_block_index_t{ 100 };
    auto const peer = static_cast<tr_peer*>(nullptr);
    auto const when = time_t(0);

    EXPECT_TRUE(requests.add(block, peer, when));
    EXPECT_EQ(1, requests.count(block));
    EXPECT_EQ(1, requests.count(peer));
    EXPECT_EQ(1, requests.size());

    // removing it succeeds and zeroes all the counts
    EXPECT_TRUE(requests.remove(block, peer));
    EXPECT_EQ(0, requests.count(block));
    EXPECT_EQ(0, requests.count(peer));
    EXPECT_EQ(0, requests.size());

    // removing it again is a no-op that reports failure
    EXPECT_FALSE(requests.remove(block, peer));
    EXPECT_EQ(0, requests.count(block));
    EXPECT_EQ(0, requests.count(peer));
    EXPECT_EQ(0, requests.size());
}
TEST_F(PeerMgrActiveRequestsTest, peersAreRemoved)
{
    auto requests = ActiveRequests{};
    auto const block = tr_block_index_t{ 100 };
    auto const peer = static_cast<tr_peer*>(nullptr);
    auto const when = time_t(0);

    // setup: add a request
    EXPECT_TRUE(requests.add(block, peer, when));
    EXPECT_EQ(1, requests.count(block));
    EXPECT_EQ(1, requests.count(peer));
    EXPECT_EQ(1, requests.size());

    // try removing requests for that block (should remove the 1 active request)
    auto const removed = requests.remove(block);
    EXPECT_EQ(std::vector<tr_peer*>{ peer }, removed);
    EXPECT_EQ(0, requests.count(block));
    EXPECT_EQ(0, requests.count(peer));
    EXPECT_EQ(0, requests.size());

    // try removing requests for that block again (should remove nothing)
    EXPECT_EQ(std::vector<tr_peer*>{}, requests.remove(block));
}
TEST_F(PeerMgrActiveRequestsTest, multiplePeersAreRemoved)
{
    // setup: two peers requesting the same block, plus a third
    // peer requesting a different block
    auto requests = ActiveRequests{};
    auto const block_a = tr_block_index_t{ 128 };
    auto const when_a = 100;
    EXPECT_TRUE(requests.add(block_a, peer_a_, when_a));
    auto const block_b = block_a;
    auto const when_b = 200;
    EXPECT_TRUE(requests.add(block_b, peer_b_, when_b));
    auto const block_c = tr_block_index_t{ 256 };
    auto const when_c = when_b;
    EXPECT_TRUE(requests.add(block_c, peer_c_, when_c));
    EXPECT_EQ(block_a, block_b);
    EXPECT_EQ(2, requests.count(block_a));
    EXPECT_EQ(1, requests.count(block_c));
    EXPECT_EQ(3, requests.size());

    // now remove block_a, which was req'd by peer_a_ and peer_b_
    // (sort both sides: remove() makes no ordering guarantee)
    auto expected = std::vector<tr_peer*>{ peer_a_, peer_b_ };
    std::sort(std::begin(expected), std::end(expected));
    auto removed = requests.remove(block_a);
    std::sort(std::begin(removed), std::end(removed));
    EXPECT_EQ(expected, removed);
}
// Removing a peer should remove all of that peer's pending requests
// and return the affected block indices.
TEST_F(PeerMgrActiveRequestsTest, multipleBlocksAreRemoved)
{
    // setup: one peer with requests for two different blocks
    auto reqs = ActiveRequests{};
    auto const first_block = tr_block_index_t{ 128 };
    auto const first_time = 300;
    EXPECT_TRUE(reqs.add(first_block, peer_a_, first_time));
    auto const second_block = tr_block_index_t{ 256 };
    auto const second_time = 400;
    EXPECT_TRUE(reqs.add(second_block, peer_a_, second_time));
    EXPECT_EQ(2, reqs.size());
    EXPECT_EQ(2, reqs.count(peer_a_));
    EXPECT_EQ(1, reqs.count(first_block));
    EXPECT_EQ(0, reqs.count(peer_b_));
    EXPECT_EQ(0, reqs.count(tr_block_index_t{ 512 }));

    // confirm that removing peer_a_ removes all of its requests
    // (sort both sides since no ordering is guaranteed)
    auto want = std::vector<tr_block_index_t>{ first_block, second_block };
    std::sort(std::begin(want), std::end(want));
    auto got = reqs.remove(peer_a_);
    std::sort(std::begin(got), std::end(got));
    EXPECT_EQ(want, got);
    EXPECT_EQ(0, reqs.size());
    EXPECT_EQ(0, reqs.count(peer_a_));
    EXPECT_EQ(0, reqs.count(first_block));
}
// Exercise ActiveRequests::sentBefore(): only requests whose timestamp
// is strictly earlier than the cutoff should be returned.
TEST_F(PeerMgrActiveRequestsTest, sentBefore)
{
    // setup: one peer with two requests sent at different times
    auto reqs = ActiveRequests{};
    auto const early_block = tr_block_index_t{ 128 };
    auto const early_time = 300;
    EXPECT_TRUE(reqs.add(early_block, peer_a_, early_time));
    auto const late_block = tr_block_index_t{ 256 };
    auto const late_time = 400;
    EXPECT_TRUE(reqs.add(late_block, peer_a_, late_time));
    EXPECT_EQ(2, reqs.size());
    EXPECT_EQ(2, reqs.count(peer_a_));
    EXPECT_EQ(1, reqs.count(early_block));

    // the cutoff is exclusive: each request is only included
    // when the cutoff is later than its send time
    EXPECT_EQ(0, std::size(reqs.sentBefore(early_time - 1)));
    EXPECT_EQ(0, std::size(reqs.sentBefore(early_time)));
    EXPECT_EQ(1, std::size(reqs.sentBefore(early_time + 1)));
    EXPECT_EQ(1, std::size(reqs.sentBefore(late_time - 1)));
    EXPECT_EQ(1, std::size(reqs.sentBefore(late_time)));
    EXPECT_EQ(2, std::size(reqs.sentBefore(late_time + 1)));

    // the returned (block, peer) pairs should identify the request
    auto const items = reqs.sentBefore(early_time + 1);
    ASSERT_EQ(1, std::size(items));
    EXPECT_EQ(early_block, items[0].first);
    EXPECT_EQ(peer_a_, items[0].second);
}

View File

@ -0,0 +1,353 @@
/*
* This file Copyright (C) Mnemosyne LLC
*
* It may be used under the GNU GPL versions 2 or 3
* or any future license endorsed by Mnemosyne LLC.
*
*/
#include <algorithm>
#include <cstddef>
#include <map>
#include <set>
#include <type_traits>

#define LIBTRANSMISSION_PEER_MODULE
#include "transmission.h"

#include "peer-mgr-wishlist.h"

#include "gtest/gtest.h"
// Test fixture for the peer-mgr wishlist tests.
class PeerMgrWishlistTest : public ::testing::Test
{
protected:
// A scriptable Wishlist::PeerInfo: each test presets these containers
// to describe the swarm state the wishlist will see. The maps are
// mutable because the const accessors below use operator[], which
// default-inserts missing keys.
struct MockPeerInfo : public Wishlist::PeerInfo
{
// active request count per block, keyed by block index
mutable std::map<tr_block_index_t, size_t> active_request_count_;
// number of missing blocks per piece
mutable std::map<tr_piece_index_t, size_t> missing_block_count_;
// [first, last] block span for each piece
mutable std::map<tr_piece_index_t, tr_block_range_t> block_range_;
// per-piece download priority (defaults to value-initialized, i.e. normal)
mutable std::map<tr_piece_index_t, tr_priority_t> piece_priority_;
// blocks the client is allowed to request
mutable std::set<tr_block_index_t> can_request_block_;
// pieces the client is allowed to request
mutable std::set<tr_piece_index_t> can_request_piece_;
tr_piece_index_t piece_count_ = 0;
bool is_endgame_ = false;
[[nodiscard]] bool clientCanRequestBlock(tr_block_index_t block) const final
{
return can_request_block_.count(block) != 0;
}
[[nodiscard]] bool clientCanRequestPiece(tr_piece_index_t piece) const final
{
return can_request_piece_.count(piece) != 0;
}
[[nodiscard]] bool isEndgame() const final
{
return is_endgame_;
}
[[nodiscard]] size_t countActiveRequests(tr_block_index_t block) const final
{
return active_request_count_[block];
}
[[nodiscard]] size_t countMissingBlocks(tr_piece_index_t piece) const final
{
return missing_block_count_[piece];
}
[[nodiscard]] tr_block_range_t blockRange(tr_piece_index_t piece) const final
{
return block_range_[piece];
}
[[nodiscard]] tr_piece_index_t countAllPieces() const final
{
return piece_count_;
}
[[nodiscard]] tr_priority_t priority(tr_piece_index_t piece) const final
{
return piece_priority_[piece];
}
};
};
// The wishlist must never hand back blocks from pieces
// the client hasn't marked as wanted.
TEST_F(PeerMgrWishlistTest, doesNotRequestPiecesThatCannotBeRequested)
{
    auto mock = MockPeerInfo{};
    auto wishlist = Wishlist{};

    // setup: three pieces, all missing
    mock.piece_count_ = 3;
    mock.missing_block_count_[0] = 100;
    mock.missing_block_count_[1] = 100;
    mock.missing_block_count_[2] = 50;
    mock.block_range_[0] = { 0, 99 };
    mock.block_range_[1] = { 100, 199 };
    mock.block_range_[2] = { 200, 250 };

    // but only the first piece (and its blocks) is requestable
    mock.can_request_piece_.insert(0);
    auto const wanted = mock.block_range_[0];
    for (auto block = wanted.first; block <= wanted.last; ++block)
    {
        mock.can_request_block_.insert(block);
    }

    // so the first piece is all we should get back
    auto const spans = wishlist.next(mock, 1000);
    ASSERT_EQ(1, std::size(spans));
    EXPECT_EQ(wanted.first, spans[0].first);
    EXPECT_EQ(wanted.last, spans[0].last);
}
// Blocks already requested elsewhere must be omitted
// from the wishlist's suggestions.
TEST_F(PeerMgrWishlistTest, doesNotRequestBlocksThatCannotBeRequested)
{
    auto mock = MockPeerInfo{};
    auto wishlist = Wishlist{};

    // setup: three pieces, all missing
    mock.piece_count_ = 3;
    mock.missing_block_count_[0] = 100;
    mock.missing_block_count_[1] = 100;
    mock.missing_block_count_[2] = 50;
    mock.block_range_[0] = { 0, 99 };
    mock.block_range_[1] = { 100, 199 };
    mock.block_range_[2] = { 200, 249 };

    // and all three pieces are wanted
    for (tr_piece_index_t piece = 0; piece < 3; ++piece)
    {
        mock.can_request_piece_.insert(piece);
    }

    // but blocks [0..10) were already requested from someone else,
    // so only blocks [10..250) remain requestable
    for (tr_block_index_t block = 10; block < 250; ++block)
    {
        mock.can_request_block_.insert(block);
    }

    // even when asking for more blocks than exist,
    // blocks [0..10) should be omitted from the return set
    auto const spans = wishlist.next(mock, 1000);
    auto requested = tr_bitfield(250);
    for (auto const& span : spans)
    {
        requested.setRange(span.first, span.last + 1);
    }
    EXPECT_EQ(240, requested.count());
    EXPECT_EQ(0, requested.count(0, 10));
    EXPECT_EQ(240, requested.count(10, 250));
}
// The wishlist should return no more blocks than the caller asked for.
TEST_F(PeerMgrWishlistTest, doesNotRequestTooManyBlocks)
{
    auto mock = MockPeerInfo{};
    auto wishlist = Wishlist{};

    // setup: three pieces, all missing
    mock.piece_count_ = 3;
    mock.missing_block_count_[0] = 100;
    mock.missing_block_count_[1] = 100;
    mock.missing_block_count_[2] = 50;
    mock.block_range_[0] = { 0, 99 };
    mock.block_range_[1] = { 100, 199 };
    mock.block_range_[2] = { 200, 249 };

    // and everything is requestable
    for (tr_piece_index_t piece = 0; piece < 3; ++piece)
    {
        mock.can_request_piece_.insert(piece);
    }
    for (tr_block_index_t block = 0; block < 250; ++block)
    {
        mock.can_request_block_.insert(block);
    }

    // but we only ask for 10 blocks,
    // so that's how many we should get back
    auto const max_wanted = 10;
    auto n_returned = size_t{};
    for (auto const& span : wishlist.next(mock, max_wanted))
    {
        n_returned += span.last + 1 - span.first;
    }
    EXPECT_EQ(max_wanted, n_returned);
}
// The wishlist should hand out blocks from high-priority pieces first.
TEST_F(PeerMgrWishlistTest, prefersHighPriorityPieces)
{
    auto peer_info = MockPeerInfo{};
    auto wishlist = Wishlist{};

    // setup: three pieces, all missing
    peer_info.piece_count_ = 3;
    peer_info.missing_block_count_[0] = 100;
    peer_info.missing_block_count_[1] = 100;
    peer_info.missing_block_count_[2] = 100;
    peer_info.block_range_[0] = { 0, 99 };
    peer_info.block_range_[1] = { 100, 199 };
    peer_info.block_range_[2] = { 200, 299 };

    // and we want everything
    for (tr_piece_index_t i = 0; i < 3; ++i)
    {
        peer_info.can_request_piece_.insert(i);
    }
    // NB: blocks run [0..300); the old `i < 299` bound accidentally
    // left the final block 299 un-requestable
    for (tr_block_index_t i = 0; i < 300; ++i)
    {
        peer_info.can_request_block_.insert(i);
    }

    // and the second piece is high priority
    peer_info.piece_priority_[1] = TR_PRI_HIGH;

    // wishlist should pick the high priority piece's blocks first.
    //
    // NB: when all other things are equal in the wishlist, pieces are
    // picked at random so this test -could- pass even if there's a bug.
    // So test several times to shake out any randomness
    auto const num_runs = 1000;
    for (int run = 0; run < num_runs; ++run)
    {
        auto const n_wanted = 10;
        auto ranges = wishlist.next(peer_info, n_wanted);
        auto n_got = size_t{};
        for (auto const& range : ranges)
        {
            // every block handed back must lie inside the high-priority piece
            for (auto block = range.first; block <= range.last; ++block)
            {
                EXPECT_LE(peer_info.block_range_[1].first, block);
                EXPECT_LE(block, peer_info.block_range_[1].last);
            }
            n_got += range.last + 1 - range.first;
        }
        EXPECT_EQ(n_wanted, n_got);
    }
}
// Blocks that already have active requests should be skipped,
// UNLESS we're in endgame mode, when duplicates become acceptable.
TEST_F(PeerMgrWishlistTest, onlyRequestsDupesDuringEndgame)
{
    auto mock = MockPeerInfo{};
    auto wishlist = Wishlist{};

    // setup: three pieces, all missing
    mock.piece_count_ = 3;
    mock.missing_block_count_[0] = 100;
    mock.missing_block_count_[1] = 100;
    mock.missing_block_count_[2] = 100;
    mock.block_range_[0] = { 0, 99 };
    mock.block_range_[1] = { 100, 199 };
    mock.block_range_[2] = { 200, 299 };

    // and everything is requestable
    for (tr_piece_index_t piece = 0; piece < 3; ++piece)
    {
        mock.can_request_piece_.insert(piece);
    }
    for (tr_block_index_t block = 0; block < 300; ++block)
    {
        mock.can_request_block_.insert(block);
    }

    // and blocks [0..150) already have one active request each
    for (tr_block_index_t block = 0; block < 150; ++block)
    {
        mock.active_request_count_[block] = 1;
    }

    // helper: ask for more blocks than exist and flatten the
    // returned ranges into a bitfield for easy counting
    auto const next_as_bitfield = [&wishlist](MockPeerInfo const& info)
    {
        auto flags = tr_bitfield(300);
        for (auto const& span : wishlist.next(info, 1000))
        {
            flags.setRange(span.first, span.last + 1);
        }
        return flags;
    };

    // normally, the 150 blocks with pending requests should be omitted
    auto requested = next_as_bitfield(mock);
    EXPECT_EQ(150, requested.count());
    EXPECT_EQ(0, requested.count(0, 150));
    EXPECT_EQ(150, requested.count(150, 300));

    // BUT during endgame it's OK to request dupes,
    // so then we _should_ see the first 150 in the list
    mock.is_endgame_ = true;
    requested = next_as_bitfield(mock);
    EXPECT_EQ(300, requested.count());
    EXPECT_EQ(150, requested.count(0, 150));
    EXPECT_EQ(150, requested.count(150, 300));
}
// The wishlist should prefer blocks from the pieces
// that are closest to completion.
TEST_F(PeerMgrWishlistTest, prefersNearlyCompletePieces)
{
    auto mock = MockPeerInfo{};
    auto wishlist = Wishlist{};

    // setup: three pieces, same size
    mock.piece_count_ = 3;
    mock.block_range_[0] = { 0, 99 };
    mock.block_range_[1] = { 100, 199 };
    mock.block_range_[2] = { 200, 299 };

    // and all three pieces are wanted
    for (tr_piece_index_t piece = 0; piece < 3; ++piece)
    {
        mock.can_request_piece_.insert(piece);
    }

    // but some pieces are closer to completion than others:
    // only each piece's first `missing` blocks are still requestable
    mock.missing_block_count_[0] = 10;
    mock.missing_block_count_[1] = 20;
    mock.missing_block_count_[2] = 100;
    for (tr_piece_index_t piece = 0; piece < 3; ++piece)
    {
        auto const begin = mock.block_range_[piece].first;
        auto const n_missing = mock.missing_block_count_[piece];
        for (size_t i = 0; i < n_missing; ++i)
        {
            mock.can_request_block_.insert(begin + i);
        }
    }

    // wishlist prefers to get pieces completed ASAP, so it
    // should pick the ones with the fewest missing blocks first.
    // NB: when all other things are equal in the wishlist, pieces are
    // picked at random so this test -could- pass even if there's a bug.
    // So test several times to shake out any randomness
    auto const num_runs = 1000;
    for (int run = 0; run < num_runs; ++run)
    {
        auto requested = tr_bitfield(300);
        for (auto const& span : wishlist.next(mock, 10))
        {
            requested.setRange(span.first, span.last + 1);
        }
        EXPECT_EQ(10, requested.count());
        EXPECT_EQ(10, requested.count(0, 100));
        EXPECT_EQ(0, requested.count(100, 300));
    }

    // Same premise as previous test, but ask for more blocks.
    // Since the second piece is also the second-closest to completion,
    // those blocks should be next in line.
    for (int run = 0; run < num_runs; ++run)
    {
        auto requested = tr_bitfield(300);
        for (auto const& span : wishlist.next(mock, 20))
        {
            requested.setRange(span.first, span.last + 1);
        }
        EXPECT_EQ(20, requested.count());
        EXPECT_EQ(10, requested.count(0, 100));
        EXPECT_EQ(10, requested.count(100, 200));
        EXPECT_EQ(0, requested.count(200, 300));
    }
}