refactor: incremental announcer improvements (#4211)

Charles Kerr 2022-11-17 20:23:54 -06:00 committed by GitHub
parent f17cee97e5
commit 4cc0b77eec
5 changed files with 440 additions and 445 deletions

View File

@@ -42,3 +42,187 @@ void tr_announcerParseHttpAnnounceResponse(tr_announce_response& response, std::
void tr_announcerParseHttpScrapeResponse(tr_scrape_response& response, std::string_view benc, std::string_view log_name);
tr_interned_string tr_announcerGetKey(tr_url_parsed_t const& parsed);
[[nodiscard]] constexpr std::string_view tr_announce_event_get_string(tr_announce_event e)
{
switch (e)
{
case TR_ANNOUNCE_EVENT_COMPLETED:
return "completed";
case TR_ANNOUNCE_EVENT_STARTED:
return "started";
case TR_ANNOUNCE_EVENT_STOPPED:
return "stopped";
default:
return "";
}
}
struct tr_announce_request
{
tr_announce_event event = {};
bool partial_seed = false;
/* the port we listen for incoming peers on */
tr_port port;
/* per-session key */
int key = 0;
/* the number of peers we'd like to get back in the response */
int numwant = 0;
/* the number of bytes we uploaded since the last 'started' event */
uint64_t up = 0;
/* the number of good bytes we downloaded since the last 'started' event */
uint64_t down = 0;
/* the number of bad bytes we downloaded since the last 'started' event */
uint64_t corrupt = 0;
/* the total size of the torrent minus the number of bytes completed */
uint64_t leftUntilComplete = 0;
/* the tracker's announce URL */
tr_interned_string announce_url;
/* key generated by and returned from an http tracker.
 * see tr_announce_response.tracker_id */
std::string tracker_id;
/* the torrent's peer id.
* this changes when a torrent is stopped -> restarted. */
tr_peer_id_t peer_id;
/* the torrent's info_hash */
tr_sha1_digest_t info_hash;
/* the name to use when deep logging is enabled */
char log_name[128];
};
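As a rough illustration of how these fields fit together, here is a hedged sketch of building a first 'started' announce; the helper name and the numwant value are illustrative assumptions, not part of this diff.

/* sketch only: what a first 'started' announce might look like */
tr_announce_request make_started_request_sketch(
    tr_interned_string announce_url,
    tr_sha1_digest_t const& info_hash,
    tr_peer_id_t const& peer_id,
    tr_port port)
{
    auto req = tr_announce_request{};
    req.event = TR_ANNOUNCE_EVENT_STARTED;
    req.announce_url = announce_url;
    req.info_hash = info_hash;
    req.peer_id = peer_id;
    req.port = port; /* where we accept incoming peers */
    req.numwant = 80; /* how many peers to ask for; 80 is an illustrative value */
    req.up = 0; /* nothing transferred yet since this 'started' event */
    req.down = 0;
    req.corrupt = 0;
    req.leftUntilComplete = 0; /* in practice: total size minus completed bytes */
    return req;
}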
struct tr_announce_response
{
/* the torrent's info hash */
tr_sha1_digest_t info_hash = {};
/* whether or not we managed to connect to the tracker */
bool did_connect = false;
/* whether or not the announce timed out */
bool did_timeout = false;
/* preferred interval between announces.
* transmission treats this as the interval for periodic announces */
int interval = 0;
/* minimum interval between announces. (optional)
* transmission treats this as the min interval for manual announces */
int min_interval = 0;
/* how many peers are seeding this torrent */
int seeders = -1;
/* how many peers are downloading this torrent */
int leechers = -1;
/* how many times this torrent has been downloaded */
int downloads = -1;
/* IPv4 peers that we acquired from the tracker */
std::vector<tr_pex> pex;
/* IPv6 peers that we acquired from the tracker */
std::vector<tr_pex> pex6;
/* human-readable error string on failure; empty on success */
std::string errmsg;
/* human-readable warning string; empty if the tracker sent none */
std::string warning;
/* key generated by and returned from an http tracker.
* if this is provided, subsequent http announces must include this. */
std::string tracker_id;
/* tracker extension that returns the client's public IP address.
* https://www.bittorrent.org/beps/bep_0024.html */
std::optional<tr_address> external_ip;
};
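The external_ip field is the BEP 24 'external ip' extension; as the announcer.cc hunk later in this commit shows, the session simply adopts the address when the tracker supplies one. In miniature (the free function is hypothetical; setExternalIP is the call used in this diff):

/* sketch: how a callback might consume the optional BEP 24 field */
void apply_external_ip_sketch(tr_session* session, tr_announce_response const& response)
{
    if (response.external_ip)
    {
        session->setExternalIP(*response.external_ip);
    }
}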
/// SCRAPE
/* pick a number small enough for common tracker software:
* - ocelot has no upper bound
* - opentracker has an upper bound of 64
* - udp protocol has an upper bound of 74
* - xbtt has no upper bound
*
* This is only an upper bound: if the tracker complains about
* length, announcer will incrementally lower the batch size.
*/
auto inline constexpr TR_MULTISCRAPE_MAX = 60;
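To make the comment above concrete: when a tracker rejects an over-long scrape URL, the announcer is described as shrinking its per-request batch. A minimal sketch of that back-off, assuming a per-host multiscrape_max counter and reusing the TrMultiscrapeStep constant that appears later in announcer.cc:

#include <algorithm>

/* sketch only, not the announcer's actual implementation */
static auto constexpr TrMultiscrapeStep = int{ 5 }; /* value taken from announcer.cc */

void lower_multiscrape_batch_sketch(int& multiscrape_max)
{
    /* shrink the batch, but never below one info-hash per request */
    multiscrape_max = std::max(1, multiscrape_max - TrMultiscrapeStep);
}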
struct tr_scrape_request
{
/* the scrape URL */
tr_interned_string scrape_url;
/* the name to use when deep logging is enabled */
char log_name[128];
/* info hashes of the torrents to scrape */
std::array<tr_sha1_digest_t, TR_MULTISCRAPE_MAX> info_hash;
/* how many hashes to use in the info_hash field */
int info_hash_count = 0;
};
struct tr_scrape_response_row
{
/* the torrent's info_hash */
tr_sha1_digest_t info_hash;
/* how many peers are seeding this torrent */
int seeders = 0;
/* how many peers are downloading this torrent */
int leechers = 0;
/* how many times this torrent has been downloaded */
int downloads = 0;
/* the number of active downloaders in the swarm.
* this is a BEP 21 extension that some trackers won't support.
* http://www.bittorrent.org/beps/bep_0021.html#tracker-scrapes */
int downloaders = 0;
};
struct tr_scrape_response
{
/* whether or not we managed to connect to the tracker */
bool did_connect = false;
/* whether or not the scrape timed out */
bool did_timeout = false;
/* how many info hashes are in the 'rows' field */
int row_count;
/* the individual torrents' scrape results */
std::array<tr_scrape_response_row, TR_MULTISCRAPE_MAX> rows;
/* the raw scrape url */
tr_interned_string scrape_url;
/* human-readable error string on failure; empty on success */
std::string errmsg;
/* minimum interval (in seconds) allowed between scrapes.
* this is an unofficial extension that some trackers won't support. */
int min_request_interval;
};
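Putting the scrape types together: a caller splits its torrents into requests of at most TR_MULTISCRAPE_MAX hashes each. A hedged sketch of such a batching helper (the function is hypothetical; the field names come from the structs above):

#include <algorithm>
#include <vector>

/* sketch only: split a hash list into scrape requests of at most TR_MULTISCRAPE_MAX each */
std::vector<tr_scrape_request> make_scrape_requests_sketch(
    tr_interned_string scrape_url,
    std::vector<tr_sha1_digest_t> const& hashes)
{
    auto requests = std::vector<tr_scrape_request>{};

    for (size_t pos = 0; pos < std::size(hashes); pos += TR_MULTISCRAPE_MAX)
    {
        auto& req = requests.emplace_back();
        req.scrape_url = scrape_url;
        auto const n = std::min(std::size(hashes) - pos, size_t{ TR_MULTISCRAPE_MAX });
        std::copy_n(std::begin(hashes) + pos, n, std::begin(req.info_hash));
        req.info_hash_count = static_cast<int>(n);
    }

    return requests;
}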

View File

@@ -303,7 +303,7 @@ struct tau_tracker
return std::empty(announces) && std::empty(scrapes) && !addr_pending_dns_;
}
void sendto(void const* buf, size_t buflen)
void sendto(std::byte const* buf, size_t buflen)
{
TR_ASSERT(addr_);
if (!addr_)
@@ -349,29 +349,16 @@ struct tau_tracker
addr_expires_at_ = now + DnsRetryIntervalSecs;
}
// if the address info is too old, expire it
if (addr_ && (closing || addr_expires_at_ <= now))
{
logtrace(this->host, "Expiring old DNS result");
addr_.reset();
addr_expires_at_ = 0;
}
// are there any requests pending?
if (this->isIdle())
{
return;
}
// if DNS lookup *recently* failed for this host, do nothing
if (!addr_ && now < addr_expires_at_)
{
return;
}
// if we don't have an address yet, try & get one now.
if (!closing && !addr_ && !addr_pending_dns_)
// update the addr if our lookup is past its shelf date
if (!closing && !addr_pending_dns_ && addr_expires_at_ <= now)
{
addr_.reset();
addr_pending_dns_ = std::async(std::launch::async, lookup, this->host, this->port, this->key);
return;
}
@@ -380,13 +367,13 @@ struct tau_tracker
this->key,
fmt::format(
"connected {} ({} {}) -- connecting_at {}",
this->connection_expiration_time > now,
is_connected(now),
this->connection_expiration_time,
now,
this->connecting_at));
/* also need a valid connection ID... */
if (addr_ && this->connection_expiration_time <= now && this->connecting_at == 0)
if (addr_ && !is_connected(now) && this->connecting_at == 0)
{
this->connecting_at = now;
this->connection_transaction_id = tau_transaction_new();
@@ -399,16 +386,14 @@ struct tau_tracker
auto const contiguous = std::vector<std::byte>(std::begin(buf), std::end(buf));
this->sendto(std::data(contiguous), std::size(contiguous));
return;
}
if (timeout_reqs)
{
timeout_requests();
timeout_requests(now);
}
if (addr_ && this->connection_expiration_time > now)
if (addr_ && is_connected(now))
{
send_requests();
}
@@ -418,6 +403,11 @@ private:
using Sockaddr = std::pair<sockaddr_storage, socklen_t>;
using MaybeSockaddr = std::optional<Sockaddr>;
[[nodiscard]] constexpr bool is_connected(time_t now) const noexcept
{
return connection_id != tau_connection_t{} && now < connection_expiration_time;
}
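For context, a UDP tracker connection id is only valid for a short window (about a minute per BEP 15), which is presumably why the helper above checks connection_expiration_time as well as the id. A hedged sketch of the bookkeeping that would pair with it; TauConnectionTtlSecs and the function itself are assumptions, while the member names come from this diff:

/* sketch only: how the connection fields might be refreshed after a connect response */
static auto constexpr TauConnectionTtlSecs = time_t{ 60 }; /* BEP 15: ids are good for roughly a minute */

void on_connect_response_sketch(tau_tracker& tracker, tau_connection_t connection_id, time_t now)
{
    tracker.connection_id = connection_id;
    tracker.connection_expiration_time = now + TauConnectionTtlSecs;
    tracker.connecting_at = 0; /* the handshake is finished */
}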
[[nodiscard]] static MaybeSockaddr lookup(tr_interned_string host, tr_port port, tr_interned_string logname)
{
auto szport = std::array<char, 16>{};
@@ -469,9 +459,8 @@ private:
///
void timeout_requests()
void timeout_requests(time_t now)
{
time_t const now = time(nullptr);
bool const cancel_all = this->close_at != 0 && (this->close_at <= now);
if (this->connecting_at != 0 && this->connecting_at + TauRequestTtl < now)
@@ -546,7 +535,7 @@ private:
}
}
void send_request(void const* payload, size_t payload_len)
void send_request(std::byte const* payload, size_t payload_len)
{
logdbg(this->key, fmt::format("sending request w/connection id {}", this->connection_id));

View File

@@ -68,24 +68,6 @@ static auto constexpr TrMultiscrapeStep = int{ 5 };
****
***/
std::string_view tr_announce_event_get_string(tr_announce_event e)
{
switch (e)
{
case TR_ANNOUNCE_EVENT_COMPLETED:
return "completed"sv;
case TR_ANNOUNCE_EVENT_STARTED:
return "started"sv;
case TR_ANNOUNCE_EVENT_STOPPED:
return "stopped"sv;
default:
return ""sv;
}
}
namespace
{
@@ -128,9 +110,9 @@ struct StopsCompare
return 0;
}
[[nodiscard]] bool operator()(tr_announce_request const* a, tr_announce_request const* b) const noexcept // less than
[[nodiscard]] bool operator()(tr_announce_request const* one, tr_announce_request const* two) const noexcept
{
return compare(a, b) < 0;
return compare(one, two) < 0;
}
};
@@ -298,7 +280,6 @@ struct tr_tier
{
tr_tier(tr_announcer* announcer, tr_torrent* tor_in, std::vector<tr_announce_list::tracker_info const*> const& infos)
: tor{ tor_in }
, id{ next_key++ }
{
trackers.reserve(std::size(infos));
for (auto const* info : infos)
@@ -450,7 +431,7 @@ struct tr_tier
time_t lastAnnounceStartTime = 0;
time_t lastAnnounceTime = 0;
int const id;
int const id = next_key++;
int announce_event_priority = 0;
@@ -491,11 +472,9 @@ private:
return ret;
}
static int next_key;
static inline int next_key = 0;
};
int tr_tier::next_key = 0;
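The change above is the C++17 inline-variable pattern: declaring the static data member inline lets the class initialize it in place and drop the separate out-of-class definition. A generic illustration (hypothetical type, not from this codebase):

struct counter_example /* hypothetical, for illustration only */
{
    int const id = next_id++; /* each instance claims the next id at construction */

    static inline int next_id = 0; /* C++17: defined right here, no .cc definition needed */
};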
/***
****
***/
@@ -983,186 +962,189 @@ static void on_announce_error(tr_tier* tier, char const* err, tr_announce_event
static void onAnnounceDone(tr_announce_response const* response, void* vdata)
{
auto* const data = static_cast<announce_data*>(vdata);
tr_announcer* announcer = data->session->announcer;
tr_tier* tier = getTier(announcer, response->info_hash, data->tier_id);
time_t const now = tr_time();
tr_tier* const tier = getTier(announcer, response->info_hash, data->tier_id);
if (tier == nullptr)
{
delete data;
return;
}
auto const now = tr_time();
tr_announce_event const event = data->event;
if (tier != nullptr)
tr_logAddTraceTier(
tier,
fmt::format(
"Got announce response: "
"connected:{} "
"timeout:{} "
"seeders:{} "
"leechers:{} "
"downloads:{} "
"interval:{} "
"min_interval:{} "
"tracker_id_str:{} "
"pex:{} "
"pex6:{} "
"err:{} "
"warn:{}",
response->did_connect,
response->did_timeout,
response->seeders,
response->leechers,
response->downloads,
response->interval,
response->min_interval,
(!std::empty(response->tracker_id) ? response->tracker_id.c_str() : "none"),
std::size(response->pex),
std::size(response->pex6),
(!std::empty(response->errmsg) ? response->errmsg.c_str() : "none"),
(!std::empty(response->warning) ? response->warning.c_str() : "none")));
tier->lastAnnounceTime = now;
tier->lastAnnounceTimedOut = response->did_timeout;
tier->lastAnnounceSucceeded = false;
tier->isAnnouncing = false;
tier->manualAnnounceAllowedAt = now + tier->announceMinIntervalSec;
if (response->external_ip)
{
tr_logAddTraceTier(
tier,
fmt::format(
"Got announce response: "
"connected:{} "
"timeout:{} "
"seeders:{} "
"leechers:{} "
"downloads:{} "
"interval:{} "
"min_interval:{} "
"tracker_id_str:{} "
"pex:{} "
"pex6:{} "
"err:{} "
"warn:{}",
response->did_connect,
response->did_timeout,
response->seeders,
response->leechers,
response->downloads,
response->interval,
response->min_interval,
(!std::empty(response->tracker_id) ? response->tracker_id.c_str() : "none"),
std::size(response->pex),
std::size(response->pex6),
(!std::empty(response->errmsg) ? response->errmsg.c_str() : "none"),
(!std::empty(response->warning) ? response->warning.c_str() : "none")));
data->session->setExternalIP(*response->external_ip);
}
tier->lastAnnounceTime = now;
tier->lastAnnounceTimedOut = response->did_timeout;
tier->lastAnnounceSucceeded = false;
tier->isAnnouncing = false;
tier->manualAnnounceAllowedAt = now + tier->announceMinIntervalSec;
if (response->external_ip)
if (!response->did_connect)
{
on_announce_error(tier, _("Could not connect to tracker"), event);
}
else if (response->did_timeout)
{
on_announce_error(tier, _("Tracker did not respond"), event);
}
else if (!std::empty(response->errmsg))
{
/* If the torrent's only tracker returned an error, publish it.
Don't bother publishing if there are other trackers -- it's
all too common for people to load up dozens of dead trackers
in a torrent's metainfo... */
if (tier->tor->trackerCount() < 2)
{
data->session->setExternalIP(*response->external_ip);
publishError(tier, response->errmsg);
}
if (!response->did_connect)
on_announce_error(tier, response->errmsg.c_str(), event);
}
else
{
auto const is_stopped = event == TR_ANNOUNCE_EVENT_STOPPED;
auto leechers = int{};
auto scrape_fields = int{};
auto seeders = int{};
publishErrorClear(tier);
auto* const tracker = tier->currentTracker();
if (tracker != nullptr)
{
on_announce_error(tier, _("Could not connect to tracker"), event);
}
else if (response->did_timeout)
{
on_announce_error(tier, _("Tracker did not respond"), event);
}
else if (!std::empty(response->errmsg))
{
/* If the torrent's only tracker returned an error, publish it.
Don't bother publishing if there are other trackers -- it's
all too common for people to load up dozens of dead trackers
in a torrent's metainfo... */
if (tier->tor->trackerCount() < 2)
tracker->consecutive_failures = 0;
if (response->seeders >= 0)
{
publishError(tier, response->errmsg);
tracker->seeder_count = seeders = response->seeders;
++scrape_fields;
}
on_announce_error(tier, response->errmsg.c_str(), event);
if (response->leechers >= 0)
{
tracker->leecher_count = leechers = response->leechers;
++scrape_fields;
}
if (response->downloads >= 0)
{
tracker->download_count = response->downloads;
++scrape_fields;
}
if (!std::empty(response->tracker_id))
{
tracker->tracker_id = response->tracker_id;
}
}
if (auto const& warning = response->warning; !std::empty(warning))
{
tier->last_announce_str = warning;
tr_logAddTraceTier(tier, fmt::format("tracker gave '{}'", warning));
publishWarning(tier, warning);
}
else
{
auto const is_stopped = event == TR_ANNOUNCE_EVENT_STOPPED;
auto leechers = int{};
auto scrape_fields = int{};
auto seeders = int{};
tier->last_announce_str = _("Success");
}
publishErrorClear(tier);
if (response->min_interval != 0)
{
tier->announceMinIntervalSec = response->min_interval;
}
auto* const tracker = tier->currentTracker();
if (tracker != nullptr)
{
tracker->consecutive_failures = 0;
if (response->interval != 0)
{
tier->announceIntervalSec = response->interval;
}
if (response->seeders >= 0)
{
tracker->seeder_count = seeders = response->seeders;
++scrape_fields;
}
if (!std::empty(response->pex))
{
publishPeersPex(tier, seeders, leechers, response->pex);
}
if (response->leechers >= 0)
{
tracker->leecher_count = leechers = response->leechers;
++scrape_fields;
}
if (!std::empty(response->pex6))
{
publishPeersPex(tier, seeders, leechers, response->pex6);
}
if (response->downloads >= 0)
{
tracker->download_count = response->downloads;
++scrape_fields;
}
publishPeerCounts(tier, seeders, leechers);
if (!std::empty(response->tracker_id))
{
tracker->tracker_id = response->tracker_id;
}
}
tier->isRunning = data->is_running_on_success;
if (auto const& warning = response->warning; !std::empty(warning))
{
tier->last_announce_str = warning;
tr_logAddTraceTier(tier, fmt::format("tracker gave '{}'", warning));
publishWarning(tier, warning);
}
else
{
tier->last_announce_str = _("Success");
}
/* if the tracker included scrape fields in its announce response,
then a separate scrape isn't needed */
if (scrape_fields >= 3 || (scrape_fields >= 1 && tracker->scrape_info == nullptr))
{
tr_logAddTraceTier(
tier,
fmt::format(
"Announce response has scrape info; bumping next scrape to {} seconds from now.",
tier->scrapeIntervalSec));
tier->scheduleNextScrape();
tier->lastScrapeTime = now;
tier->lastScrapeSucceeded = true;
}
else if (tier->lastScrapeTime + tier->scrapeIntervalSec <= now)
{
tier->scrapeSoon();
}
if (response->min_interval != 0)
{
tier->announceMinIntervalSec = response->min_interval;
}
tier->lastAnnounceSucceeded = true;
tier->lastAnnouncePeerCount = std::size(response->pex) + std::size(response->pex6);
if (response->interval != 0)
{
tier->announceIntervalSec = response->interval;
}
if (is_stopped)
{
/* now that we've successfully stopped the torrent,
* we can reset the up/down/corrupt count we've kept
* for this tracker */
tier->byteCounts[TR_ANN_UP] = 0;
tier->byteCounts[TR_ANN_DOWN] = 0;
tier->byteCounts[TR_ANN_CORRUPT] = 0;
}
if (!std::empty(response->pex))
{
publishPeersPex(tier, seeders, leechers, response->pex);
}
if (!std::empty(response->pex6))
{
publishPeersPex(tier, seeders, leechers, response->pex6);
}
publishPeerCounts(tier, seeders, leechers);
tier->isRunning = data->is_running_on_success;
/* if the tracker included scrape fields in its announce response,
then a separate scrape isn't needed */
if (scrape_fields >= 3 || (scrape_fields >= 1 && tracker->scrape_info == nullptr))
{
tr_logAddTraceTier(
tier,
fmt::format(
"Announce response has scrape info; bumping next scrape to {} seconds from now.",
tier->scrapeIntervalSec));
tier->scheduleNextScrape();
tier->lastScrapeTime = now;
tier->lastScrapeSucceeded = true;
}
else if (tier->lastScrapeTime + tier->scrapeIntervalSec <= now)
{
tier->scrapeSoon();
}
tier->lastAnnounceSucceeded = true;
tier->lastAnnouncePeerCount = std::size(response->pex) + std::size(response->pex6);
if (is_stopped)
{
/* now that we've successfully stopped the torrent,
* we can reset the up/down/corrupt count we've kept
* for this tracker */
tier->byteCounts[TR_ANN_UP] = 0;
tier->byteCounts[TR_ANN_DOWN] = 0;
tier->byteCounts[TR_ANN_CORRUPT] = 0;
}
if (!is_stopped && std::empty(tier->announce_events))
{
/* the queue is empty, so enqueue a periodic update */
int const i = tier->announceIntervalSec;
tr_logAddTraceTier(tier, fmt::format("Sending periodic reannounce in {} seconds", i));
tier_announce_event_push(tier, TR_ANNOUNCE_EVENT_NONE, now + i);
}
if (!is_stopped && std::empty(tier->announce_events))
{
/* the queue is empty, so enqueue a periodic update */
int const i = tier->announceIntervalSec;
tr_logAddTraceTier(tier, fmt::format("Sending periodic reannounce in {} seconds", i));
tier_announce_event_push(tier, TR_ANNOUNCE_EVENT_NONE, now + i);
}
}
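One detail worth calling out in the block above: the scrape_fields counter decides whether the announce reply already doubled as a scrape, in which case the next scrape is merely rescheduled rather than sent. A condensed sketch of just that test (hypothetical free function; the condition is copied from the diff):

/* sketch only: does an announce response make a separate scrape redundant? */
bool announce_covers_scrape_sketch(int scrape_fields, bool has_scrape_info)
{
    /* either all three counters arrived, or at least one did and there is no scrape URL anyway */
    return scrape_fields >= 3 || (scrape_fields >= 1 && !has_scrape_info);
}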
@@ -1320,89 +1302,90 @@ static void on_scrape_done(tr_scrape_response const* response, void* vsession)
for (int i = 0; i < response->row_count; ++i)
{
auto const& row = response->rows[i];
auto* const tor = session->torrents().get(row.info_hash);
if (tor != nullptr)
if (tor == nullptr)
{
auto* tier = tor->torrent_announcer->getTierFromScrape(response->scrape_url);
continue;
}
if (tier == nullptr)
{
continue;
}
auto* const tier = tor->torrent_announcer->getTierFromScrape(response->scrape_url);
if (tier == nullptr)
{
continue;
}
tr_logAddTraceTier(
tier,
fmt::format(
"scraped url:{} "
" -- "
"did_connect:{} "
"did_timeout:{} "
"seeders:{} "
"leechers:{} "
"downloads:{} "
"downloaders:{} "
"min_request_interval:{} "
"err:{} ",
response->scrape_url.sv(),
response->did_connect,
response->did_timeout,
row.seeders,
row.leechers,
row.downloads,
row.downloaders,
response->min_request_interval,
std::empty(response->errmsg) ? "none"sv : response->errmsg));
tr_logAddTraceTier(
tier,
fmt::format(
"scraped url:{} "
" -- "
"did_connect:{} "
"did_timeout:{} "
"seeders:{} "
"leechers:{} "
"downloads:{} "
"downloaders:{} "
"min_request_interval:{} "
"err:{} ",
response->scrape_url.sv(),
response->did_connect,
response->did_timeout,
row.seeders,
row.leechers,
row.downloads,
row.downloaders,
response->min_request_interval,
std::empty(response->errmsg) ? "none"sv : response->errmsg));
tier->isScraping = false;
tier->lastScrapeTime = now;
tier->lastScrapeSucceeded = false;
tier->lastScrapeTimedOut = response->did_timeout;
tier->isScraping = false;
tier->lastScrapeTime = now;
tier->lastScrapeSucceeded = false;
tier->lastScrapeTimedOut = response->did_timeout;
if (!response->did_connect)
{
on_scrape_error(session, tier, _("Could not connect to tracker"));
}
else if (response->did_timeout)
{
on_scrape_error(session, tier, _("Tracker did not respond"));
}
else if (!std::empty(response->errmsg))
{
on_scrape_error(session, tier, response->errmsg.c_str());
}
else
{
tier->lastScrapeSucceeded = true;
tier->scrapeIntervalSec = std::max(int{ DefaultScrapeIntervalSec }, response->min_request_interval);
tier->scheduleNextScrape();
tr_logAddTraceTier(tier, fmt::format("Scrape successful. Rescraping in {} seconds.", tier->scrapeIntervalSec));
if (!response->did_connect)
{
on_scrape_error(session, tier, _("Could not connect to tracker"));
}
else if (response->did_timeout)
{
on_scrape_error(session, tier, _("Tracker did not respond"));
}
else if (!std::empty(response->errmsg))
{
on_scrape_error(session, tier, response->errmsg.c_str());
}
else
{
tier->lastScrapeSucceeded = true;
tier->scrapeIntervalSec = std::max(int{ DefaultScrapeIntervalSec }, response->min_request_interval);
tier->scheduleNextScrape();
tr_logAddTraceTier(tier, fmt::format("Scrape successful. Rescraping in {} seconds.", tier->scrapeIntervalSec));
if (tr_tracker* const tracker = tier->currentTracker(); tracker != nullptr)
if (tr_tracker* const tracker = tier->currentTracker(); tracker != nullptr)
{
if (row.seeders >= 0)
{
if (row.seeders >= 0)
{
tracker->seeder_count = row.seeders;
}
if (row.leechers >= 0)
{
tracker->leecher_count = row.leechers;
}
if (row.downloads >= 0)
{
tracker->download_count = row.downloads;
}
tracker->downloader_count = row.downloaders;
tracker->consecutive_failures = 0;
tracker->seeder_count = row.seeders;
}
if (row.seeders >= 0 && row.leechers >= 0 && row.downloads >= 0)
if (row.leechers >= 0)
{
publishPeerCounts(tier, row.seeders, row.leechers);
tracker->leecher_count = row.leechers;
}
if (row.downloads >= 0)
{
tracker->download_count = row.downloads;
}
tracker->downloader_count = row.downloaders;
tracker->consecutive_failures = 0;
}
if (row.seeders >= 0 && row.leechers >= 0 && row.downloads >= 0)
{
publishPeerCounts(tier, row.seeders, row.leechers);
}
}
}

View File

@@ -114,176 +114,12 @@ enum tr_announce_event
TR_ANNOUNCE_EVENT_STOPPED,
};
std::string_view tr_announce_event_get_string(tr_announce_event);
struct tr_announce_request
{
tr_announce_event event = {};
bool partial_seed = false;
/* the port we listen for incoming peers on */
tr_port port;
/* per-session key */
int key = 0;
/* the number of peers we'd like to get back in the response */
int numwant = 0;
/* the number of bytes we uploaded since the last 'started' event */
uint64_t up = 0;
/* the number of good bytes we downloaded since the last 'started' event */
uint64_t down = 0;
/* the number of bad bytes we downloaded since the last 'started' event */
uint64_t corrupt = 0;
/* the total size of the torrent minus the number of bytes completed */
uint64_t leftUntilComplete = 0;
/* the tracker's announce URL */
tr_interned_string announce_url;
/* key generated by and returned from an http tracker.
* see tr_announce_response.tracker_id_str */
std::string tracker_id;
/* the torrent's peer id.
* this changes when a torrent is stopped -> restarted. */
tr_peer_id_t peer_id;
/* the torrent's info_hash */
tr_sha1_digest_t info_hash;
/* the name to use when deep logging is enabled */
char log_name[128];
};
struct tr_announce_response
{
/* the torrent's info hash */
tr_sha1_digest_t info_hash = {};
/* whether or not we managed to connect to the tracker */
bool did_connect = false;
/* whether or not the scrape timed out */
bool did_timeout = false;
/* preferred interval between announces.
* transmission treats this as the interval for periodic announces */
int interval = 0;
/* minimum interval between announces. (optional)
* transmission treats this as the min interval for manual announces */
int min_interval = 0;
/* how many peers are seeding this torrent */
int seeders = -1;
/* how many peers are downloading this torrent */
int leechers = -1;
/* how many times this torrent has been downloaded */
int downloads = -1;
/* IPv4 peers that we acquired from the tracker */
std::vector<tr_pex> pex;
/* IPv6 peers that we acquired from the tracker */
std::vector<tr_pex> pex6;
/* human-readable error string on failure, or nullptr */
std::string errmsg;
/* human-readable warning string or nullptr */
std::string warning;
/* key generated by and returned from an http tracker.
* if this is provided, subsequent http announces must include this. */
std::string tracker_id;
/* tracker extension that returns the client's public IP address.
* https://www.bittorrent.org/beps/bep_0024.html */
std::optional<tr_address> external_ip;
};
struct tr_announce_request;
struct tr_announce_response;
using tr_announce_response_func = void (*)(tr_announce_response const* response, void* userdata);
/// SCRAPE
/* pick a number small enough for common tracker software:
* - ocelot has no upper bound
* - opentracker has an upper bound of 64
* - udp protocol has an upper bound of 74
* - xbtt has no upper bound
*
* This is only an upper bound: if the tracker complains about
* length, announcer will incrementally lower the batch size.
*/
auto inline constexpr TR_MULTISCRAPE_MAX = 60;
struct tr_scrape_request
{
/* the scrape URL */
tr_interned_string scrape_url;
/* the name to use when deep logging is enabled */
char log_name[128];
/* info hashes of the torrents to scrape */
std::array<tr_sha1_digest_t, TR_MULTISCRAPE_MAX> info_hash;
/* how many hashes to use in the info_hash field */
int info_hash_count = 0;
};
struct tr_scrape_response_row
{
/* the torrent's info_hash */
tr_sha1_digest_t info_hash;
/* how many peers are seeding this torrent */
int seeders = 0;
/* how many peers are downloading this torrent */
int leechers = 0;
/* how many times this torrent has been downloaded */
int downloads = 0;
/* the number of active downloaders in the swarm.
* this is a BEP 21 extension that some trackers won't support.
* http://www.bittorrent.org/beps/bep_0021.html#tracker-scrapes */
int downloaders = 0;
};
struct tr_scrape_response
{
/* whether or not we managed to connect to the tracker */
bool did_connect = false;
/* whether or not the scrape timed out */
bool did_timeout = false;
/* how many info hashes are in the 'rows' field */
int row_count;
/* the individual torrents' scrape results */
std::array<tr_scrape_response_row, TR_MULTISCRAPE_MAX> rows;
/* the raw scrape url */
tr_interned_string scrape_url;
/* human-readable error string on failure, or nullptr */
std::string errmsg;
/* minimum interval (in seconds) allowed between scrapes.
* this is an unofficial extension that some trackers won't support. */
int min_request_interval;
};
struct tr_scrape_request;
struct tr_scrape_response;
using tr_scrape_response_func = void (*)(tr_scrape_response const* response, void* user_data);
/// UDP ANNOUNCER

View File

@@ -10,9 +10,12 @@
#include <fmt/format.h>
#define LIBTRANSMISSION_ANNOUNCER_MODULE
#include "transmission.h"
#include "announcer.h"
#include "announcer-common.h"
#include "crypto-utils.h"
#include "peer-mgr.h" // for tr_pex
#include "timer-ev.h"