refactor: prefer constexpr over enum for non-enumerated constants (#1996)

refactor: prefer constexpr over enum for non-enumerated constants
This commit is contained in:
Charles Kerr 2021-10-20 19:39:05 -05:00 committed by GitHub
parent 241dc34019
commit 850b3caa61
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 431 additions and 441 deletions

View File

@ -97,10 +97,7 @@ static uint64_t evbuffer_read_ntoh_64(struct evbuffer* buf)
using tau_connection_t = uint64_t;
enum
{
TAU_CONNECTION_TTL_SECS = 60
};
static auto constexpr TauConnectionTtlSecs = int{ 60 };
using tau_transaction_t = uint32_t;
@ -145,10 +142,7 @@ static bool is_tau_response_message(tau_action_t action, size_t msglen)
return false;
}
enum
{
TAU_REQUEST_TTL = 60
};
static auto constexpr TauRequestTtl = int{ 60 };
/****
*****
@ -588,7 +582,7 @@ static void on_tracker_connection_response(struct tau_tracker* tracker, tau_acti
if (action == TAU_ACTION_CONNECT)
{
tracker->connection_id = evbuffer_read_ntoh_64(buf);
tracker->connection_expiration_time = now + TAU_CONNECTION_TTL_SECS;
tracker->connection_expiration_time = now + TauConnectionTtlSecs;
dbgmsg(tracker->key, "Got a new connection ID from tracker: %" PRIu64, tracker->connection_id);
}
else
@ -611,7 +605,7 @@ static void tau_tracker_timeout_reqs(struct tau_tracker* tracker)
time_t const now = time(nullptr);
bool const cancel_all = tracker->close_at != 0 && (tracker->close_at <= now);
if (tracker->connecting_at != 0 && tracker->connecting_at + TAU_REQUEST_TTL < now)
if (tracker->connecting_at != 0 && tracker->connecting_at + TauRequestTtl < now)
{
on_tracker_connection_response(tracker, TAU_ACTION_ERROR, nullptr);
}
@ -622,7 +616,7 @@ static void tau_tracker_timeout_reqs(struct tau_tracker* tracker)
{
auto* req = static_cast<struct tau_announce_request*>(tr_ptrArrayNth(reqs, i));
if (cancel_all || req->created_at + TAU_REQUEST_TTL < now)
if (cancel_all || req->created_at + TauRequestTtl < now)
{
dbgmsg(tracker->key, "timeout announce req %p", (void*)req);
tau_announce_request_fail(req, false, true, nullptr);
@ -639,7 +633,7 @@ static void tau_tracker_timeout_reqs(struct tau_tracker* tracker)
{
auto* const req = static_cast<struct tau_scrape_request*>(tr_ptrArrayNth(reqs, i));
if (cancel_all || req->created_at + TAU_REQUEST_TTL < now)
if (cancel_all || req->created_at + TauRequestTtl < now)
{
dbgmsg(tracker->key, "timeout scrape req %p", (void*)req);
tau_scrape_request_fail(req, false, true, nullptr);

View File

@ -48,28 +48,25 @@ static void tier_build_log_name(struct tr_tier const* tier, char* buf, size_t bu
} \
} while (0)
enum
{
/* unless the tracker says otherwise, rescrape this frequently */
DEFAULT_SCRAPE_INTERVAL_SEC = (60 * 30),
/* unless the tracker says otherwise, this is the announce interval */
DEFAULT_ANNOUNCE_INTERVAL_SEC = (60 * 10),
/* unless the tracker says otherwise, this is the announce min_interval */
DEFAULT_ANNOUNCE_MIN_INTERVAL_SEC = (60 * 2),
/* the value of the 'numwant' argument passed in tracker requests. */
NUMWANT = 80,
/* unless the tracker says otherwise, rescrape this frequently */
static auto constexpr DefaultScrapeIntervalSec = int{ 60 * 30 };
/* unless the tracker says otherwise, this is the announce interval */
static auto constexpr DefaultAnnounceIntervalSec = int{ 60 * 10 };
/* unless the tracker says otherwise, this is the announce min_interval */
static auto constexpr DefaultAnnounceMinIntervalSec = int{ 60 * 2 };
/* the value of the 'numwant' argument passed in tracker requests. */
static auto constexpr Numwant = int{ 80 };
/* how often to announce & scrape */
UPKEEP_INTERVAL_MSEC = 500,
MAX_ANNOUNCES_PER_UPKEEP = 20,
MAX_SCRAPES_PER_UPKEEP = 20,
/* how often to announce & scrape */
static auto constexpr UpkeepIntervalMsec = int{ 500 };
static auto constexpr MaxAnnouncesPerUpkeep = int{ 20 };
static auto constexpr MaxScrapesPerUpkeep = int{ 20 };
/* this is how often to call the UDP tracker upkeep */
TAU_UPKEEP_INTERVAL_SECS = 5,
/* this is how often to call the UDP tracker upkeep */
static auto constexpr TauUpkeepIntervalSecs = int{ 5 };
/* how many infohashes to remove when we get a scrape-too-long error */
TR_MULTISCRAPE_STEP = 5
};
/* how many infohashes to remove when we get a scrape-too-long error */
static auto constexpr TrMultiscrapeStep = int{ 5 };
/***
****
@ -182,7 +179,7 @@ void tr_announcerInit(tr_session* session)
a->key = tr_rand_int(INT_MAX);
a->session = session;
a->upkeepTimer = evtimer_new(session->event_base, onUpkeepTimer, a);
tr_timerAddMsec(a->upkeepTimer, UPKEEP_INTERVAL_MSEC);
tr_timerAddMsec(a->upkeepTimer, UpkeepIntervalMsec);
session->announcer = a;
}
@ -350,9 +347,9 @@ static void tierConstruct(tr_tier* tier, tr_torrent* tor)
tier->key = nextKey++;
tier->currentTrackerIndex = -1;
tier->scrapeIntervalSec = DEFAULT_SCRAPE_INTERVAL_SEC;
tier->announceIntervalSec = DEFAULT_ANNOUNCE_INTERVAL_SEC;
tier->announceMinIntervalSec = DEFAULT_ANNOUNCE_MIN_INTERVAL_SEC;
tier->scrapeIntervalSec = DefaultScrapeIntervalSec;
tier->announceIntervalSec = DefaultAnnounceIntervalSec;
tier->announceMinIntervalSec = DefaultAnnounceMinIntervalSec;
tier->scrapeAt = get_next_scrape_time(tor->session, tier, 0);
tier->tor = tor;
}
@ -380,9 +377,9 @@ static void tierIncrementTracker(tr_tier* tier)
tier->currentTracker = &tier->trackers[i];
/* reset some of the tier's fields */
tier->scrapeIntervalSec = DEFAULT_SCRAPE_INTERVAL_SEC;
tier->announceIntervalSec = DEFAULT_ANNOUNCE_INTERVAL_SEC;
tier->announceMinIntervalSec = DEFAULT_ANNOUNCE_MIN_INTERVAL_SEC;
tier->scrapeIntervalSec = DefaultScrapeIntervalSec;
tier->announceIntervalSec = DefaultAnnounceIntervalSec;
tier->announceMinIntervalSec = DefaultAnnounceMinIntervalSec;
tier->isAnnouncing = false;
tier->isScraping = false;
tier->lastAnnounceStartTime = 0;
@ -944,7 +941,7 @@ static tr_announce_request* announce_request_new(
req->corrupt = tier->byteCounts[TR_ANN_CORRUPT];
req->leftUntilComplete = tr_torrentHasMetadata(tor) ? tor->info.totalSize - tr_torrentHaveTotal(tor) : INT64_MAX;
req->event = event;
req->numwant = event == TR_ANNOUNCE_EVENT_STOPPED ? 0 : NUMWANT;
req->numwant = event == TR_ANNOUNCE_EVENT_STOPPED ? 0 : Numwant;
req->key = announcer->key;
req->partial_seed = tr_torrentGetCompleteness(tor) == TR_PARTIAL_SEED;
tier_build_log_name(tier, req->log_name, sizeof(req->log_name));
@ -1420,7 +1417,7 @@ static void on_scrape_done(tr_scrape_response const* response, void* vsession)
else
{
tier->lastScrapeSucceeded = true;
tier->scrapeIntervalSec = std::max(int{ DEFAULT_SCRAPE_INTERVAL_SEC }, response->min_request_interval);
tier->scrapeIntervalSec = std::max(int{ DefaultScrapeIntervalSec }, response->min_request_interval);
tier->scrapeAt = get_next_scrape_time(session, tier, tier->scrapeIntervalSec);
tr_logAddTorDbg(tier->tor, "Scrape successful. Rescraping in %d seconds.", tier->scrapeIntervalSec);
@ -1469,7 +1466,7 @@ static void on_scrape_done(tr_scrape_response const* response, void* vsession)
error out, lower the value once for that batch, not N times. */
if (*multiscrape_max >= response->row_count)
{
int const n = std::max(1, int{ *multiscrape_max - TR_MULTISCRAPE_STEP });
int const n = std::max(1, int{ *multiscrape_max - TrMultiscrapeStep });
if (*multiscrape_max != n)
{
char* scheme = nullptr;
@ -1518,7 +1515,7 @@ static void multiscrape(tr_announcer* announcer, std::vector<tr_tier*> const& ti
{
size_t request_count = 0;
time_t const now = tr_time();
tr_scrape_request requests[MAX_SCRAPES_PER_UPKEEP] = {};
tr_scrape_request requests[MaxScrapesPerUpkeep] = {};
/* batch as many info_hashes into a request as we can */
for (auto* tier : tiers)
@ -1551,7 +1548,7 @@ static void multiscrape(tr_announcer* announcer, std::vector<tr_tier*> const& ti
}
/* otherwise, if there's room for another request, build a new one */
if (!found && request_count < MAX_SCRAPES_PER_UPKEEP)
if (!found && request_count < MaxScrapesPerUpkeep)
{
tr_scrape_request* req = &requests[request_count++];
req->url = scrape_info->url.c_str();
@ -1679,14 +1676,14 @@ static void scrapeAndAnnounceMore(tr_announcer* announcer)
/* Second, announce what we can. If there aren't enough slots
* available, use compareAnnounceTiers to prioritize. */
if (announce_me.size() > MAX_ANNOUNCES_PER_UPKEEP)
if (announce_me.size() > MaxAnnouncesPerUpkeep)
{
std::partial_sort(
std::begin(announce_me),
std::begin(announce_me) + MAX_ANNOUNCES_PER_UPKEEP,
std::begin(announce_me) + MaxAnnouncesPerUpkeep,
std::end(announce_me),
[](auto const* a, auto const* b) { return compareAnnounceTiers(a, b) < 0; });
announce_me.resize(MAX_ANNOUNCES_PER_UPKEEP);
announce_me.resize(MaxAnnouncesPerUpkeep);
}
for (auto*& tier : announce_me)
@ -1717,12 +1714,12 @@ static void onUpkeepTimer([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]]
/* TAU upkeep */
if (announcer->tauUpkeepAt <= now)
{
announcer->tauUpkeepAt = now + TAU_UPKEEP_INTERVAL_SECS;
announcer->tauUpkeepAt = now + TauUpkeepIntervalSecs;
tr_tracker_udp_upkeep(session);
}
/* set up the next timer */
tr_timerAddMsec(announcer->upkeepTimer, UPKEEP_INTERVAL_MSEC);
tr_timerAddMsec(announcer->upkeepTimer, UpkeepIntervalMsec);
tr_sessionUnlock(session);
}

View File

@ -128,11 +128,8 @@ char* tr_ssha1(char const* plain_text)
{
TR_ASSERT(plain_text != nullptr);
enum
{
saltval_len = 8,
salter_len = 64
};
auto constexpr SaltvalLen = int{ 8 };
auto constexpr SalterLen = int{ 64 };
static char const* salter =
"0123456789"
@ -140,21 +137,21 @@ char* tr_ssha1(char const* plain_text)
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"./";
unsigned char salt[saltval_len];
unsigned char salt[SaltvalLen];
uint8_t sha[SHA_DIGEST_LENGTH];
char buf[2 * SHA_DIGEST_LENGTH + saltval_len + 2];
char buf[2 * SHA_DIGEST_LENGTH + SaltvalLen + 2];
tr_rand_buffer(salt, saltval_len);
tr_rand_buffer(salt, SaltvalLen);
for (auto& ch : salt)
{
ch = salter[ch % salter_len];
ch = salter[ch % SalterLen];
}
tr_sha1(sha, plain_text, (int)strlen(plain_text), salt, saltval_len, nullptr);
tr_sha1(sha, plain_text, (int)strlen(plain_text), salt, SaltvalLen, nullptr);
tr_sha1_to_hex(&buf[1], sha);
memcpy(&buf[1 + 2 * SHA_DIGEST_LENGTH], &salt, saltval_len);
buf[1 + 2 * SHA_DIGEST_LENGTH + saltval_len] = '\0';
memcpy(&buf[1 + 2 * SHA_DIGEST_LENGTH], &salt, SaltvalLen);
buf[1 + 2 * SHA_DIGEST_LENGTH + SaltvalLen] = '\0';
buf[0] = '{'; /* signal that this is a hash. this makes saving/restoring easier */
return tr_strdup(buf);

View File

@ -38,24 +38,21 @@
#define HANDSHAKE_NAME "\023BitTorrent protocol"
enum
{
/* BitTorrent Handshake Constants */
HANDSHAKE_NAME_LEN = 20,
HANDSHAKE_FLAGS_LEN = 8,
HANDSHAKE_SIZE = 68,
INCOMING_HANDSHAKE_LEN = 48,
/* Encryption Constants */
PadA_MAXLEN = 512,
PadB_MAXLEN = 512,
PadC_MAXLEN = 512,
PadD_MAXLEN = 512,
VC_LENGTH = 8,
CRYPTO_PROVIDE_PLAINTEXT = 1,
CRYPTO_PROVIDE_CRYPTO = 2,
/* how long to wait before giving up on a handshake */
HANDSHAKE_TIMEOUT_SEC = 30
};
// bittorrent handshake constants
static auto constexpr HANDSHAKE_NAME_LEN = int{ 20 };
static auto constexpr HANDSHAKE_FLAGS_LEN = int{ 8 };
static auto constexpr HANDSHAKE_SIZE = int{ 68 };
static auto constexpr INCOMING_HANDSHAKE_LEN = int{ 48 };
// encryption constants
static auto constexpr PadA_MAXLEN = int{ 512 };
static auto constexpr PadB_MAXLEN = int{ 512 };
static auto constexpr VC_LENGTH = int{ 8 };
static auto constexpr CRYPTO_PROVIDE_PLAINTEXT = int{ 1 };
static auto constexpr CRYPTO_PROVIDE_CRYPTO = int{ 2 };
// how long to wait before giving up on a handshake
static auto constexpr HANDSHAKE_TIMEOUT_SEC = int{ 30 };
#ifdef ENABLE_LTEP
#define HANDSHAKE_HAS_LTEP(bits) (((bits)[5] & 0x10) != 0)

View File

@ -41,48 +41,53 @@
#include "utils.h"
#include "webseed.h"
enum
{
/* how frequently to cull old atoms */
ATOM_PERIOD_MSEC = (60 * 1000),
/* how frequently to change which peers are choked */
RECHOKE_PERIOD_MSEC = (10 * 1000),
/* an optimistically unchoked peer is immune from rechoking
for this many calls to rechokeUploads(). */
OPTIMISTIC_UNCHOKE_MULTIPLIER = 4,
/* how frequently to reallocate bandwidth */
BANDWIDTH_PERIOD_MSEC = 500,
/* how frequently to age out old piece request lists */
REFILL_UPKEEP_PERIOD_MSEC = (10 * 1000),
/* how frequently to decide which peers live and die */
RECONNECT_PERIOD_MSEC = 500,
/* when many peers are available, keep idle ones this long */
MIN_UPLOAD_IDLE_SECS = (60),
/* when few peers are available, keep idle ones this long */
MAX_UPLOAD_IDLE_SECS = (60 * 5),
/* max number of peers to ask for per second overall.
* this throttle is to avoid overloading the router */
MAX_CONNECTIONS_PER_SECOND = 12,
/* number of bad pieces a peer is allowed to send before we ban them */
MAX_BAD_PIECES_PER_PEER = 5,
/* amount of time to keep a list of request pieces lying around
before it's considered too old and needs to be rebuilt */
PIECE_LIST_SHELF_LIFE_SECS = 60,
/* use for bitwise operations w/peer_atom.flags2 */
MYFLAG_BANNED = 1,
/* use for bitwise operations w/peer_atom.flags2 */
/* unreachable for now... but not banned.
* if they try to connect to us it's okay */
MYFLAG_UNREACHABLE = 2,
/* the minimum we'll wait before attempting to reconnect to a peer */
MINIMUM_RECONNECT_INTERVAL_SECS = 5,
/** how long we'll let requests we've made linger before we cancel them */
REQUEST_TTL_SECS = 90,
/* */
NO_BLOCKS_CANCEL_HISTORY = 120,
/* */
CANCEL_HISTORY_SEC = 60
};
// how frequently to cull old atoms
static auto constexpr AtomPeriodMsec = int{ 60 * 1000 };
// how frequently to change which peers are choked
static auto constexpr RechokePeriodMsec = int{ 10 * 1000 };
// an optimistically unchoked peer is immune from rechoking
// for this many calls to rechokeUploads().
static auto constexpr OptimisticUnchokeMultiplier = int{ 4 };
// how frequently to reallocate bandwidth
static auto constexpr BandwidthPeriodMsec = int{ 500 };
// how frequently to age out old piece request lists
static auto constexpr RefillUpkeepPeriodMsec = int{ 10 * 1000 };
// how frequently to decide which peers live and die
static auto constexpr ReconnectPeriodMsec = int{ 500 };
// when many peers are available, keep idle ones this long
static auto constexpr MinUploadIdleSecs = int{ 60 };
// when few peers are available, keep idle ones this long
static auto constexpr MaxUploadIdleSecs = int{ 60 * 5 };
// max number of peers to ask for per second overall.
// this throttle is to avoid overloading the router
static auto constexpr MaxConnectionsPerSecond = int{ 12 };
// number of bad pieces a peer is allowed to send before we ban them
static auto constexpr MaxBadPiecesPerPeer = int{ 5 };
// use for bitwise operations w/peer_atom.flags2
static auto constexpr MyflagBanned = int{ 1 };
// use for bitwise operations w/peer_atom.flags2
// unreachable for now... but not banned.
// if they try to connect to us it's okay
static auto constexpr MyflagUnreachable = int{ 2 };
// the minimum we'll wait before attempting to reconnect to a peer
static auto constexpr MinimumReconnectIntervalSecs = int{ 5 };
// how long we'll let requests we've made linger before we cancel them
static auto constexpr RequestTtlSecs = int{ 90 };
static auto constexpr CancelHistorySec = int{ 60 };
/**
***
@ -1439,7 +1444,7 @@ static void refillUpkeep([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]] s
managerLock(mgr);
time_t const now = tr_time();
time_t const too_old = now - REQUEST_TTL_SECS;
time_t const too_old = now - RequestTtlSecs;
/* alloc the temporary "cancel" buffer */
for (auto const* tor : mgr->session->torrents)
@ -1513,7 +1518,7 @@ static void refillUpkeep([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]] s
}
tr_free(cancel);
tr_timerAddMsec(mgr->refillUpkeepTimer, REFILL_UPKEEP_PERIOD_MSEC);
tr_timerAddMsec(mgr->refillUpkeepTimer, RefillUpkeepPeriodMsec);
managerUnlock(mgr);
}
@ -1521,10 +1526,10 @@ static void addStrike(tr_swarm* s, tr_peer* peer)
{
tordbg(s, "increasing peer %s strike count to %d", tr_atomAddrStr(peer->atom), peer->strikes + 1);
if (++peer->strikes >= MAX_BAD_PIECES_PER_PEER)
if (++peer->strikes >= MaxBadPiecesPerPeer)
{
struct peer_atom* atom = peer->atom;
atom->flags2 |= MYFLAG_BANNED;
atom->flags2 |= MyflagBanned;
peer->doPurge = true;
tordbg(s, "banning peer %s", tr_atomAddrStr(atom));
}
@ -1971,7 +1976,7 @@ static bool myHandshakeDoneCB(
if (!readAnythingFromPeer)
{
tordbg(s, "marking peer %s as unreachable... numFails is %d", tr_atomAddrStr(atom), (int)atom->numFails);
atom->flags2 |= MYFLAG_UNREACHABLE;
atom->flags2 |= MyflagUnreachable;
}
}
}
@ -1987,7 +1992,7 @@ static bool myHandshakeDoneCB(
if (!tr_peerIoIsIncoming(io))
{
atom->flags |= ADDED_F_CONNECTABLE;
atom->flags2 &= ~MYFLAG_UNREACHABLE;
atom->flags2 &= ~MyflagUnreachable;
}
/* In principle, this flag specifies whether the peer groks uTP,
@ -1997,7 +2002,7 @@ static bool myHandshakeDoneCB(
atom->flags |= ADDED_F_UTP_FLAGS;
}
if ((atom->flags2 & MYFLAG_BANNED) != 0)
if ((atom->flags2 & MyflagBanned) != 0)
{
tordbg(s, "banned peer %s tried to reconnect", tr_atomAddrStr(atom));
}
@ -2286,7 +2291,7 @@ static bool isAtomInteresting(tr_torrent const* tor, struct peer_atom* atom)
return false;
}
if ((atom->flags2 & MYFLAG_BANNED) != 0)
if ((atom->flags2 & MyflagBanned) != 0)
{
return false;
}
@ -2391,22 +2396,22 @@ static void ensureMgrTimersExist(struct tr_peerMgr* m)
{
if (m->atomTimer == nullptr)
{
m->atomTimer = createTimer(m->session, ATOM_PERIOD_MSEC, atomPulse, m);
m->atomTimer = createTimer(m->session, AtomPeriodMsec, atomPulse, m);
}
if (m->bandwidthTimer == nullptr)
{
m->bandwidthTimer = createTimer(m->session, BANDWIDTH_PERIOD_MSEC, bandwidthPulse, m);
m->bandwidthTimer = createTimer(m->session, BandwidthPeriodMsec, bandwidthPulse, m);
}
if (m->rechokeTimer == nullptr)
{
m->rechokeTimer = createTimer(m->session, RECHOKE_PERIOD_MSEC, rechokePulse, m);
m->rechokeTimer = createTimer(m->session, RechokePeriodMsec, rechokePulse, m);
}
if (m->refillUpkeepTimer == nullptr)
{
m->refillUpkeepTimer = createTimer(m->session, REFILL_UPKEEP_PERIOD_MSEC, refillUpkeep, m);
m->refillUpkeepTimer = createTimer(m->session, RefillUpkeepPeriodMsec, refillUpkeep, m);
}
}
@ -2744,10 +2749,10 @@ struct tr_peer_stat* tr_peerMgrPeerStats(tr_torrent const* tor, int* setmeCount)
stat->isUploadingTo = msgs->is_active(TR_CLIENT_TO_PEER);
stat->isSeed = tr_peerIsSeed(peer);
stat->blocksToPeer = peer->blocksSentToPeer.count(now, CANCEL_HISTORY_SEC);
stat->blocksToClient = peer->blocksSentToClient.count(now, CANCEL_HISTORY_SEC);
stat->cancelsToPeer = peer->cancelsSentToPeer.count(now, CANCEL_HISTORY_SEC);
stat->cancelsToClient = peer->cancelsSentToClient.count(now, CANCEL_HISTORY_SEC);
stat->blocksToPeer = peer->blocksSentToPeer.count(now, CancelHistorySec);
stat->blocksToClient = peer->blocksSentToClient.count(now, CancelHistorySec);
stat->cancelsToPeer = peer->cancelsSentToPeer.count(now, CancelHistorySec);
stat->cancelsToClient = peer->cancelsSentToClient.count(now, CancelHistorySec);
stat->pendingReqsToPeer = peer->pendingReqsToPeer;
stat->pendingReqsToClient = peer->pendingReqsToClient;
@ -2893,7 +2898,7 @@ static void rechokeDownloads(tr_swarm* s)
int maxPeers = 0;
int rechoke_count = 0;
struct tr_rechoke_info* rechoke = nullptr;
int const MIN_INTERESTING_PEERS = 5;
auto constexpr MinInterestingPeers = 5;
int const peerCount = tr_ptrArraySize(&s->peers);
time_t const now = tr_time();
@ -2930,8 +2935,8 @@ static void rechokeDownloads(tr_swarm* s)
for (int i = 0; i < peerCount; ++i)
{
auto const* const peer = static_cast<tr_peer const*>(tr_ptrArrayNth(&s->peers, i));
auto const b = peer->blocksSentToClient.count(now, CANCEL_HISTORY_SEC);
auto const c = peer->cancelsSentToPeer.count(now, CANCEL_HISTORY_SEC);
auto const b = peer->blocksSentToClient.count(now, CancelHistorySec);
auto const c = peer->cancelsSentToPeer.count(now, CancelHistorySec);
if (b == 0) /* ignore unresponsive peers, as described above */
{
@ -2962,7 +2967,7 @@ static void rechokeDownloads(tr_swarm* s)
if (timeSinceCancel != 0)
{
int const maxIncrease = 15;
time_t const maxHistory = 2 * CANCEL_HISTORY_SEC;
time_t const maxHistory = 2 * CancelHistorySec;
double const mult = std::min(timeSinceCancel, maxHistory) / (double)maxHistory;
int const inc = maxIncrease * mult;
maxPeers = s->maxPeers + inc;
@ -2975,15 +2980,7 @@ static void rechokeDownloads(tr_swarm* s)
}
/* don't let the previous section's number tweaking go too far... */
if (maxPeers < MIN_INTERESTING_PEERS)
{
maxPeers = MIN_INTERESTING_PEERS;
}
if (maxPeers > s->tor->maxConnectedPeers)
{
maxPeers = s->tor->maxConnectedPeers;
}
maxPeers = std::clamp(maxPeers, MinInterestingPeers, int(s->tor->maxConnectedPeers));
s->maxPeers = maxPeers;
@ -3012,8 +3009,8 @@ static void rechokeDownloads(tr_swarm* s)
else
{
auto rechoke_state = tr_rechoke_state{};
auto const blocks = peer->blocksSentToClient.count(now, CANCEL_HISTORY_SEC);
auto const cancels = peer->cancelsSentToPeer.count(now, CANCEL_HISTORY_SEC);
auto const blocks = peer->blocksSentToClient.count(now, CancelHistorySec);
auto const cancels = peer->cancelsSentToPeer.count(now, CancelHistorySec);
if (blocks == 0 && cancels == 0)
{
@ -3259,7 +3256,7 @@ static void rechokeUploads(tr_swarm* s, uint64_t const now)
auto* c = randPool[tr_rand_int_weak(n)];
c->isChoked = false;
s->optimistic = c->msgs;
s->optimisticUnchokeTimeScaler = OPTIMISTIC_UNCHOKE_MULTIPLIER;
s->optimisticUnchokeTimeScaler = OptimisticUnchokeMultiplier;
}
}
@ -3293,7 +3290,7 @@ static void rechokePulse([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]] s
}
}
tr_timerAddMsec(mgr->rechokeTimer, RECHOKE_PERIOD_MSEC);
tr_timerAddMsec(mgr->rechokeTimer, RechokePeriodMsec);
managerUnlock(mgr);
}
@ -3329,8 +3326,8 @@ static bool shouldPeerBeClosed(tr_swarm const* s, tr_peer const* peer, int peerC
* if we have zero connections, strictness is 0% */
float const strictness = peerCount >= relaxStrictnessIfFewerThanN ? 1.0 :
peerCount / (float)relaxStrictnessIfFewerThanN;
int const lo = MIN_UPLOAD_IDLE_SECS;
int const hi = MAX_UPLOAD_IDLE_SECS;
int const lo = MinUploadIdleSecs;
int const hi = MaxUploadIdleSecs;
int const limit = hi - (hi - lo) * strictness;
int const idleTime = now - std::max(atom->time, atom->piece_data_time);
@ -3374,14 +3371,14 @@ static tr_peer** getPeersToClose(tr_swarm* s, time_t const now_sec, int* setmeSi
static int getReconnectIntervalSecs(struct peer_atom const* atom, time_t const now)
{
auto sec = int{};
bool const unreachable = (atom->flags2 & MYFLAG_UNREACHABLE) != 0;
bool const unreachable = (atom->flags2 & MyflagUnreachable) != 0;
/* if we were recently connected to this peer and transferring piece
* data, try to reconnect to them sooner rather that later -- we don't
* want network troubles to get in the way of a good peer. */
if (!unreachable && now - atom->piece_data_time <= MINIMUM_RECONNECT_INTERVAL_SECS * 2)
if (!unreachable && now - atom->piece_data_time <= MinimumReconnectIntervalSecs * 2)
{
sec = MINIMUM_RECONNECT_INTERVAL_SECS;
sec = MinimumReconnectIntervalSecs;
}
/* otherwise, the interval depends on how many times we've tried
* and failed to connect to the peer */
@ -3695,8 +3692,8 @@ static void reconnectPulse([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]]
}
/* try to make new peer connections */
int const MAX_CONNECTIONS_PER_PULSE = (int)(MAX_CONNECTIONS_PER_SECOND * (RECONNECT_PERIOD_MSEC / 1000.0));
makeNewPeerConnections(mgr, MAX_CONNECTIONS_PER_PULSE);
int const MaxConnectionsPerPulse = (int)(MaxConnectionsPerSecond * (ReconnectPeriodMsec / 1000.0));
makeNewPeerConnections(mgr, MaxConnectionsPerPulse);
}
/****
@ -3748,8 +3745,8 @@ static void bandwidthPulse([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]]
pumpAllPeers(mgr);
/* allocate bandwidth to the peers */
session->bandwidth->allocate(TR_UP, BANDWIDTH_PERIOD_MSEC);
session->bandwidth->allocate(TR_DOWN, BANDWIDTH_PERIOD_MSEC);
session->bandwidth->allocate(TR_UP, BandwidthPeriodMsec);
session->bandwidth->allocate(TR_DOWN, BandwidthPeriodMsec);
/* torrent upkeep */
for (auto* tor : session->torrents)
@ -3781,7 +3778,7 @@ static void bandwidthPulse([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]]
reconnectPulse(0, 0, mgr);
tr_timerAddMsec(mgr->bandwidthTimer, BANDWIDTH_PERIOD_MSEC);
tr_timerAddMsec(mgr->bandwidthTimer, BandwidthPeriodMsec);
managerUnlock(mgr);
}
@ -3917,7 +3914,7 @@ static void atomPulse([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]] shor
}
}
tr_timerAddMsec(mgr->atomTimer, ATOM_PERIOD_MSEC);
tr_timerAddMsec(mgr->atomTimer, AtomPeriodMsec);
managerUnlock(mgr);
}
@ -3955,7 +3952,7 @@ static bool isPeerCandidate(tr_torrent const* tor, struct peer_atom* atom, time_
}
/* not if they're banned... */
if ((atom->flags2 & MYFLAG_BANNED) != 0)
if ((atom->flags2 & MyflagBanned) != 0)
{
return false;
}
@ -4214,7 +4211,7 @@ static void initiateConnection(tr_peerMgr* mgr, tr_swarm* s, struct peer_atom* a
if (io == nullptr)
{
tordbg(s, "peerIo not created; marking peer %s as unreachable", tr_atomAddrStr(atom));
atom->flags2 |= MYFLAG_UNREACHABLE;
atom->flags2 |= MyflagUnreachable;
atom->numFails++;
}
else

View File

@ -42,61 +42,85 @@
***
**/
enum
// these values are hardcoded by various BEPs as noted
enum BitTorrentMessages
{
// http://bittorrent.org/beps/bep_0003.html#peer-messages
BtChoke = 0,
BtUnchoke = 1,
BtInterested = 2,
BtNotInterested = 3,
BtHave = 4,
BtBitfield = 5,
BtRequest = 6,
BtPiece = 7,
BtCancel = 8,
BtPort = 9,
// https://www.bittorrent.org/beps/bep_0006.html
BtFextSuggest = 13,
BtFextHaveAll = 14,
BtFextHaveNone = 15,
BtFextReject = 16,
BtFextAllowedFast = 17,
// http://bittorrent.org/beps/bep_0010.html
// see also LtepMessageIds below
BtLtep = 20
};
enum LtepMessages
{
BT_CHOKE = 0,
BT_UNCHOKE = 1,
BT_INTERESTED = 2,
BT_NOT_INTERESTED = 3,
BT_HAVE = 4,
BT_BITFIELD = 5,
BT_REQUEST = 6,
BT_PIECE = 7,
BT_CANCEL = 8,
BT_PORT = 9,
/* */
BT_FEXT_SUGGEST = 13,
BT_FEXT_HAVE_ALL = 14,
BT_FEXT_HAVE_NONE = 15,
BT_FEXT_REJECT = 16,
BT_FEXT_ALLOWED_FAST = 17,
/* */
BT_LTEP = 20,
/* */
LTEP_HANDSHAKE = 0,
/* */
};
// http://bittorrent.org/beps/bep_0010.html
// Client-defined extension message IDs that we tell peers about
// in the LTEP handshake and will respond to when sent in an LTEP
// message.
enum LtepMessageIds
{
// we support peer exchange (bep 11)
UT_PEX_ID = 1,
// we support sending metadata files (bep 9)
// see also MetadataMsgType below
UT_METADATA_ID = 3,
/* */
MIN_CHOKE_PERIOD_SEC = 10,
/* idle seconds before we send a keepalive */
KEEPALIVE_INTERVAL_SECS = 100,
/* */
PEX_INTERVAL_SECS = 90, /* sec between sendPex() calls */
/* */
REQQ = 512,
/* */
METADATA_REQQ = 64,
/* */
MAGIC_NUMBER = 21549,
/* used in lowering the outMessages queue period */
IMMEDIATE_PRIORITY_INTERVAL_SECS = 0,
HIGH_PRIORITY_INTERVAL_SECS = 2,
LOW_PRIORITY_INTERVAL_SECS = 10,
/* number of pieces we'll allow in our fast set */
MAX_FAST_SET_SIZE = 3,
/* how many blocks to keep prefetched per peer */
PREFETCH_SIZE = 18,
/* when we're making requests from another peer,
batch them together to send enough requests to
meet our bandwidth goals for the next N seconds */
REQUEST_BUF_SECS = 10,
/* defined in BEP #9 */
};
// http://bittorrent.org/beps/bep_0009.html
enum MetadataMsgType
{
METADATA_MSG_TYPE_REQUEST = 0,
METADATA_MSG_TYPE_DATA = 1,
METADATA_MSG_TYPE_REJECT = 2
};
// seconds between sendPex() calls
static auto constexpr PexIntervalSecs = int{ 90 };
static auto constexpr MinChokePeriodSec = int{ 10 };
// idle seconds before we send a keepalive
static auto constexpr KeepaliveIntervalSecs = int{ 100 };
static auto constexpr MetadataReqQ = int{ 64 };
static auto constexpr ReqQ = int{ 512 };
// used in lowering the outMessages queue period
static auto constexpr ImmediatePriorityIntervalSecs = int{ 0 };
static auto constexpr HighPriorityIntervalSecs = int{ 2 };
static auto constexpr LowPriorityIntervalSecs = int{ 10 };
// how many blocks to keep prefetched per peer
static auto constexpr PrefetchSize = int{ 18 };
// when we're making requests from another peer,
// batch them together to send enough requests to
// meet our bandwidth goals for the next N seconds
static auto constexpr RequestBufSecs = int{ 10 };
namespace
{
@ -106,10 +130,10 @@ constexpr int MAX_PEX_PEER_COUNT = 50;
enum
{
AWAITING_BT_LENGTH,
AWAITING_BT_ID,
AWAITING_BT_MESSAGE,
AWAITING_BT_PIECE
AwaitingBtLength,
AwaitingBtId,
AwaitingBtMessage,
AwaitingBtPiece
};
enum encryption_preference_t
@ -198,7 +222,7 @@ class tr_peerMsgsImpl : public tr_peerMsgs
public:
tr_peerMsgsImpl(tr_torrent* torrent_in, peer_atom* atom_in, tr_peerIo* io_in, tr_peer_callback callback, void* callbackData)
: tr_peerMsgs{ torrent_in, atom_in }
, outMessagesBatchPeriod{ LOW_PRIORITY_INTERVAL_SECS }
, outMessagesBatchPeriod{ LowPriorityIntervalSecs }
, torrent{ torrent_in }
, outMessages{ evbuffer_new() }
, io{ io_in }
@ -208,7 +232,7 @@ public:
if (tr_torrentAllowsPex(torrent))
{
pex_timer.reset(evtimer_new(torrent->session->event_base, pexPulse, this));
tr_timerAdd(pex_timer.get(), PEX_INTERVAL_SECS, 0);
tr_timerAdd(pex_timer.get(), PexIntervalSecs, 0);
}
if (tr_peerIoSupportsUTP(io))
@ -330,7 +354,7 @@ public:
bool is_reading_block(tr_block_index_t block) const override
{
return state == AWAITING_BT_PIECE && block == _tr_block(torrent, incoming.blockReq.index, incoming.blockReq.offset);
return state == AwaitingBtPiece && block == _tr_block(torrent, incoming.blockReq.index, incoming.blockReq.offset);
}
void cancel_block_request(tr_block_index_t block) override
@ -341,7 +365,7 @@ public:
void set_choke(bool peer_is_choked) override
{
time_t const now = tr_time();
time_t const fibrillationTime = now - MIN_CHOKE_PERIOD_SEC;
time_t const fibrillationTime = now - MinChokePeriodSec;
if (chokeChangedAt > fibrillationTime)
{
@ -564,7 +588,7 @@ public:
* very quickly; others aren't as urgent. */
int8_t outMessagesBatchPeriod;
uint8_t state = AWAITING_BT_LENGTH;
uint8_t state = AwaitingBtLength;
uint8_t ut_pex_id = 0;
uint8_t ut_metadata_id = 0;
uint16_t pexCount = 0;
@ -576,6 +600,8 @@ public:
size_t metadata_size_hint = 0;
#if 0
/* number of pieces we'll allow in our fast set */
static auto constexpr MAX_FAST_SET_SIZE = int{ 3 };
size_t fastsetSize;
tr_piece_index_t fastset[MAX_FAST_SET_SIZE];
#endif
@ -584,9 +610,9 @@ public:
evbuffer* const outMessages; /* all the non-piece messages */
struct peer_request peerAskedFor[REQQ] = {};
struct peer_request peerAskedFor[ReqQ] = {};
int peerAskedForMetadata[METADATA_REQQ] = {};
int peerAskedForMetadata[MetadataReqQ] = {};
int peerAskedForMetadataCount = 0;
tr_pex* pex = nullptr;
@ -695,7 +721,7 @@ static void protocolSendReject(tr_peerMsgsImpl* msgs, struct peer_request const*
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(out, sizeof(uint8_t) + 3 * sizeof(uint32_t));
evbuffer_add_uint8(out, BT_FEXT_REJECT);
evbuffer_add_uint8(out, BtFextReject);
evbuffer_add_uint32(out, req->index);
evbuffer_add_uint32(out, req->offset);
evbuffer_add_uint32(out, req->length);
@ -709,14 +735,14 @@ static void protocolSendRequest(tr_peerMsgsImpl* msgs, struct peer_request const
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(out, sizeof(uint8_t) + 3 * sizeof(uint32_t));
evbuffer_add_uint8(out, BT_REQUEST);
evbuffer_add_uint8(out, BtRequest);
evbuffer_add_uint32(out, req.index);
evbuffer_add_uint32(out, req.offset);
evbuffer_add_uint32(out, req.length);
dbgmsg(msgs, "requesting %u:%u->%u...", req.index, req.offset, req.length);
dbgOutMessageLen(msgs);
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
}
static void protocolSendCancel(tr_peerMsgsImpl* msgs, peer_request const& req)
@ -724,14 +750,14 @@ static void protocolSendCancel(tr_peerMsgsImpl* msgs, peer_request const& req)
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(out, sizeof(uint8_t) + 3 * sizeof(uint32_t));
evbuffer_add_uint8(out, BT_CANCEL);
evbuffer_add_uint8(out, BtCancel);
evbuffer_add_uint32(out, req.index);
evbuffer_add_uint32(out, req.offset);
evbuffer_add_uint32(out, req.length);
dbgmsg(msgs, "cancelling %u:%u->%u...", req.index, req.offset, req.length);
dbgOutMessageLen(msgs);
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
}
static void protocolSendPort(tr_peerMsgsImpl* msgs, uint16_t port)
@ -740,7 +766,7 @@ static void protocolSendPort(tr_peerMsgsImpl* msgs, uint16_t port)
dbgmsg(msgs, "sending Port %u", port);
evbuffer_add_uint32(out, 3);
evbuffer_add_uint8(out, BT_PORT);
evbuffer_add_uint8(out, BtPort);
evbuffer_add_uint16(out, port);
}
@ -749,12 +775,12 @@ static void protocolSendHave(tr_peerMsgsImpl* msgs, tr_piece_index_t index)
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(out, sizeof(uint8_t) + sizeof(uint32_t));
evbuffer_add_uint8(out, BT_HAVE);
evbuffer_add_uint8(out, BtHave);
evbuffer_add_uint32(out, index);
dbgmsg(msgs, "sending Have %u", index);
dbgOutMessageLen(msgs);
pokeBatchPeriod(msgs, LOW_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, LowPriorityIntervalSecs);
}
#if 0
@ -767,7 +793,7 @@ static void protocolSendAllowedFast(tr_peerMsgs* msgs, uint32_t pieceIndex)
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(io, out, sizeof(uint8_t) + sizeof(uint32_t));
evbuffer_add_uint8(io, out, BT_FEXT_ALLOWED_FAST);
evbuffer_add_uint8(io, out, BtFextAllowedFast);
evbuffer_add_uint32(io, out, pieceIndex);
dbgmsg(msgs, "sending Allowed Fast %u...", pieceIndex);
@ -781,11 +807,11 @@ static void protocolSendChoke(tr_peerMsgsImpl* msgs, bool choke)
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(out, sizeof(uint8_t));
evbuffer_add_uint8(out, choke ? BT_CHOKE : BT_UNCHOKE);
evbuffer_add_uint8(out, choke ? BtChoke : BtUnchoke);
dbgmsg(msgs, "sending %s...", choke ? "Choke" : "Unchoke");
dbgOutMessageLen(msgs);
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
}
static void protocolSendHaveAll(tr_peerMsgsImpl* msgs)
@ -795,11 +821,11 @@ static void protocolSendHaveAll(tr_peerMsgsImpl* msgs)
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(out, sizeof(uint8_t));
evbuffer_add_uint8(out, BT_FEXT_HAVE_ALL);
evbuffer_add_uint8(out, BtFextHaveAll);
dbgmsg(msgs, "sending HAVE_ALL...");
dbgOutMessageLen(msgs);
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
}
static void protocolSendHaveNone(tr_peerMsgsImpl* msgs)
@ -809,11 +835,11 @@ static void protocolSendHaveNone(tr_peerMsgsImpl* msgs)
struct evbuffer* out = msgs->outMessages;
evbuffer_add_uint32(out, sizeof(uint8_t));
evbuffer_add_uint8(out, BT_FEXT_HAVE_NONE);
evbuffer_add_uint8(out, BtFextHaveNone);
dbgmsg(msgs, "sending HAVE_NONE...");
dbgOutMessageLen(msgs);
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
}
/**
@ -912,9 +938,9 @@ static void sendInterest(tr_peerMsgsImpl* msgs, bool b)
dbgmsg(msgs, "Sending %s", b ? "Interested" : "Not Interested");
evbuffer_add_uint32(out, sizeof(uint8_t));
evbuffer_add_uint8(out, b ? BT_INTERESTED : BT_NOT_INTERESTED);
evbuffer_add_uint8(out, b ? BtInterested : BtNotInterested);
pokeBatchPeriod(msgs, HIGH_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, HighPriorityIntervalSecs);
dbgOutMessageLen(msgs);
}
@ -1027,16 +1053,41 @@ static void sendLtepHandshake(tr_peerMsgsImpl* msgs)
tr_variantDictAddRaw(&val, TR_KEY_ipv6, ipv6, 16);
}
// http://bittorrent.org/beps/bep_0009.html
// It also adds "metadata_size" to the handshake message (not the
// "m" dictionary) specifying an integer value of the number of
// bytes of the metadata.
if (allow_metadata_xfer && tr_torrentHasMetadata(msgs->torrent) && msgs->torrent->infoDictLength > 0)
{
tr_variantDictAddInt(&val, TR_KEY_metadata_size, msgs->torrent->infoDictLength);
}
// http://bittorrent.org/beps/bep_0010.html
// Local TCP listen port. Allows each side to learn about the TCP
// port number of the other side. Note that there is no need for the
// receiving side of the connection to send this extension message,
// since its port number is already known.
tr_variantDictAddInt(&val, TR_KEY_p, tr_sessionGetPublicPeerPort(msgs->session));
tr_variantDictAddInt(&val, TR_KEY_reqq, REQQ);
tr_variantDictAddBool(&val, TR_KEY_upload_only, tr_torrentIsSeed(msgs->torrent));
// http://bittorrent.org/beps/bep_0010.html
// An integer, the number of outstanding request messages this
// client supports without dropping any. The default in in
// libtorrent is 250.
tr_variantDictAddInt(&val, TR_KEY_reqq, ReqQ);
// http://bittorrent.org/beps/bep_0010.html
// Client name and version (as a utf-8 string). This is a much more
// reliable way of identifying the client than relying on the
// peer id encoding.
tr_variantDictAddQuark(&val, TR_KEY_v, version_quark);
// http://bittorrent.org/beps/bep_0021.html
// A peer that is a partial seed SHOULD include an extra header in
// the extension handshake 'upload_only'. Setting the value of this
// key to 1 indicates that this peer is not interested in downloading
// anything.
tr_variantDictAddBool(&val, TR_KEY_upload_only, tr_torrentIsSeed(msgs->torrent));
if (allow_metadata_xfer || allow_pex)
{
tr_variant* m = tr_variantDictAddDict(&val, TR_KEY_m, 2);
@ -1055,10 +1106,10 @@ static void sendLtepHandshake(tr_peerMsgsImpl* msgs)
payload = tr_variantToBuf(&val, TR_VARIANT_FMT_BENC);
evbuffer_add_uint32(out, 2 * sizeof(uint8_t) + evbuffer_get_length(payload));
evbuffer_add_uint8(out, BT_LTEP);
evbuffer_add_uint8(out, BtLtep);
evbuffer_add_uint8(out, LTEP_HANDSHAKE);
evbuffer_add_buffer(out, payload);
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
dbgOutMessageLen(msgs);
/* cleanup */
@ -1218,7 +1269,7 @@ static void parseUtMetadata(tr_peerMsgsImpl* msgs, uint32_t msglen, struct evbuf
if (msg_type == METADATA_MSG_TYPE_REQUEST)
{
if (piece >= 0 && tr_torrentHasMetadata(msgs->torrent) && !tr_torrentIsPrivate(msgs->torrent) &&
msgs->peerAskedForMetadataCount < METADATA_REQQ)
msgs->peerAskedForMetadataCount < MetadataReqQ)
{
msgs->peerAskedForMetadata[msgs->peerAskedForMetadataCount++] = piece;
}
@ -1236,10 +1287,10 @@ static void parseUtMetadata(tr_peerMsgsImpl* msgs, uint32_t msglen, struct evbuf
/* write it out as a LTEP message to our outMessages buffer */
evbuffer_add_uint32(out, 2 * sizeof(uint8_t) + evbuffer_get_length(payload));
evbuffer_add_uint8(out, BT_LTEP);
evbuffer_add_uint8(out, BtLtep);
evbuffer_add_uint8(out, msgs->ut_metadata_id);
evbuffer_add_buffer(out, payload);
pokeBatchPeriod(msgs, HIGH_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, HighPriorityIntervalSecs);
dbgOutMessageLen(msgs);
/* cleanup */
@ -1377,7 +1428,7 @@ static ReadState readBtLength(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, siz
else
{
msgs->incoming.length = len;
msgs->state = AWAITING_BT_ID;
msgs->state = AwaitingBtId;
}
return READ_NOW;
@ -1398,15 +1449,15 @@ static ReadState readBtId(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, size_t
msgs->incoming.id = id;
dbgmsg(msgs, "msgs->incoming.id is now %d; msgs->incoming.length is %zu", id, (size_t)msgs->incoming.length);
if (id == BT_PIECE)
if (id == BtPiece)
{
msgs->state = AWAITING_BT_PIECE;
msgs->state = AwaitingBtPiece;
return READ_NOW;
}
if (msgs->incoming.length != 1)
{
msgs->state = AWAITING_BT_MESSAGE;
msgs->state = AwaitingBtMessage;
return READ_NOW;
}
@ -1427,7 +1478,7 @@ static void prefetchPieces(tr_peerMsgsImpl* msgs)
return;
}
for (int i = msgs->prefetchCount; i < msgs->pendingReqsToClient && i < PREFETCH_SIZE; ++i)
for (int i = msgs->prefetchCount; i < msgs->pendingReqsToClient && i < PrefetchSize; ++i)
{
struct peer_request const* req = msgs->peerAskedFor + i;
@ -1460,7 +1511,7 @@ static void peerMadeRequest(tr_peerMsgsImpl* msgs, struct peer_request const* re
{
dbgmsg(msgs, "rejecting request from choked peer");
}
else if (msgs->pendingReqsToClient + 1 >= REQQ)
else if (msgs->pendingReqsToClient + 1 >= ReqQ)
{
dbgmsg(msgs, "rejecting request ... reqq is full");
}
@ -1484,20 +1535,20 @@ static bool messageLengthIsCorrect(tr_peerMsgsImpl const* msg, uint8_t id, uint3
{
switch (id)
{
case BT_CHOKE:
case BT_UNCHOKE:
case BT_INTERESTED:
case BT_NOT_INTERESTED:
case BT_FEXT_HAVE_ALL:
case BT_FEXT_HAVE_NONE:
case BtChoke:
case BtUnchoke:
case BtInterested:
case BtNotInterested:
case BtFextHaveAll:
case BtFextHaveNone:
return len == 1;
case BT_HAVE:
case BT_FEXT_SUGGEST:
case BT_FEXT_ALLOWED_FAST:
case BtHave:
case BtFextSuggest:
case BtFextAllowedFast:
return len == 5;
case BT_BITFIELD:
case BtBitfield:
if (tr_torrentHasMetadata(msg->torrent))
{
return len == (msg->torrent->info.pieceCount >> 3) + ((msg->torrent->info.pieceCount & 7) != 0 ? 1 : 0) + 1U;
@ -1512,18 +1563,18 @@ static bool messageLengthIsCorrect(tr_peerMsgsImpl const* msg, uint8_t id, uint3
return true;
case BT_REQUEST:
case BT_CANCEL:
case BT_FEXT_REJECT:
case BtRequest:
case BtCancel:
case BtFextReject:
return len == 13;
case BT_PIECE:
case BtPiece:
return len > 9 && len <= 16393;
case BT_PORT:
case BtPort:
return len == 3;
case BT_LTEP:
case BtLtep:
return len >= 2;
default:
@ -1590,7 +1641,7 @@ static ReadState readBtPiece(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, size
/* cleanup */
req->length = 0;
msgs->state = AWAITING_BT_LENGTH;
msgs->state = AwaitingBtLength;
return err != 0 ? READ_ERR : READ_NOW;
}
@ -1625,7 +1676,7 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
switch (id)
{
case BT_CHOKE:
case BtChoke:
dbgmsg(msgs, "got Choke");
msgs->client_is_choked_ = true;
@ -1637,26 +1688,26 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
msgs->update_active(TR_PEER_TO_CLIENT);
break;
case BT_UNCHOKE:
case BtUnchoke:
dbgmsg(msgs, "got Unchoke");
msgs->client_is_choked_ = false;
msgs->update_active(TR_PEER_TO_CLIENT);
updateDesiredRequestCount(msgs);
break;
case BT_INTERESTED:
case BtInterested:
dbgmsg(msgs, "got Interested");
msgs->peer_is_interested_ = true;
msgs->update_active(TR_CLIENT_TO_PEER);
break;
case BT_NOT_INTERESTED:
case BtNotInterested:
dbgmsg(msgs, "got Not Interested");
msgs->peer_is_interested_ = false;
msgs->update_active(TR_CLIENT_TO_PEER);
break;
case BT_HAVE:
case BtHave:
tr_peerIoReadUint32(msgs->io, inbuf, &ui32);
dbgmsg(msgs, "got Have: %u", ui32);
@ -1676,7 +1727,7 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
updatePeerProgress(msgs);
break;
case BT_BITFIELD:
case BtBitfield:
{
uint8_t* tmp = tr_new(uint8_t, msglen);
dbgmsg(msgs, "got a bitfield");
@ -1688,7 +1739,7 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
}
case BT_REQUEST:
case BtRequest:
{
struct peer_request r;
tr_peerIoReadUint32(msgs->io, inbuf, &r.index);
@ -1699,7 +1750,7 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
}
case BT_CANCEL:
case BtCancel:
{
struct peer_request r;
tr_peerIoReadUint32(msgs->io, inbuf, &r.index);
@ -1723,12 +1774,12 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
}
case BT_PIECE:
case BtPiece:
TR_ASSERT(false); /* handled elsewhere! */
break;
case BT_PORT:
dbgmsg(msgs, "Got a BT_PORT");
case BtPort:
dbgmsg(msgs, "Got a BtPort");
tr_peerIoReadUint16(msgs->io, inbuf, &msgs->dht_port);
if (msgs->dht_port > 0)
@ -1738,8 +1789,8 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
case BT_FEXT_SUGGEST:
dbgmsg(msgs, "Got a BT_FEXT_SUGGEST");
case BtFextSuggest:
dbgmsg(msgs, "Got a BtFextSuggest");
tr_peerIoReadUint32(msgs->io, inbuf, &ui32);
if (fext)
@ -1754,8 +1805,8 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
case BT_FEXT_ALLOWED_FAST:
dbgmsg(msgs, "Got a BT_FEXT_ALLOWED_FAST");
case BtFextAllowedFast:
dbgmsg(msgs, "Got a BtFextAllowedFast");
tr_peerIoReadUint32(msgs->io, inbuf, &ui32);
if (fext)
@ -1770,8 +1821,8 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
case BT_FEXT_HAVE_ALL:
dbgmsg(msgs, "Got a BT_FEXT_HAVE_ALL");
case BtFextHaveAll:
dbgmsg(msgs, "Got a BtFextHaveAll");
if (fext)
{
@ -1788,8 +1839,8 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
case BT_FEXT_HAVE_NONE:
dbgmsg(msgs, "Got a BT_FEXT_HAVE_NONE");
case BtFextHaveNone:
dbgmsg(msgs, "Got a BtFextHaveNone");
if (fext)
{
@ -1805,10 +1856,10 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
case BT_FEXT_REJECT:
case BtFextReject:
{
struct peer_request r;
dbgmsg(msgs, "Got a BT_FEXT_REJECT");
dbgmsg(msgs, "Got a BtFextReject");
tr_peerIoReadUint32(msgs->io, inbuf, &r.index);
tr_peerIoReadUint32(msgs->io, inbuf, &r.offset);
tr_peerIoReadUint32(msgs->io, inbuf, &r.length);
@ -1826,8 +1877,8 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
break;
}
case BT_LTEP:
dbgmsg(msgs, "Got a BT_LTEP");
case BtLtep:
dbgmsg(msgs, "Got a BtLtep");
parseLtep(msgs, msglen, inbuf);
break;
@ -1840,7 +1891,7 @@ static ReadState readBtMessage(tr_peerMsgsImpl* msgs, struct evbuffer* inbuf, si
TR_ASSERT(msglen + 1 == msgs->incoming.length);
TR_ASSERT(evbuffer_get_length(inbuf) == startBufLen - msglen);
msgs->state = AWAITING_BT_LENGTH;
msgs->state = AwaitingBtLength;
return READ_NOW;
}
@ -1922,7 +1973,7 @@ static ReadState canRead(tr_peerIo* io, void* vmsgs, size_t* piece)
{
ret = READ_LATER;
}
else if (msgs->state == AWAITING_BT_PIECE)
else if (msgs->state == AwaitingBtPiece)
{
ret = readBtPiece(msgs, in, inlen, piece);
}
@ -1930,15 +1981,15 @@ static ReadState canRead(tr_peerIo* io, void* vmsgs, size_t* piece)
{
switch (msgs->state)
{
case AWAITING_BT_LENGTH:
case AwaitingBtLength:
ret = readBtLength(msgs, in, inlen);
break;
case AWAITING_BT_ID:
case AwaitingBtId:
ret = readBtId(msgs, in, inlen);
break;
case AWAITING_BT_MESSAGE:
case AwaitingBtMessage:
ret = readBtMessage(msgs, in, inlen);
break;
@ -1976,7 +2027,7 @@ static void updateDesiredRequestCount(tr_peerMsgsImpl* msgs)
unsigned int rate_Bps;
unsigned int irate_Bps;
int const floor = 4;
int const seconds = REQUEST_BUF_SECS;
int const seconds = RequestBufSecs;
uint64_t const now = tr_time_msec();
/* Get the rate limit we should use.
@ -2028,10 +2079,10 @@ static void updateMetadataRequests(tr_peerMsgsImpl* msgs, time_t now)
/* write it out as a LTEP message to our outMessages buffer */
evbuffer_add_uint32(out, 2 * sizeof(uint8_t) + evbuffer_get_length(payload));
evbuffer_add_uint8(out, BT_LTEP);
evbuffer_add_uint8(out, BtLtep);
evbuffer_add_uint8(out, msgs->ut_metadata_id);
evbuffer_add_buffer(out, payload);
pokeBatchPeriod(msgs, HIGH_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, HighPriorityIntervalSecs);
dbgOutMessageLen(msgs);
/* cleanup */
@ -2089,7 +2140,7 @@ static size_t fillOutputBuffer(tr_peerMsgsImpl* msgs, time_t now)
tr_peerIoWriteBuf(msgs->io, msgs->outMessages, false);
msgs->clientSentAnythingAt = now;
msgs->outMessagesBatchedAt = 0;
msgs->outMessagesBatchPeriod = LOW_PRIORITY_INTERVAL_SECS;
msgs->outMessagesBatchPeriod = LowPriorityIntervalSecs;
bytesWritten += len;
}
@ -2119,11 +2170,11 @@ static size_t fillOutputBuffer(tr_peerMsgsImpl* msgs, time_t now)
/* write it out as a LTEP message to our outMessages buffer */
evbuffer_add_uint32(out, 2 * sizeof(uint8_t) + evbuffer_get_length(payload) + dataLen);
evbuffer_add_uint8(out, BT_LTEP);
evbuffer_add_uint8(out, BtLtep);
evbuffer_add_uint8(out, msgs->ut_metadata_id);
evbuffer_add_buffer(out, payload);
evbuffer_add(out, data, dataLen);
pokeBatchPeriod(msgs, HIGH_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, HighPriorityIntervalSecs);
dbgOutMessageLen(msgs);
evbuffer_free(payload);
@ -2147,10 +2198,10 @@ static size_t fillOutputBuffer(tr_peerMsgsImpl* msgs, time_t now)
/* write it out as a LTEP message to our outMessages buffer */
evbuffer_add_uint32(out, 2 * sizeof(uint8_t) + evbuffer_get_length(payload));
evbuffer_add_uint8(out, BT_LTEP);
evbuffer_add_uint8(out, BtLtep);
evbuffer_add_uint8(out, msgs->ut_metadata_id);
evbuffer_add_buffer(out, payload);
pokeBatchPeriod(msgs, HIGH_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, HighPriorityIntervalSecs);
dbgOutMessageLen(msgs);
evbuffer_free(payload);
@ -2177,7 +2228,7 @@ static size_t fillOutputBuffer(tr_peerMsgsImpl* msgs, time_t now)
evbuffer_expand(out, msglen);
evbuffer_add_uint32(out, sizeof(uint8_t) + 2 * sizeof(uint32_t) + req.length);
evbuffer_add_uint8(out, BT_PIECE);
evbuffer_add_uint8(out, BtPiece);
evbuffer_add_uint32(out, req.index);
evbuffer_add_uint32(out, req.offset);
@ -2247,11 +2298,11 @@ static size_t fillOutputBuffer(tr_peerMsgsImpl* msgs, time_t now)
*** Keepalive
**/
if (msgs != nullptr && msgs->clientSentAnythingAt != 0 && now - msgs->clientSentAnythingAt > KEEPALIVE_INTERVAL_SECS)
if (msgs != nullptr && msgs->clientSentAnythingAt != 0 && now - msgs->clientSentAnythingAt > KeepaliveIntervalSecs)
{
dbgmsg(msgs, "sending a keepalive message");
evbuffer_add_uint32(msgs->outMessages, 0);
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
}
return bytesWritten;
@ -2303,10 +2354,10 @@ static void sendBitfield(tr_peerMsgsImpl* msgs)
auto bytes = tr_torrentCreatePieceBitfield(msgs->torrent);
evbuffer_add_uint32(out, sizeof(uint8_t) + bytes.size());
evbuffer_add_uint8(out, BT_BITFIELD);
evbuffer_add_uint8(out, BtBitfield);
evbuffer_add(out, bytes.data(), std::size(bytes));
dbgmsg(msgs, "sending bitfield... outMessage size is now %zu", evbuffer_get_length(out));
pokeBatchPeriod(msgs, IMMEDIATE_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, ImmediatePriorityIntervalSecs);
}
static void tellPeerWhatWeHave(tr_peerMsgsImpl* msgs)
@ -2629,10 +2680,10 @@ static void sendPex(tr_peerMsgsImpl* msgs)
/* write the pex message */
payload = tr_variantToBuf(&val, TR_VARIANT_FMT_BENC);
evbuffer_add_uint32(out, 2 * sizeof(uint8_t) + evbuffer_get_length(payload));
evbuffer_add_uint8(out, BT_LTEP);
evbuffer_add_uint8(out, BtLtep);
evbuffer_add_uint8(out, msgs->ut_pex_id);
evbuffer_add_buffer(out, payload);
pokeBatchPeriod(msgs, HIGH_PRIORITY_INTERVAL_SECS);
pokeBatchPeriod(msgs, HighPriorityIntervalSecs);
dbgmsg(msgs, "sending a pex message; outMessage size is now %zu", evbuffer_get_length(out));
dbgOutMessageLen(msgs);
@ -2657,5 +2708,5 @@ static void pexPulse([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]] short
sendPex(msgs);
TR_ASSERT(msgs->pex_timer);
tr_timerAdd(msgs->pex_timer.get(), PEX_INTERVAL_SECS, 0);
tr_timerAdd(msgs->pex_timer.get(), PexIntervalSecs, 0);
}

View File

@ -763,13 +763,10 @@ static void handle_request(struct evhttp_request* req, void* arg)
}
}
enum
{
SERVER_START_RETRY_COUNT = 10,
SERVER_START_RETRY_DELAY_STEP = 3,
SERVER_START_RETRY_DELAY_INCREMENT = 5,
SERVER_START_RETRY_MAX_DELAY = 60
};
static auto constexpr ServerStartRetryCount = int{ 10 };
static auto constexpr ServerStartRetryDelayIncrement = int{ 5 };
static auto constexpr ServerStartRetryDelayStep = int{ 3 };
static auto constexpr ServerStartRetryMaxDelay = int{ 60 };
static void startServer(void* vserver);
@ -780,8 +777,8 @@ static void rpc_server_on_start_retry([[maybe_unused]] evutil_socket_t fd, [[may
static int rpc_server_start_retry(tr_rpc_server* server)
{
int retry_delay = (server->start_retry_counter / SERVER_START_RETRY_DELAY_STEP + 1) * SERVER_START_RETRY_DELAY_INCREMENT;
retry_delay = std::min(retry_delay, int{ SERVER_START_RETRY_MAX_DELAY });
int retry_delay = (server->start_retry_counter / ServerStartRetryDelayStep + 1) * ServerStartRetryDelayIncrement;
retry_delay = std::min(retry_delay, int{ ServerStartRetryMaxDelay });
if (server->start_retry_timer == nullptr)
{
@ -825,7 +822,7 @@ static void startServer(void* vserver)
{
evhttp_free(httpd);
if (server->start_retry_counter < SERVER_START_RETRY_COUNT)
if (server->start_retry_counter < ServerStartRetryCount)
{
int const retry_delay = rpc_server_start_retry(server);
@ -838,7 +835,7 @@ static void startServer(void* vserver)
"Unable to bind to %s:%d after %d attempts, giving up",
address,
port,
SERVER_START_RETRY_COUNT);
ServerStartRetryCount);
}
else
{

View File

@ -63,17 +63,14 @@
#include "version.h"
#include "web.h"
enum
{
#ifdef TR_LIGHTWEIGHT
DEFAULT_CACHE_SIZE_MB = 2,
DEFAULT_PREFETCH_ENABLED = false,
static auto constexpr DefaultCacheSizeMB = int{ 2 };
static auto constexpr DefaultPrefetchEnabled = bool{ false };
#else
DEFAULT_CACHE_SIZE_MB = 4,
DEFAULT_PREFETCH_ENABLED = true,
static auto constexpr DefaultCacheSizeMB = int{ 4 };
static auto constexpr DefaultPrefetchEnabled = bool{ true };
#endif
SAVE_INTERVAL_SECS = 360
};
static auto constexpr SaveIntervalSecs = int{ 360 };
#define dbgmsg(...) tr_logAddDeepNamed(nullptr, __VA_ARGS__)
@ -334,7 +331,7 @@ void tr_sessionGetDefaultSettings(tr_variant* d)
tr_variantDictReserve(d, 69);
tr_variantDictAddBool(d, TR_KEY_blocklist_enabled, false);
tr_variantDictAddStr(d, TR_KEY_blocklist_url, "http://www.example.com/blocklist");
tr_variantDictAddInt(d, TR_KEY_cache_size_mb, DEFAULT_CACHE_SIZE_MB);
tr_variantDictAddInt(d, TR_KEY_cache_size_mb, DefaultCacheSizeMB);
tr_variantDictAddBool(d, TR_KEY_dht_enabled, true);
tr_variantDictAddBool(d, TR_KEY_utp_enabled, true);
tr_variantDictAddBool(d, TR_KEY_lpd_enabled, false);
@ -359,7 +356,7 @@ void tr_sessionGetDefaultSettings(tr_variant* d)
tr_variantDictAddBool(d, TR_KEY_pex_enabled, true);
tr_variantDictAddBool(d, TR_KEY_port_forwarding_enabled, true);
tr_variantDictAddInt(d, TR_KEY_preallocation, TR_PREALLOCATE_SPARSE);
tr_variantDictAddBool(d, TR_KEY_prefetch_enabled, DEFAULT_PREFETCH_ENABLED);
tr_variantDictAddBool(d, TR_KEY_prefetch_enabled, DefaultPrefetchEnabled);
tr_variantDictAddInt(d, TR_KEY_peer_id_ttl_hours, 6);
tr_variantDictAddBool(d, TR_KEY_queue_stalled_enabled, true);
tr_variantDictAddInt(d, TR_KEY_queue_stalled_minutes, 30);
@ -583,7 +580,7 @@ static void onSaveTimer([[maybe_unused]] evutil_socket_t fd, [[maybe_unused]] sh
tr_statsSaveDirty(session);
tr_timerAdd(session->saveTimer, SAVE_INTERVAL_SECS, 0);
tr_timerAdd(session->saveTimer, SaveIntervalSecs, 0);
}
/***
@ -754,7 +751,7 @@ static void tr_sessionInitImpl(void* vdata)
TR_ASSERT(tr_isSession(session));
session->saveTimer = evtimer_new(session->event_base, onSaveTimer, session);
tr_timerAdd(session->saveTimer, SAVE_INTERVAL_SECS, 0);
tr_timerAdd(session->saveTimer, SaveIntervalSecs, 0);
tr_announcerInit(session);
@ -1467,12 +1464,9 @@ static void updateBandwidth(tr_session* session, tr_direction dir)
session->bandwidth->setDesiredSpeedBytesPerSecond(dir, limit_Bps);
}
enum
{
MINUTES_PER_HOUR = 60,
MINUTES_PER_DAY = MINUTES_PER_HOUR * 24,
MINUTES_PER_WEEK = MINUTES_PER_DAY * 7
};
static auto constexpr MinutesPerHour = int{ 60 };
static auto constexpr MinutesPerDay = int{ MinutesPerHour * 24 };
static auto constexpr MinutesPerWeek = int{ MinutesPerDay * 7 };
static void turtleUpdateTable(struct tr_turtle_info* t)
{
@ -1487,12 +1481,12 @@ static void turtleUpdateTable(struct tr_turtle_info* t)
if (end <= begin)
{
end += MINUTES_PER_DAY;
end += MinutesPerDay;
}
for (time_t i = begin; i < end; ++i)
{
t->minutes->setBit((i + day * MINUTES_PER_DAY) % MINUTES_PER_WEEK);
t->minutes->setBit((i + day * MinutesPerDay) % MinutesPerWeek);
}
}
}
@ -1537,11 +1531,11 @@ static bool getInTurtleTime(struct tr_turtle_info const* t)
struct tm tm;
tr_localtime_r(&now, &tm);
size_t minute_of_the_week = tm.tm_wday * MINUTES_PER_DAY + tm.tm_hour * MINUTES_PER_HOUR + tm.tm_min;
size_t minute_of_the_week = tm.tm_wday * MinutesPerDay + tm.tm_hour * MinutesPerHour + tm.tm_min;
if (minute_of_the_week >= MINUTES_PER_WEEK) /* leap minutes? */
if (minute_of_the_week >= MinutesPerWeek) /* leap minutes? */
{
minute_of_the_week = MINUTES_PER_WEEK - 1;
minute_of_the_week = MinutesPerWeek - 1;
}
return t->minutes->readBit(minute_of_the_week);
@ -1576,7 +1570,7 @@ static void turtleBootstrap(tr_session* session, struct tr_turtle_info* turtle)
turtle->changedByUser = false;
turtle->autoTurtleState = TR_AUTO_SWITCH_UNUSED;
turtle->minutes = new Bitfield(MINUTES_PER_WEEK);
turtle->minutes = new Bitfield(MinutesPerWeek);
turtleUpdateTable(turtle);

View File

@ -31,11 +31,8 @@
****
***/
enum
{
/* don't ask for the same metadata piece more than this often */
MIN_REPEAT_INTERVAL_SECS = 3
};
/* don't ask for the same metadata piece more than this often */
static auto constexpr MinRepeatIntervalSecs = int{ 3 };
struct metadata_node
{
@ -372,7 +369,7 @@ bool tr_torrentGetNextMetadataRequest(tr_torrent* tor, time_t now, int* setme_pi
bool have_request = false;
struct tr_incomplete_metadata* m = tor->incompleteMetadata;
if (m != nullptr && m->piecesNeededCount > 0 && m->piecesNeeded[0].requestedAt + MIN_REPEAT_INTERVAL_SECS < now)
if (m != nullptr && m->piecesNeededCount > 0 && m->piecesNeeded[0].requestedAt + MinRepeatIntervalSecs < now)
{
int const piece = m->piecesNeeded[0].piece;
tr_removeElementFromArray(m->piecesNeeded, 0, sizeof(struct metadata_node), m->piecesNeededCount);

View File

@ -441,20 +441,17 @@ void tr_dhtUninit(tr_session* ss)
}
else
{
enum
{
MAX_NODES = 300,
PORT_LEN = 2,
COMPACT_ADDR_LEN = 4,
COMPACT_LEN = (COMPACT_ADDR_LEN + PORT_LEN),
COMPACT6_ADDR_LEN = 16,
COMPACT6_LEN = (COMPACT6_ADDR_LEN + PORT_LEN),
};
auto constexpr MaxNodes = size_t{ 300 };
auto constexpr PortLen = size_t{ 2 };
auto constexpr CompactAddrLen = size_t{ 4 };
auto constexpr CompactLen = size_t{ CompactAddrLen + PortLen };
auto constexpr Compact6AddrLen = size_t{ 16 };
auto constexpr Compact6Len = size_t{ Compact6AddrLen + PortLen };
struct sockaddr_in sins[MAX_NODES];
struct sockaddr_in6 sins6[MAX_NODES];
int num = MAX_NODES;
int num6 = MAX_NODES;
struct sockaddr_in sins[MaxNodes];
struct sockaddr_in6 sins6[MaxNodes];
int num = MaxNodes;
int num6 = MaxNodes;
int n = dht_get_nodes(sins, &num, sins6, &num6);
tr_logAddNamedInfo("DHT", "Saving %d (%d + %d) nodes", n, num, num6);
@ -464,14 +461,14 @@ void tr_dhtUninit(tr_session* ss)
if (num > 0)
{
char compact[MAX_NODES * COMPACT_LEN];
char compact[MaxNodes * CompactLen];
char* out = compact;
for (struct sockaddr_in const* in = sins; in < sins + num; ++in)
{
memcpy(out, &in->sin_addr, COMPACT_ADDR_LEN);
out += COMPACT_ADDR_LEN;
memcpy(out, &in->sin_port, PORT_LEN);
out += PORT_LEN;
memcpy(out, &in->sin_addr, CompactAddrLen);
out += CompactAddrLen;
memcpy(out, &in->sin_port, PortLen);
out += PortLen;
}
tr_variantDictAddRaw(&benc, TR_KEY_nodes, compact, out - compact);
@ -479,14 +476,14 @@ void tr_dhtUninit(tr_session* ss)
if (num6 > 0)
{
char compact6[MAX_NODES * COMPACT6_LEN];
char compact6[MaxNodes * Compact6Len];
char* out6 = compact6;
for (struct sockaddr_in6 const* in = sins6; in < sins6 + num6; ++in)
{
memcpy(out6, &in->sin6_addr, COMPACT6_ADDR_LEN);
out6 += COMPACT6_ADDR_LEN;
memcpy(out6, &in->sin6_port, PORT_LEN);
out6 += PORT_LEN;
memcpy(out6, &in->sin6_addr, Compact6AddrLen);
out6 += Compact6AddrLen;
memcpy(out6, &in->sin6_port, PortLen);
out6 += PortLen;
}
tr_variantDictAddRaw(&benc, TR_KEY_nodes6, compact6, out6 - compact6);

View File

@ -68,28 +68,21 @@ using in_port_t = uint16_t; /* all missing */
static void event_callback(evutil_socket_t, short, void*);
enum
{
UPKEEP_INTERVAL_SECS = 5
};
static auto constexpr UpkeepIntervalSecs = int{ 5 };
static struct event* upkeep_timer = nullptr;
static tr_socket_t lpd_socket; /**<separate multicast receive socket */
static tr_socket_t lpd_socket2; /**<and multicast send socket */
static struct event* lpd_event = nullptr;
static event* lpd_event = nullptr;
static tr_port lpd_port;
static tr_session* session;
enum
{
lpd_maxDatagramLength = 200 /**<the size an LPD datagram must not exceed */
};
char const lpd_mcastGroup[] = "239.192.152.143"; /**<LPD multicast group */
int const lpd_mcastPort = 6771; /**<LPD source and destination UPD port */
static struct sockaddr_in lpd_mcastAddr; /**<initialized from the above constants in tr_lpdInit */
static auto constexpr lpd_maxDatagramLength = int{ 200 }; /**<the size an LPD datagram must not exceed */
static char constexpr lpd_mcastGroup[] = "239.192.152.143"; /**<LPD multicast group */
static auto constexpr lpd_mcastPort = int{ 6771 }; /**<LPD source and destination UPD port */
static auto lpd_mcastAddr = sockaddr_in{}; /**<initialized from the above constants in tr_lpdInit */
/**
* @brief Protocol-related information carried by a Local Peer Discovery packet */
@ -99,20 +92,14 @@ struct lpd_protocolVersion
int minor;
};
enum lpd_enumTimeToLive
{
lpd_ttlSameSubnet = 1,
lpd_ttlSameSite = 32,
lpd_ttlSameRegion = 64,
lpd_ttlSameContinent = 128,
lpd_ttlUnrestricted = 255
};
static auto constexpr lpd_ttlSameSubnet = int{ 1 };
// static auto constexpr lpd_ttlSameSite = int{ 32 };
// static auto constexpr lpd_ttlSameRegion = int{ 64 };
// static auto constexpr lpd_ttlSameContinent = int{ 128 };
// static auto constexpr lpd_ttlUnrestricted = int{ 255 };
enum
{
lpd_announceInterval = 4 * 60, /**<4 min announce interval per torrent */
lpd_announceScope = lpd_ttlSameSubnet /**<the maximum scope for LPD datagrams */
};
static auto constexpr lpd_announceInterval = int{ 4 * 60 }; /**<4 min announce interval per torrent */
static auto constexpr lpd_announceScope = int{ lpd_ttlSameSubnet }; /**<the maximum scope for LPD datagrams */
/**
* @defgroup DoS Message Flood Protection
@ -132,10 +119,7 @@ enum
* @ingroup DoS
* @brief allow at most ten messages per second (interval average)
* @note this constraint is only enforced once per housekeeping interval */
enum
{
lpd_announceCapFactor = 10
};
static auto constexpr lpd_announceCapFactor = int{ 10 };
/**
* @ingroup DoS
@ -234,22 +218,19 @@ static bool lpd_extractParam(char const* const str, char const* const name, int
TR_ASSERT(name != nullptr);
TR_ASSERT(val != nullptr);
enum
{
/* configure maximum length of search string here */
maxLength = 30
};
/* configure maximum length of search string here */
auto constexpr MaxLength = int{ 30 };
char sstr[maxLength] = { 0 };
char sstr[MaxLength] = { 0 };
char const* pos;
if (strlen(name) > maxLength - strlen(CRLF ": "))
if (strlen(name) > MaxLength - strlen(CRLF ": "))
{
return false;
}
/* compose the string token to search for */
tr_snprintf(sstr, maxLength, CRLF "%s: ", name);
tr_snprintf(sstr, MaxLength, CRLF "%s: ", name);
pos = strstr(str, sstr);
@ -408,7 +389,7 @@ int tr_lpdInit(tr_session* ss, [[maybe_unused]] tr_address* tr_addr)
event_add(lpd_event, nullptr);
upkeep_timer = evtimer_new(ss->event_base, on_upkeep_timer, ss);
tr_timerAdd(upkeep_timer, UPKEEP_INTERVAL_SECS, 0);
tr_timerAdd(upkeep_timer, UpkeepIntervalSecs, 0);
tr_logAddNamedDbg("LPD", "Local Peer Discovery initialised");
@ -536,15 +517,12 @@ bool tr_lpdSendAnnounce(tr_torrent const* t)
*/
static int tr_lpdConsiderAnnounce(tr_pex* peer, char const* const msg)
{
enum
{
maxValueLen = 25,
maxHashLen = SIZEOF_HASH_STRING
};
auto constexpr MaxValueLen = int{ 25 };
auto constexpr MaxHashLen = int{ SIZEOF_HASH_STRING };
auto ver = lpd_protocolVersion{ -1, -1 };
char value[maxValueLen] = { 0 };
char hashString[maxHashLen] = { 0 };
char value[MaxValueLen] = { 0 };
char hashString[MaxHashLen] = { 0 };
int res = 0;
int peerPort = 0;
@ -561,7 +539,7 @@ static int tr_lpdConsiderAnnounce(tr_pex* peer, char const* const msg)
/* save the effort to check Host, which seems to be optional anyway */
if (!lpd_extractParam(params, "Port", maxValueLen, value))
if (!lpd_extractParam(params, "Port", MaxValueLen, value))
{
return 0;
}
@ -575,7 +553,7 @@ static int tr_lpdConsiderAnnounce(tr_pex* peer, char const* const msg)
peer->port = htons(peerPort);
res = -1; /* signal caller side-effect to peer->port via return != 0 */
if (!lpd_extractParam(params, "Infohash", maxHashLen, hashString))
if (!lpd_extractParam(params, "Infohash", MaxHashLen, hashString))
{
return res;
}
@ -683,8 +661,8 @@ static int tr_lpdAnnounceMore(time_t const now, int const interval)
static void on_upkeep_timer([[maybe_unused]] evutil_socket_t s, [[maybe_unused]] short type, [[maybe_unused]] void* user_data)
{
time_t const now = tr_time();
tr_lpdAnnounceMore(now, UPKEEP_INTERVAL_SECS);
tr_timerAdd(upkeep_timer, UPKEEP_INTERVAL_SECS, 0);
tr_lpdAnnounceMore(now, UpkeepIntervalSecs);
tr_timerAdd(upkeep_timer, UpkeepIntervalSecs, 0);
}
/**

View File

@ -26,10 +26,7 @@
****
***/
enum
{
MSEC_TO_SLEEP_PER_SECOND_DURING_VERIFY = 100
};
static auto constexpr MsecToSleepPerSecondDuringVerify = int{ 100 };
static bool verifyTorrent(tr_torrent* tor, bool* stopFlag)
{
@ -118,7 +115,7 @@ static bool verifyTorrent(tr_torrent* tor, bool* stopFlag)
if (lastSleptAt != now)
{
lastSleptAt = now;
tr_wait_msec(MSEC_TO_SLEEP_PER_SECOND_DURING_VERIFY);
tr_wait_msec(MsecToSleepPerSecondDuringVerify);
}
sha = tr_sha1_init();

View File

@ -38,10 +38,7 @@
#define USE_LIBCURL_SOCKOPT
#endif
enum
{
THREADFUNC_MAX_SLEEP_MSEC = 200,
};
static auto constexpr ThreadfuncMaxSleepMsec = int{ 200 };
#if 0
#define dbgmsg(fmt, ...) fprintf(stderr, fmt "\n", __VA_ARGS__)
@ -488,7 +485,7 @@ static void tr_webThreadFunc(void* vsession)
if (msec < 0)
{
msec = THREADFUNC_MAX_SLEEP_MSEC;
msec = ThreadfuncMaxSleepMsec;
}
if (session->isClosed)
@ -498,9 +495,9 @@ static void tr_webThreadFunc(void* vsession)
if (msec > 0)
{
if (msec > THREADFUNC_MAX_SLEEP_MSEC)
if (msec > ThreadfuncMaxSleepMsec)
{
msec = THREADFUNC_MAX_SLEEP_MSEC;
msec = ThreadfuncMaxSleepMsec;
}
auto numfds = int{};
@ -513,7 +510,7 @@ static void tr_webThreadFunc(void* vsession)
/* curl_multi_wait() returns immediately if there are
* no fds to wait for, so we need an explicit wait here
* to emulate select() behavior */
tr_wait_msec(std::min(msec, THREADFUNC_MAX_SLEEP_MSEC / 2L));
tr_wait_msec(std::min(msec, ThreadfuncMaxSleepMsec / 2L));
}
}
else