Mirror of https://github.com/transmission/transmission (synced 2025-01-31 03:12:44 +00:00)
fix: sonarcloud (#4453)
parent a1892f2c7c
commit 14a3d01e45
6 changed files with 25 additions and 38 deletions

@@ -45,10 +45,7 @@ using namespace std::literals;
 /* unless the tracker says otherwise, rescrape this frequently */
 static auto constexpr DefaultScrapeIntervalSec = int{ 60 * 30 };
-/* unless the tracker says otherwise, this is the announce interval */
-static auto constexpr DefaultAnnounceIntervalSec = int{ 60 * 10 };
-/* unless the tracker says otherwise, this is the announce min_interval */
-static auto constexpr DefaultAnnounceMinIntervalSec = int{ 60 * 2 };
+
 /* the value of the 'numwant' argument passed in tracker requests. */
 static auto constexpr Numwant = int{ 80 };
 
 

@@ -528,6 +525,12 @@ struct tr_tier
     bool isScraping = false;
 
 private:
+    // unless the tracker says otherwise, this is the announce interval
+    static auto constexpr DefaultAnnounceIntervalSec = int{ 60 * 10 };
+
+    // unless the tracker says otherwise, this is the announce min_interval
+    static auto constexpr DefaultAnnounceMinIntervalSec = int{ 60 * 2 };
+
     [[nodiscard]] static time_t getNextScrapeTime(tr_session const* session, tr_tier const* tier, int interval)
     {
         // Maybe don't scrape paused torrents
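
The two hunks above move the default announce-interval constants from file scope into tr_tier as private static constexpr members, narrowing them to the one type that reads them. A minimal sketch of the same pattern, using an invented Tier class rather than Transmission's actual code:

#include <ctime>
#include <iostream>

class Tier
{
public:
    [[nodiscard]] time_t next_announce_at(time_t now) const
    {
        // the constant is only read here, so it can live inside the class
        return now + DefaultAnnounceIntervalSec;
    }

private:
    // previously a file-scope constant; now scoped to its single consumer
    static auto constexpr DefaultAnnounceIntervalSec = int{ 60 * 10 };
};

int main()
{
    std::cout << Tier{}.next_announce_at(std::time(nullptr)) << '\n';
}
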
@@ -58,21 +58,6 @@ using tr_socket_t = int;
 #define sockerrno errno
 #endif
 
-/****
-*****
-***** tr_address
-*****
-****/
-
-enum tr_address_type
-{
-    TR_AF_INET,
-    TR_AF_INET6,
-    NUM_TR_AF_INET_TYPES
-};
-
-struct tr_address;
-
 /**
  * Literally just a port number.
  *

@@ -149,6 +134,13 @@ private:
     uint16_t hport_ = 0;
 };
 
+enum tr_address_type
+{
+    TR_AF_INET,
+    TR_AF_INET6,
+    NUM_TR_AF_INET_TYPES
+};
+
 struct tr_address
 {
     [[nodiscard]] static std::optional<tr_address> from_string(std::string_view address_sv);
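
This hunk and the previous one just relocate tr_address_type so it is declared immediately above tr_address, which is what lets the earlier `struct tr_address;` forward declaration go away. A stripped-down sketch of the resulting header layout; address_type, address, and the toy parser below are invented stand-ins, not the library's definitions:

#include <optional>
#include <string_view>

// the enum sits directly above the struct that uses it, so no separate
// forward declaration of the struct is needed earlier in the header
enum address_type
{
    ADDR_V4,
    ADDR_V6,
};

struct address
{
    // factory in the same spirit as tr_address::from_string(): parsing can
    // fail, so the result is an optional the caller has to examine
    [[nodiscard]] static std::optional<address> from_string(std::string_view sv)
    {
        if (sv == "v4")
        {
            return address{ ADDR_V4 };
        }

        if (sv == "v6")
        {
            return address{ ADDR_V6 };
        }

        return std::nullopt;
    }

    address_type type;
};

int main()
{
    auto const addr = address::from_string("v6");
    return addr.has_value() && addr->type == ADDR_V6 ? 0 : 1;
}
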
@@ -903,10 +903,8 @@ static void peerSuggestedPiece(
 void tr_peerMgrPieceCompleted(tr_torrent* tor, tr_piece_index_t p)
 {
     bool piece_came_from_peers = false;
-    tr_swarm* const s = tor->swarm;
 
-    /* walk through our peers */
-    for (auto* const peer : s->peers)
+    for (auto* const peer : tor->swarm->peers)
     {
         // notify the peer that we now have this piece
         peer->on_piece_completed(p);
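
The deleted lines held a local alias (`s`) that was read exactly once; the loop now reaches the peer list through tor->swarm directly. A self-contained sketch of the same cleanup, with invented Peer/Swarm/Torrent stand-ins:

#include <vector>

struct Peer
{
    int completed_pieces = 0;

    void on_piece_completed(int /*piece*/)
    {
        ++completed_pieces;
    }
};

struct Swarm
{
    std::vector<Peer*> peers;
};

struct Torrent
{
    Swarm* swarm = nullptr;
};

void piece_completed(Torrent* tor, int piece)
{
    // no single-use alias for the swarm: the container is reached through
    // its owner right where it is needed
    for (auto* const peer : tor->swarm->peers)
    {
        peer->on_piece_completed(piece);
    }
}

int main()
{
    auto peer = Peer{};
    auto swarm = Swarm{ { &peer } };
    auto tor = Torrent{ &swarm };
    piece_completed(&tor, 0);
    return peer.completed_pieces == 1 ? 0 : 1;
}
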
@@ -2648,10 +2646,7 @@ struct peer_candidate
     auto const now_msec = tr_time_msec();
 
     // leave 5% of connection slots for incoming connections -- ticket #2609
-    auto const max_candidates = static_cast<size_t>(session->peerLimit() * 0.95);
-
-    // don't start any new handshakes if we're full up
-    if (max_candidates <= tr_peerMsgs::size())
+    if (auto const max_candidates = static_cast<size_t>(session->peerLimit() * 0.95); max_candidates <= tr_peerMsgs::size())
     {
         return {};
     }
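
The rewritten condition uses a C++17 if statement with an init clause, so max_candidates only exists for the duration of the check instead of lingering in the enclosing scope. A sketch of the idiom; peer_limit() and peer_count() below are hypothetical stand-ins for session->peerLimit() and tr_peerMsgs::size():

#include <cstddef>

// hypothetical stand-ins for the real session / peer-list queries
std::size_t peer_limit()
{
    return 200;
}

std::size_t peer_count()
{
    return 150;
}

bool may_start_handshake()
{
    // the variable is declared in the if's init clause, so its scope ends
    // with the if/else and it cannot leak into (or shadow) later code
    if (auto const max_candidates = static_cast<std::size_t>(peer_limit() * 0.95); max_candidates <= peer_count())
    {
        return false; // full up: keep ~5% of the slots free for incoming peers
    }

    return true;
}

int main()
{
    return may_start_handshake() ? 0 : 1;
}
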
@@ -18,7 +18,7 @@
 #define tr_logAddDebugIo(io, msg) tr_logAddDebug(msg, (io)->display_name())
 #define tr_logAddTraceIo(io, msg) tr_logAddTrace(msg, (io)->display_name())
 
-tr_peer_socket::tr_peer_socket(tr_session* session, tr_address const& address, tr_port port, tr_socket_t sock)
+tr_peer_socket::tr_peer_socket(tr_session const* session, tr_address const& address, tr_port port, tr_socket_t sock)
     : handle{ sock }
     , address_{ address }
     , port_{ port }
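
The constructor definition now takes tr_session const* (the matching declaration changes further down), documenting that construction only reads from the session. A small illustration of that const-correctness with invented Session/PeerSocket types:

struct Session
{
    int upload_slots = 4;
};

class PeerSocket
{
public:
    explicit PeerSocket(Session const* session)
        : slots_{ session != nullptr ? session->upload_slots : 0 } // read-only access
    {
        // a pointer-to-const both documents and enforces that the session is
        // not modified during construction; callers holding a const session can use it
    }

    [[nodiscard]] int slots() const
    {
        return slots_;
    }

private:
    int slots_ = 0;
};

int main()
{
    auto const session = Session{};
    auto const socket = PeerSocket{ &session }; // fine: the parameter is Session const*
    return socket.slots() == 4 ? 0 : 1;
}
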
@@ -115,16 +115,13 @@ size_t tr_peer_socket::try_read(Buffer& buf, size_t max, tr_error** error) const
     }
 
 #ifdef WITH_UTP
-    if (is_utp())
-    {
-        // utp_read_drained() notifies libutp that this read buffer is
-        // empty. It opens up the congestion window by sending an ACK
-        // (soonish) if one was not going to be sent.
-        if (std::empty(buf))
+    // utp_read_drained() notifies libutp that this read buffer is empty.
+    // It opens up the congestion window by sending an ACK (soonish) if
+    // one was not going to be sent.
+    if (is_utp() && std::empty(buf))
     {
         utp_read_drained(handle.utp);
     }
-    }
 #endif
 
     return {};
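
The hunk above merges `if (is_utp()) { if (std::empty(buf)) ... }` into one condition and reflows the comment above it, the usual fix for a "collapsible if" finding; behavior is unchanged. A generic sketch of the transformation, with an invented ReadBuffer type and notify_drained() standing in for utp_read_drained():

#include <string>

struct ReadBuffer
{
    bool transport_is_utp = false;
    std::string bytes;

    void notify_drained()
    {
        // stand-in for utp_read_drained(): tell the transport the buffer is empty
    }

    void after_read()
    {
        // was: if (transport_is_utp) { if (std::empty(bytes)) { notify_drained(); } }
        // merging the two conditions removes one nesting level without changing behavior
        if (transport_is_utp && std::empty(bytes))
        {
            notify_drained();
        }
    }
};
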
@@ -29,7 +29,7 @@ public:
     using Buffer = libtransmission::Buffer;
 
     tr_peer_socket() = default;
-    tr_peer_socket(tr_session* session, tr_address const& address, tr_port port, tr_socket_t sock);
+    tr_peer_socket(tr_session const* session, tr_address const& address, tr_port port, tr_socket_t sock);
     tr_peer_socket(tr_address const& address, tr_port port, struct UTPSocket* const sock);
     tr_peer_socket(tr_peer_socket&&) = default;
     tr_peer_socket(tr_peer_socket const&) = delete;

@@ -66,7 +66,7 @@ using shared_unique_ptr = std::unique_ptr<CURLSH, ShareDeleter>;
 
 struct MultiDeleter
 {
-    void operator()(CURLM* multi)
+    void operator()(CURLM* multi) const
     {
         if (multi == nullptr)
         {
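
MultiDeleter is the custom deleter behind a std::unique_ptr that owns a CURLM handle; its call operator touches no member state, so it can be const. The same shape is sketched below with a standard-library resource instead of curl, purely so the example stays self-contained:

#include <cstdio>
#include <memory>

struct FileCloser
{
    // a stateless deleter's call operator can be const; it still runs when the
    // unique_ptr destroys the managed handle
    void operator()(std::FILE* fp) const
    {
        if (fp == nullptr)
        {
            return;
        }

        std::fclose(fp);
    }
};

using file_handle = std::unique_ptr<std::FILE, FileCloser>;

int main()
{
    auto fp = file_handle{ std::fopen("example.txt", "w") };
    // the file is closed via FileCloser::operator() const when fp goes out of scope
}
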
@@ -585,7 +585,7 @@ public:
         return std::empty(queued_tasks_) && std::empty(running_tasks_);
     }
 
-    void remove_task(Task& task)
+    void remove_task(Task const& task)
     {
         auto const lock = std::unique_lock{ tasks_mutex_ };
 
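
The final hunk tightens remove_task() to take Task const&, signalling that the argument is only inspected, presumably to locate the matching entry, and never modified. A compact sketch of that contract with invented Task/TaskList types; only the lock line mirrors what is visible in the diff:

#include <algorithm>
#include <mutex>
#include <vector>

// invented stand-ins for the real task bookkeeping
struct Task
{
    int id = 0;
};

class TaskList
{
public:
    void add_task(Task const& task)
    {
        auto const lock = std::unique_lock{ tasks_mutex_ };
        running_tasks_.push_back(task);
    }

    // the argument is only compared against stored entries, so reference-to-const
    // states that intent and lets callers pass const tasks
    void remove_task(Task const& task)
    {
        auto const lock = std::unique_lock{ tasks_mutex_ };

        auto const match = [&task](Task const& candidate)
        {
            return candidate.id == task.id;
        };
        running_tasks_.erase(
            std::remove_if(std::begin(running_tasks_), std::end(running_tasks_), match),
            std::end(running_tasks_));
    }

private:
    std::mutex tasks_mutex_;
    std::vector<Task> running_tasks_;
};

int main()
{
    auto tasks = TaskList{};
    tasks.add_task(Task{ 1 });
    tasks.remove_task(Task{ 1 });
}
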