1
0
Fork 0
mirror of https://github.com/transmission/transmission synced 2025-03-12 15:14:12 +00:00

fix: speed limits did not work below 16KiB/s (#7339)

* refactor: calculate raw speed by OS buffer instead of application buffer

* refactor: limit transfer speed by raw speed

* refactor: early return write if no bytes

* refactor: remove code for guessing TCP/IP overhead

* refactor: remove code for uTP overhead
This commit is contained in:
Yat Ho 2024-12-29 15:42:07 +08:00 committed by GitHub
parent 536fe4a6e1
commit a4cc98b92c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 39 additions and 77 deletions

View file

@@ -298,17 +298,19 @@ void tr_bandwidth::notify_bandwidth_consumed(tr_direction dir, size_t byte_count
auto& band = band_[dir];
if (band.is_limited_ && is_piece_data)
{
band.bytes_left_ -= std::min(band.bytes_left_, byte_count);
}
notify_bandwidth_consumed_bytes(now, band.raw_, byte_count);
if (is_piece_data)
{
notify_bandwidth_consumed_bytes(now, band.piece_, byte_count);
}
else
{
notify_bandwidth_consumed_bytes(now, band.raw_, byte_count);
if (band.is_limited_)
{
band.bytes_left_ -= std::min(band.bytes_left_, byte_count);
}
}
if (parent_ != nullptr)
{

View file

@@ -183,7 +183,7 @@ public:
return false;
}
auto const got = get_piece_speed(now_msec, dir);
auto const got = get_raw_speed(now_msec, dir);
auto const want = get_desired_speed(dir);
return got >= want;
}

View file

@@ -271,21 +271,21 @@ bool tr_peerIo::reconnect()
void tr_peerIo::did_write_wrapper(size_t bytes_transferred)
{
auto const keep_alive = shared_from_this();
auto const now = tr_time_msec();
while (bytes_transferred != 0U && !std::empty(outbuf_info_))
if (bytes_transferred > 0U)
{
bandwidth().notify_bandwidth_consumed(TR_UP, bytes_transferred, false, now);
}
while (bytes_transferred > 0U && !std::empty(outbuf_info_))
{
auto& [n_bytes_left, is_piece_data] = outbuf_info_.front();
auto const payload = std::min(n_bytes_left, bytes_transferred);
size_t const payload = std::min(uint64_t{ n_bytes_left }, uint64_t{ bytes_transferred });
/* For µTP sockets, the overhead is computed in utp_on_overhead. */
size_t const overhead = socket_.guess_packet_overhead(payload);
uint64_t const now = tr_time_msec();
bandwidth().notify_bandwidth_consumed(TR_UP, payload, is_piece_data, now);
if (overhead > 0U)
if (is_piece_data)
{
bandwidth().notify_bandwidth_consumed(TR_UP, overhead, false, now);
bandwidth().notify_bandwidth_consumed(TR_UP, payload, true, now);
}
if (did_write_ != nullptr)
@@ -360,7 +360,7 @@ void tr_peerIo::event_write_cb([[maybe_unused]] evutil_socket_t fd, short /*even
// ---
void tr_peerIo::can_read_wrapper()
void tr_peerIo::can_read_wrapper(size_t bytes_transferred)
{
// try to consume the input buffer
@@ -376,6 +376,11 @@ void tr_peerIo::can_read_wrapper()
auto done = false;
auto err = false;
if (bytes_transferred > 0U)
{
bandwidth().notify_bandwidth_consumed(TR_DOWN, bytes_transferred, false, now);
}
// In normal conditions, only continue processing if we still have bandwidth
// quota for it.
//
@@ -384,27 +389,14 @@
// processing if the read buffer is more than twice as large as the target size.
while (!done && !err && (read_buffer_size() > RcvBuf * 2U || bandwidth().clamp(TR_DOWN, read_buffer_size()) != 0U))
{
size_t piece = 0U;
auto const old_len = read_buffer_size();
auto piece = size_t{};
auto const read_state = can_read_ != nullptr ? can_read_(this, user_data_, &piece) : ReadState::Err;
auto const used = old_len - read_buffer_size();
auto const overhead = socket_.guess_packet_overhead(used);
if (piece != 0U)
if (piece > 0U)
{
bandwidth().notify_bandwidth_consumed(TR_DOWN, piece, true, now);
}
if (used != piece)
{
bandwidth().notify_bandwidth_consumed(TR_DOWN, used - piece, false, now);
}
if (overhead > 0U)
{
bandwidth().notify_bandwidth_consumed(TR_DOWN, overhead, false, now);
}
switch (read_state)
{
case ReadState::Now:
@@ -459,7 +451,7 @@ size_t tr_peerIo::try_read(size_t max)
}
else if (!std::empty(buf))
{
can_read_wrapper();
can_read_wrapper(n_read);
}
return n_read;
@@ -593,6 +585,11 @@ size_t tr_peerIo::flush_outgoing_protocol_msgs()
void tr_peerIo::write_bytes(void const* bytes, size_t n_bytes, bool is_piece_data)
{
if (n_bytes == 0U)
{
return;
}
outbuf_info_.emplace_back(n_bytes, is_piece_data);
auto [resbuf, reslen] = outbuf_.reserve_space(n_bytes);
@@ -749,7 +746,7 @@ void tr_peerIo::utp_init([[maybe_unused]] struct utp_context* ctx)
io->inbuf_.add(args->buf, args->len);
io->set_enabled(TR_DOWN, true);
io->can_read_wrapper();
io->can_read_wrapper(args->len);
// utp_read_drained() notifies libutp that we read a packet from them.
// It opens up the congestion window by sending an ACK (soonish) if
@@ -783,19 +780,6 @@ void tr_peerIo::utp_init([[maybe_unused]] struct utp_context* ctx)
return {};
});
utp_set_callback(
ctx,
UTP_ON_OVERHEAD_STATISTICS,
[](utp_callback_arguments* args) -> uint64
{
if (auto* const io = static_cast<tr_peerIo*>(utp_get_userdata(args->socket)); io != nullptr)
{
tr_logAddTraceIo(io, fmt::format("{:d} overhead bytes via utp", args->len));
io->bandwidth().notify_bandwidth_consumed(args->send != 0 ? TR_UP : TR_DOWN, args->len, false, tr_time_msec());
}
return {};
});
utp_set_callback(
ctx,
UTP_ON_STATE_CHANGE,

View file

@@ -343,7 +343,7 @@ private:
void event_enable(short event);
void event_disable(short event);
void can_read_wrapper();
void can_read_wrapper(size_t bytes_transferred);
void did_write_wrapper(size_t bytes_transferred);
size_t try_read(size_t max);

View file

@@ -94,32 +94,6 @@ public:
#endif
}
[[nodiscard]] constexpr size_t guess_packet_overhead(size_t n_bytes) const noexcept
{
if (is_tcp())
{
// https://web.archive.org/web/20140912230020/http://sd.wareonearth.com:80/~phil/net/overhead/
// TCP over Ethernet:
// Assuming no header compression (e.g. not PPP)
// Add 20 IPv4 header or 40 IPv6 header (no options)
// Add 20 TCP header
// Add 12 bytes optional TCP timestamps
// Max TCP Payload data rates over ethernet are thus:
// (1500-40)/ (38+1500) = 94.9285 % IPv4, minimal headers
// (1500-52)/ (38+1500) = 94.1482 % IPv4, TCP timestamps
// (1500-52)/ (42+1500) = 93.9040 % 802.1q, IPv4, TCP timestamps
// (1500-60)/ (38+1500) = 93.6281 % IPv6, minimal headers
// (1500-72)/ (38+1500) = 92.8479 % IPv6, TCP timestamps
// (1500-72)/ (42+1500) = 92.6070 % 802.1q, IPv6, TCP timestamps
// So, let's guess around 7% overhead
return n_bytes / 14U;
}
// We only guess for TCP; uTP tracks its overhead via UTP_ON_OVERHEAD_STATISTICS
return {};
}
union
{
tr_socket_t tcp;

View file

@@ -259,7 +259,9 @@ public:
void got_piece_data(uint32_t n_bytes)
{
bandwidth_.notify_bandwidth_consumed(TR_DOWN, n_bytes, true, tr_time_msec());
auto const now = tr_time_msec();
bandwidth_.notify_bandwidth_consumed(TR_DOWN, n_bytes, false, now);
bandwidth_.notify_bandwidth_consumed(TR_DOWN, n_bytes, true, now);
publish(tr_peer_event::GotPieceData(n_bytes));
connection_limiter.got_data();
}