// This file Copyright © 2008-2023 Mnemosyne LLC.
// It may be used under GPLv2 (SPDX: GPL-2.0-only), GPLv3 (SPDX: GPL-3.0-only),
// or any future license endorsed by Mnemosyne LLC.
// License text can be found in the licenses/ folder.

#include <algorithm>
#include <utility> // for std::swap()
#include <vector>

#include <fmt/core.h>

#include "transmission.h"

#include "bandwidth.h"
#include "crypto-utils.h"
#include "log.h"
#include "peer-io.h"
#include "tr-assert.h"
#include "utils.h" // tr_time_msec()

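// Compute the transfer speed over the most recent `interval_msec` milliseconds
// by walking the circular history in `r` from newest to oldest. The result is
// cached in `r` and reused until `now` changes.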
tr_bytes_per_second_t tr_bandwidth::getSpeedBytesPerSecond(RateControl& r, unsigned int interval_msec, uint64_t now)
{
    if (now == 0)
    {
        now = tr_time_msec();
    }

    if (now != r.cache_time_)
    {
        uint64_t bytes = 0;
        uint64_t const cutoff = now - interval_msec;

        for (int i = r.newest_; r.date_[i] > cutoff;)
        {
            bytes += r.size_[i];

            if (--i == -1)
            {
                i = HistorySize - 1; /* circular history */
            }

            if (i == r.newest_)
            {
                break; /* we've come all the way around */
            }
        }

        r.cache_val_ = static_cast<tr_bytes_per_second_t>(bytes * 1000U / interval_msec);
        r.cache_time_ = now;
    }

    return r.cache_val_;
}

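// Record `size` bytes consumed at time `now`: either add them to the newest
// history bucket (if it is still within GranularityMSec) or start a new
// bucket, then invalidate the cached speed.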
void tr_bandwidth::notifyBandwidthConsumedBytes(uint64_t const now, RateControl* r, size_t size)
{
    if (r->date_[r->newest_] + GranularityMSec >= now)
    {
        r->size_[r->newest_] += size;
    }
    else
    {
        if (++r->newest_ == HistorySize)
        {
            r->newest_ = 0;
        }

        r->date_[r->newest_] = now;
        r->size_[r->newest_] = size;
    }

    /* invalidate cache_val */
    r->cache_time_ = 0;
}

// ---

tr_bandwidth::tr_bandwidth(tr_bandwidth* parent)
{
    this->setParent(parent);
}

// ---

namespace
{
namespace deparent_helpers
{
void remove_child(std::vector<tr_bandwidth*>& v, tr_bandwidth* remove_me) noexcept
{
    // the list isn't sorted -- so instead of erase()ing `it`,
    // do the cheaper option of overwriting it with the final item
    if (auto it = std::find(std::begin(v), std::end(v), remove_me); it != std::end(v))
    {
        *it = v.back();
        v.resize(v.size() - 1);
    }
}
} // namespace deparent_helpers
} // namespace

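// Detach this node from its parent's list of children, if it has a parent.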
void tr_bandwidth::deparent() noexcept
{
    using namespace deparent_helpers;

    if (parent_ == nullptr)
    {
        return;
    }

    remove_child(parent_->children_, this);
    parent_ = nullptr;
}

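// Move this node to a new place in the bandwidth hierarchy: detach it from
// its current parent (if any), then register it as a child of `new_parent`.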
void tr_bandwidth::setParent(tr_bandwidth* new_parent)
{
    TR_ASSERT(this != new_parent);

    deparent();

    if (new_parent != nullptr)
    {
#ifdef TR_ENABLE_ASSERTS
        TR_ASSERT(new_parent->parent_ != this);
        auto& children = new_parent->children_;
        TR_ASSERT(std::find(std::begin(children), std::end(children), this) == std::end(children)); // not already there
#endif

        new_parent->children_.push_back(this);
        this->parent_ = new_parent;
    }
}

// ---

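// Refill the per-pulse byte allowance for any speed-limited direction on this
// node, add this node's peer (if any) to `peer_pool`, then recurse into the
// children so the whole subtree is covered.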
void tr_bandwidth::allocateBandwidth(
    tr_priority_t parent_priority,
    unsigned int period_msec,
    std::vector<std::shared_ptr<tr_peerIo>>& peer_pool)
{
    auto const priority = std::max(parent_priority, this->priority_);

    // set the available bandwidth
    for (auto const dir : { TR_UP, TR_DOWN })
    {
        if (auto& bandwidth = band_[dir]; bandwidth.is_limited_)
        {
            auto const next_pulse_speed = bandwidth.desired_speed_bps_;
            bandwidth.bytes_left_ = next_pulse_speed * period_msec / 1000U;
        }
    }

    // add this bandwidth's peer, if any, to the peer pool
    if (auto shared = this->peer_.lock(); shared)
    {
        shared->set_priority(priority);
        peer_pool.push_back(std::move(shared));
    }

    // traverse & repeat for the subtree
    for (auto* child : this->children_)
    {
        child->allocateBandwidth(priority, period_msec, peer_pool);
    }
}

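// Phase one of the allocation cycle: hand out bandwidth to `peers` in small,
// round-robin increments for the given direction until the allowance, or the
// set of peers that can still use it, is exhausted.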
void tr_bandwidth::phaseOne(std::vector<tr_peerIo*>& peers, tr_direction dir)
{
    // First phase of IO. Tries to distribute bandwidth fairly to keep faster
    // peers from starving the others.
    tr_logAddTrace(fmt::format("{} peers to go round-robin for {}", peers.size(), dir == TR_UP ? "upload" : "download"));

    // Shuffle the peers so they all have equal chance to be first in line.
    thread_local auto urbg = tr_urbg<size_t>{};
    std::shuffle(std::begin(peers), std::end(peers), urbg);

    // Give each peer `Increment` bandwidth bytes to use. Repeat this
    // process until we run out of bandwidth and/or peers that can use it.
    for (size_t n_unfinished = std::size(peers); n_unfinished > 0U;)
    {
        for (size_t i = 0; i < n_unfinished;)
        {
            // Value of 3000 bytes chosen so that when using µTP we'll send a full-size
            // frame right away and leave enough buffered data for the next frame to go
            // out in a timely manner.
            static auto constexpr Increment = size_t{ 3000 };

            auto const bytes_used = peers[i]->flush(dir, Increment);
            tr_logAddTrace(fmt::format("peer #{} of {} used {} bytes in this pass", i, n_unfinished, bytes_used));

            if (bytes_used != Increment)
            {
                // peer is done writing for now; move it to the end of the list
                std::swap(peers[i], peers[n_unfinished - 1]);
                --n_unfinished;
            }
            else
            {
                ++i;
            }
        }
    }
}

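// Top-level allocation pass: refill the whole bandwidth tree, group the
// collected peers by priority, run phase-one IO for each group in both
// directions, then enable on-demand IO for peers with bandwidth to spare.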
void tr_bandwidth::allocate(unsigned int period_msec)
{
    // keep these peers alive for the scope of this function
    auto refs = std::vector<std::shared_ptr<tr_peerIo>>{};

    auto peer_arrays = std::array<std::vector<tr_peerIo*>, 3>{};
    auto& high = peer_arrays[0];
    auto& normal = peer_arrays[1];
    auto& low = peer_arrays[2];

    // allocateBandwidth() is a helper function with two purposes:
    // 1. allocate bandwidth to this node and its subtree
    // 2. accumulate an array of all the peerIos from this node and its subtree.
    this->allocateBandwidth(TR_PRI_LOW, period_msec, refs);

    for (auto const& io : refs)
    {
        io->flush_outgoing_protocol_msgs();

        switch (io->priority())
        {
        case TR_PRI_HIGH:
            high.push_back(io.get());
            [[fallthrough]];

        case TR_PRI_NORMAL:
            normal.push_back(io.get());
            [[fallthrough]];

        default:
            low.push_back(io.get());
        }
    }

    // First phase of IO. Tries to distribute bandwidth fairly to keep faster
    // peers from starving the others. Loop through the peers, giving each a
    // small chunk of bandwidth. Keep looping until we run out of bandwidth
    // and/or peers that can use it.
    for (auto& peers : peer_arrays)
    {
        phaseOne(peers, TR_UP);
        phaseOne(peers, TR_DOWN);
    }

    // Second phase of IO. To help us scale in high bandwidth situations,
    // enable on-demand IO for peers with bandwidth left to burn.
    // This on-demand IO is enabled until (1) the peer runs out of bandwidth,
    // or (2) the next tr_bandwidth::allocate() call, when we start over again.
    for (auto const& io : refs)
    {
        io->set_enabled(TR_UP, io->has_bandwidth_left(TR_UP));
        io->set_enabled(TR_DOWN, io->has_bandwidth_left(TR_DOWN));
    }
}

// ---

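// Clamp `byte_count` to what this node (and, transitively, its ancestors)
// will allow in the given direction during this pulse.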
size_t tr_bandwidth::clamp(uint64_t now, tr_direction dir, size_t byte_count) const
{
    TR_ASSERT(tr_isDirection(dir));

    if (this->band_[dir].is_limited_)
    {
        byte_count = std::min(byte_count, this->band_[dir].bytes_left_);

        /* if we're getting close to exceeding the speed limit,
         * clamp down harder on the bytes available */
        if (byte_count > 0)
        {
            if (now == 0)
            {
                now = tr_time_msec();
            }

            auto const current = this->getRawSpeedBytesPerSecond(now, TR_DOWN);
            auto const desired = this->getDesiredSpeedBytesPerSecond(TR_DOWN);
            auto const r = desired >= 1 ? static_cast<double>(current) / desired : 0.0;

            if (r > 1.0)
            {
                byte_count = 0; // none left
            }
            else if (r > 0.9)
            {
                byte_count -= (byte_count / 5U); // cap at 80%
            }
            else if (r > 0.8)
            {
                byte_count -= (byte_count / 10U); // cap at 90%
            }
        }
    }

    if (this->parent_ != nullptr && this->band_[dir].honor_parent_limits_ && byte_count > 0)
    {
        byte_count = this->parent_->clamp(now, dir, byte_count);
    }

    return byte_count;
}

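// Charge `byte_count` bytes against this node's allowance and history for the
// given direction, then propagate the same charge up the parent chain.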
void tr_bandwidth::notifyBandwidthConsumed(tr_direction dir, size_t byte_count, bool is_piece_data, uint64_t now)
{
    TR_ASSERT(tr_isDirection(dir));

    Band* band = &this->band_[dir];

#ifdef DEBUG_DIRECTION
    // capture the pre-update value for the debug trace below
    auto const old_bytes_left = band->bytes_left_;
#endif

    if (band->is_limited_ && is_piece_data)
    {
        band->bytes_left_ -= std::min(size_t{ band->bytes_left_ }, byte_count);
    }

#ifdef DEBUG_DIRECTION

    if (dir == DEBUG_DIRECTION && band->is_limited_)
    {
        fprintf(
            stderr,
            "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n",
            this,
            byte_count,
            is_piece_data ? "piece" : "raw",
            old_bytes_left,
            band->bytes_left_);
    }

#endif

    notifyBandwidthConsumedBytes(now, &band->raw_, byte_count);

    if (is_piece_data)
    {
        notifyBandwidthConsumedBytes(now, &band->piece_, byte_count);
    }

    if (this->parent_ != nullptr)
    {
        this->parent_->notifyBandwidthConsumed(dir, byte_count, is_piece_data, now);
    }
}

// ---

tr_bandwidth_limits tr_bandwidth::getLimits() const
{
    tr_bandwidth_limits limits;
    limits.up_limit_KBps = tr_toSpeedKBps(this->getDesiredSpeedBytesPerSecond(TR_UP));
    limits.down_limit_KBps = tr_toSpeedKBps(this->getDesiredSpeedBytesPerSecond(TR_DOWN));
    limits.up_limited = this->isLimited(TR_UP);
    limits.down_limited = this->isLimited(TR_DOWN);
    return limits;
}

void tr_bandwidth::setLimits(tr_bandwidth_limits const* limits)
{
    this->setDesiredSpeedBytesPerSecond(TR_UP, tr_toSpeedBytes(limits->up_limit_KBps));
    this->setDesiredSpeedBytesPerSecond(TR_DOWN, tr_toSpeedBytes(limits->down_limit_KBps));
    this->setLimited(TR_UP, limits->up_limited);
    this->setLimited(TR_DOWN, limits->down_limited);
}