2008-11-24 04:21:23 +00:00
|
|
|
/*
|
2014-01-19 01:09:44 +00:00
|
|
|
* This file Copyright (C) 2008-2014 Mnemosyne LLC
|
2008-11-24 04:21:23 +00:00
|
|
|
*
|
2014-01-21 03:10:30 +00:00
|
|
|
* It may be used under the GNU GPL versions 2 or 3
|
2014-01-19 01:09:44 +00:00
|
|
|
* or any future license endorsed by Mnemosyne LLC.
|
2008-11-24 04:21:23 +00:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2008-11-25 21:35:17 +00:00
|
|
|
#include <assert.h>
|
2017-04-21 07:40:57 +00:00
|
|
|
#include <string.h> /* memset() */
|
2008-11-25 21:35:17 +00:00
|
|
|
|
2008-11-24 04:21:23 +00:00
|
|
|
#include "transmission.h"
|
|
|
|
#include "bandwidth.h"
|
2017-04-21 07:40:57 +00:00
|
|
|
#include "crypto-utils.h" /* tr_rand_int_weak() */
|
2013-01-25 23:34:20 +00:00
|
|
|
#include "log.h"
|
2008-12-16 22:08:17 +00:00
|
|
|
#include "peer-io.h"
|
2008-11-24 04:21:23 +00:00
|
|
|
#include "utils.h"
|
|
|
|
|
2012-12-05 17:29:46 +00:00
|
|
|
/* Emit a deep-debug log message tagged with this file and line.
 * The do/while (0) wrapper makes the macro behave as a single statement
 * (safe in unbraced if/else); the tr_logGetDeepEnabled() guard keeps the
 * macro cheap when deep logging is disabled. */
#define dbgmsg(...) \
    do \
    { \
        if (tr_logGetDeepEnabled()) \
        { \
            tr_logAddDeep(__FILE__, __LINE__, NULL, __VA_ARGS__); \
        } \
    } \
    while (0)
|
2009-01-02 21:50:51 +00:00
|
|
|
|
2008-11-24 04:21:23 +00:00
|
|
|
/***
|
|
|
|
****
|
|
|
|
***/
|
|
|
|
|
2017-04-20 16:02:19 +00:00
|
|
|
/* Return the average transfer speed, in bytes per second, over the most
 * recent `interval_msec` milliseconds of the history recorded in `r`,
 * evaluated at time `now` (a `now` of 0 means "use the current time").
 * The result is cached per-timestamp so repeated calls within the same
 * millisecond are cheap. */
static unsigned int getSpeed_Bps(struct bratecontrol const* r, unsigned int interval_msec, uint64_t now)
{
    if (!now)
    {
        now = tr_time_msec();
    }

    /* only recompute when the cached value is stale for this timestamp */
    if (now != r->cache_time)
    {
        int i = r->newest;
        uint64_t bytes = 0;
        uint64_t const cutoff = now - interval_msec;
        /* cast away const so the cache fields can be refreshed; the function
         * is logically const from the caller's point of view */
        struct bratecontrol* rvolatile = (struct bratecontrol*)r;

        /* walk backwards through the circular history, summing the bytes
         * recorded since `cutoff` */
        for (;;)
        {
            if (r->transfers[i].date <= cutoff)
            {
                break; /* this entry is too old to count */
            }

            bytes += r->transfers[i].size;

            if (--i == -1)
            {
                i = HISTORY_SIZE - 1; /* circular history */
            }

            if (i == r->newest)
            {
                break; /* we've come all the way around */
            }
        }

        /* scale bytes-per-interval up to bytes-per-second */
        rvolatile->cache_val = (unsigned int)((bytes * 1000u) / interval_msec);
        rvolatile->cache_time = now;
    }

    return r->cache_val;
}
|
|
|
|
|
2017-04-20 16:02:19 +00:00
|
|
|
/* Record that `size` bytes were transferred at time `now` in the rate
 * history `r`. Transfers within GRANULARITY_MSEC of the newest entry are
 * merged into it; otherwise the history advances to a fresh slot. */
static void bytesUsed(uint64_t const now, struct bratecontrol* r, size_t size)
{
    bool const reuse_newest = r->transfers[r->newest].date + GRANULARITY_MSEC >= now;

    if (!reuse_newest)
    {
        /* advance to the next slot in the circular history */
        r->newest = (r->newest + 1) % HISTORY_SIZE;
        r->transfers[r->newest].date = now;
        r->transfers[r->newest].size = 0;
    }

    r->transfers[r->newest].size += size;

    /* force getSpeed_Bps() to recompute on its next call */
    r->cache_time = 0;
}
|
|
|
|
|
|
|
|
/******
|
|
|
|
*******
|
|
|
|
*******
|
|
|
|
******/
|
|
|
|
|
2017-04-20 16:02:19 +00:00
|
|
|
static int compareBandwidth(void const* va, void const* vb)
|
2008-11-25 21:35:17 +00:00
|
|
|
{
|
2017-04-20 16:02:19 +00:00
|
|
|
tr_bandwidth const* a = va;
|
|
|
|
tr_bandwidth const* b = vb;
|
2017-04-19 12:04:45 +00:00
|
|
|
return a->uniqueKey - b->uniqueKey;
|
2008-11-25 21:35:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/***
|
|
|
|
****
|
|
|
|
***/
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* Initialize `b` as a member of the bandwidth hierarchy, attaching it to
 * `parent` (which may be NULL for the root). */
void tr_bandwidthConstruct(tr_bandwidth* b, tr_session* session, tr_bandwidth* parent)
{
    /* monotonically-increasing key giving each object a stable sort order
     * (see compareBandwidth) */
    static unsigned int uniqueKey = 0;

    b->magicNumber = BANDWIDTH_MAGIC_NUMBER;
    b->uniqueKey = uniqueKey++;
    b->session = session;
    b->children = TR_PTR_ARRAY_INIT;

    /* by default a child honors its parent's speed limits */
    b->band[TR_UP].honorParentLimits = true;
    b->band[TR_DOWN].honorParentLimits = true;

    tr_bandwidthSetParent(b, parent);
}
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* Tear down `b`: detach it from its parent, release its child list, and
 * poison the struct's memory so use-after-destruct bugs fail fast
 * (presumably the ~0 fill also invalidates magicNumber so tr_isBandwidth()
 * would trip — TODO confirm against tr_isBandwidth's definition). */
void tr_bandwidthDestruct(tr_bandwidth* b)
{
    assert(tr_isBandwidth(b));

    tr_bandwidthSetParent(b, NULL);
    tr_ptrArrayDestruct(&b->children, NULL);

    /* debugging aid: fill the freed-in-place object with ~0 bytes */
    memset(b, ~0, sizeof(tr_bandwidth));
}
|
|
|
|
|
|
|
|
/***
|
|
|
|
****
|
|
|
|
***/
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* Detach `b` from its current parent (if any) and attach it to `parent`.
 * Pass NULL to leave `b` detached. The parent's `children` array is kept
 * sorted by uniqueKey (see compareBandwidth) so membership checks and
 * removal can use the sorted-array helpers. */
void tr_bandwidthSetParent(tr_bandwidth* b, tr_bandwidth* parent)
{
    assert(tr_isBandwidth(b));
    assert(b != parent); /* a node must not be its own parent */

    if (b->parent)
    {
        assert(tr_isBandwidth(b->parent));
        /* remove b from its old parent's sorted child list */
        tr_ptrArrayRemoveSortedPointer(&b->parent->children, b, compareBandwidth);
        b->parent = NULL;
    }

    if (parent)
    {
        assert(tr_isBandwidth(parent));
        assert(parent->parent != b); /* guard against a two-node cycle */

        /* insert exactly once: assert absence before, presence after */
        assert(tr_ptrArrayFindSorted(&parent->children, b, compareBandwidth) == NULL);
        tr_ptrArrayInsertSorted(&parent->children, b, compareBandwidth);
        assert(tr_ptrArrayFindSorted(&parent->children, b, compareBandwidth) == b);
        b->parent = parent;
    }
}
|
|
|
|
|
|
|
|
/***
|
|
|
|
****
|
|
|
|
***/
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
static void allocateBandwidth(tr_bandwidth* b, tr_priority_t parent_priority, tr_direction dir, unsigned int period_msec,
|
|
|
|
tr_ptrArray* peer_pool)
|
2008-11-24 04:21:23 +00:00
|
|
|
{
|
2017-04-20 16:02:19 +00:00
|
|
|
tr_priority_t const priority = MAX(parent_priority, b->priority);
|
2009-04-18 23:17:30 +00:00
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
assert(tr_isBandwidth(b));
|
|
|
|
assert(tr_isDirection(dir));
|
2008-11-26 15:58:26 +00:00
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* set the available bandwidth */
|
|
|
|
if (b->band[dir].isLimited)
|
2008-11-26 15:58:26 +00:00
|
|
|
{
|
2017-04-20 16:02:19 +00:00
|
|
|
uint64_t const nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
|
2017-04-19 12:04:45 +00:00
|
|
|
b->band[dir].bytesLeft = nextPulseSpeed * period_msec / 1000u;
|
2008-11-26 15:58:26 +00:00
|
|
|
}
|
2008-11-25 21:35:17 +00:00
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* add this bandwidth's peer, if any, to the peer pool */
|
|
|
|
if (b->peer != NULL)
|
2013-01-24 23:59:52 +00:00
|
|
|
{
|
2017-04-19 12:04:45 +00:00
|
|
|
b->peer->priority = priority;
|
|
|
|
tr_ptrArrayAppend(peer_pool, b->peer);
|
2009-04-18 23:17:30 +00:00
|
|
|
}
|
2008-12-09 22:05:45 +00:00
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* traverse & repeat for the subtree */
|
|
|
|
if (1)
|
2013-01-24 23:59:52 +00:00
|
|
|
{
|
2017-04-19 12:04:45 +00:00
|
|
|
int i;
|
|
|
|
struct tr_bandwidth** children = (struct tr_bandwidth**)tr_ptrArrayBase(&b->children);
|
2017-04-20 16:02:19 +00:00
|
|
|
int const n = tr_ptrArraySize(&b->children);
|
2017-04-19 12:04:45 +00:00
|
|
|
|
|
|
|
for (i = 0; i < n; ++i)
|
|
|
|
{
|
|
|
|
allocateBandwidth(children[i], priority, dir, period_msec, peer_pool);
|
|
|
|
}
|
2008-11-25 21:35:17 +00:00
|
|
|
}
|
2008-11-24 04:21:23 +00:00
|
|
|
}
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* First phase of IO for one priority bucket. Tries to distribute bandwidth
 * fairly to keep faster peers from starving the others: loop through the
 * peers, giving each a small chunk of bandwidth, until we run out of
 * bandwidth and/or peers that can use it.
 * (Cleanup: the local previously named `bytesUsed` shadowed the file-scope
 * bytesUsed() helper; it is renamed to `bytesWritten`.) */
static void phaseOne(tr_ptrArray* peerArray, tr_direction dir)
{
    int n;
    int peerCount = tr_ptrArraySize(peerArray);
    struct tr_peerIo** peers = (struct tr_peerIo**)tr_ptrArrayBase(peerArray);

    n = peerCount;
    dbgmsg("%d peers to go round-robin for %s", n, (dir == TR_UP ? "upload" : "download"));

    /* the first n entries of `peers` are still active; finished peers are
     * swapped to the tail and n shrinks */
    while (n > 0)
    {
        int const i = tr_rand_int_weak(n); /* pick a peer at random */

        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
         * frame right away and leave enough buffered data for the next frame to go
         * out in a timely manner. */
        size_t const increment = 3000;

        int const bytesWritten = tr_peerIoFlush(peers[i], dir, increment);

        dbgmsg("peer #%d of %d used %d bytes in this pass", i, n, bytesWritten);

        if (bytesWritten != (int)increment)
        {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo* pio = peers[i];
            peers[i] = peers[n - 1];
            peers[n - 1] = pio;
            --n;
        }
    }
}
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* Top-level bandwidth pump: allot bandwidth for `b`'s whole subtree for the
 * next `period_msec` milliseconds, then flush peer IO in priority order and
 * arm on-demand IO for peers that still have bandwidth to burn. */
void tr_bandwidthAllocate(tr_bandwidth* b, tr_direction dir, unsigned int period_msec)
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo** peers;

    /* allocateBandwidth () is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth(b, TR_PRI_LOW, dir, period_msec, &tmp);
    peers = (struct tr_peerIo**)tr_ptrArrayBase(&tmp);
    peerCount = tr_ptrArraySize(&tmp);

    for (i = 0; i < peerCount; ++i)
    {
        tr_peerIo* io = peers[i];
        /* hold a ref so the io survives until the unref loop below */
        tr_peerIoRef(io);

        tr_peerIoFlushOutgoingProtocolMsgs(io);

        /* bucket by priority. The fallthroughs are deliberate: a HIGH io
         * lands in all three lists and a NORMAL io in two, so
         * higher-priority ios get extra turns in the phaseOne passes below */
        switch (io->priority)
        {
        case TR_PRI_HIGH:
            tr_ptrArrayAppend(&high, io); /* fall through */

        case TR_PRI_NORMAL:
            tr_ptrArrayAppend(&normal, io); /* fall through */

        default:
            tr_ptrArrayAppend(&low, io);
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne(&high, dir);
    phaseOne(&normal, dir);
    phaseOne(&low, dir);

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate () call, when we start over again. */
    for (i = 0; i < peerCount; ++i)
    {
        tr_peerIoSetEnabled(peers[i], dir, tr_peerIoHasBandwidthLeft(peers[i], dir));
    }

    /* drop the refs taken at the top of the first loop */
    for (i = 0; i < peerCount; ++i)
    {
        tr_peerIoUnref(peers[i]);
    }

    /* cleanup */
    tr_ptrArrayDestruct(&normal, NULL);
    tr_ptrArrayDestruct(&high, NULL);
    tr_ptrArrayDestruct(&low, NULL);
    tr_ptrArrayDestruct(&tmp, NULL);
}
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* Attach the peer-io whose traffic this bandwidth object governs,
 * or detach it by passing NULL. */
void tr_bandwidthSetPeer(tr_bandwidth* b, tr_peerIo* peer)
{
    assert(tr_isBandwidth(b));
    assert(peer == NULL || tr_isPeerIo(peer));

    b->peer = peer;
}
|
|
|
|
|
|
|
|
/***
|
|
|
|
****
|
|
|
|
***/
|
|
|
|
|
2017-04-20 16:02:19 +00:00
|
|
|
/* Clamp `byteCount` to what `b` — and, transitively, its honored parents —
 * will allow in direction `dir` at time `now` (0 means "current time").
 * When the measured speed is approaching the desired limit, the allowance
 * is throttled harder to avoid overshooting. */
static unsigned int bandwidthClamp(tr_bandwidth const* b, uint64_t now, tr_direction dir, unsigned int byteCount)
{
    assert(tr_isBandwidth(b));
    assert(tr_isDirection(dir));

    if (b)
    {
        if (b->band[dir].isLimited)
        {
            byteCount = MIN(byteCount, b->band[dir].bytesLeft);

            /* if we're getting close to exceeding the speed limit,
             * clamp down harder on the bytes available */
            if (byteCount > 0)
            {
                double current;
                double desired;
                double r;

                if (now == 0)
                {
                    now = tr_time_msec();
                }

                /* FIX: measure speed in the direction being clamped;
                 * the old code hardcoded TR_DOWN here, which made upload
                 * clamping react to the *download* speed */
                current = tr_bandwidthGetRawSpeed_Bps(b, now, dir);
                desired = tr_bandwidthGetDesiredSpeed_Bps(b, dir);
                r = desired >= 1 ? current / desired : 0;

                if (r > 1.0)
                {
                    byteCount = 0; /* already over the limit */
                }
                else if (r > 0.9)
                {
                    byteCount *= 0.8;
                }
                else if (r > 0.8)
                {
                    byteCount *= 0.9;
                }
            }
        }

        /* recurse so ancestors' limits apply too */
        if (b->parent && b->band[dir].honorParentLimits && (byteCount > 0))
        {
            byteCount = bandwidthClamp(b->parent, now, dir, byteCount);
        }
    }

    return byteCount;
}
|
2015-05-31 22:13:31 +00:00
|
|
|
|
2017-04-20 16:02:19 +00:00
|
|
|
/* Public wrapper around bandwidthClamp(); passing a `now` of 0 makes the
 * helper fetch the current time itself. */
unsigned int tr_bandwidthClamp(tr_bandwidth const* b, tr_direction dir, unsigned int byteCount)
{
    return bandwidthClamp(b, 0, dir, byteCount);
}
|
|
|
|
|
2017-04-20 16:02:19 +00:00
|
|
|
/* Return b's raw (all traffic, including overhead) speed in bytes per
 * second for direction `dir`, averaged over the last HISTORY_MSEC. */
unsigned int tr_bandwidthGetRawSpeed_Bps(tr_bandwidth const* b, uint64_t const now, tr_direction const dir)
{
    assert(tr_isBandwidth(b));
    assert(tr_isDirection(dir));

    return getSpeed_Bps(&b->band[dir].raw, HISTORY_MSEC, now);
}
|
|
|
|
|
2017-04-20 16:02:19 +00:00
|
|
|
/* Return b's piece-data-only speed in bytes per second for direction `dir`,
 * averaged over the last HISTORY_MSEC. */
unsigned int tr_bandwidthGetPieceSpeed_Bps(tr_bandwidth const* b, uint64_t const now, tr_direction const dir)
{
    assert(tr_isBandwidth(b));
    assert(tr_isDirection(dir));

    return getSpeed_Bps(&b->band[dir].piece, HISTORY_MSEC, now);
}
|
|
|
|
|
2017-04-19 12:04:45 +00:00
|
|
|
/* Record that `byteCount` bytes moved through `b` in direction `dir` at
 * time `now`: debit the per-period allowance (piece data only), update the
 * raw and piece speed histories, and propagate the usage up to the parent.
 * (Fix: the DEBUG_DIRECTION trace referenced an undefined `oldBytesLeft`
 * and would not compile when DEBUG_DIRECTION was defined; the value is now
 * captured before the decrement.) */
void tr_bandwidthUsed(tr_bandwidth* b, tr_direction dir, size_t byteCount, bool isPieceData, uint64_t now)
{
    struct tr_band* band;

    assert(tr_isBandwidth(b));
    assert(tr_isDirection(dir));

    band = &b->band[dir];

#ifdef DEBUG_DIRECTION

    /* pre-decrement value for the trace below */
    size_t const oldBytesLeft = band->bytesLeft;

#endif

    /* only piece data counts against the configured speed limit */
    if (band->isLimited && isPieceData)
    {
        band->bytesLeft -= MIN(band->bytesLeft, byteCount);
    }

#ifdef DEBUG_DIRECTION

    if ((dir == DEBUG_DIRECTION) && (band->isLimited))
    {
        fprintf(stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n", b, byteCount,
            (isPieceData ? "piece" : "raw"), oldBytesLeft, band->bytesLeft);
    }

#endif

    bytesUsed(now, &band->raw, byteCount);

    if (isPieceData)
    {
        bytesUsed(now, &band->piece, byteCount);
    }

    /* usage counts against every ancestor's history too */
    if (b->parent != NULL)
    {
        tr_bandwidthUsed(b->parent, dir, byteCount, isPieceData, now);
    }
}
|