refactor: always use a blocksize of 16 KB (#2694)

Fixes #99.
Charles Kerr, 2022-02-23 14:25:06 -06:00, committed by GitHub
parent f7d74095ed
commit 13ad2b58dc
12 changed files with 57 additions and 88 deletions

@@ -10,34 +10,14 @@
#include "block-info.h"
#include "tr-assert.h"
// Decide on a block size. Constraints:
// (1) most clients decline requests over 16 KiB
// (2) pieceSize must be a multiple of block size
uint32_t tr_block_info::bestBlockSize(uint64_t piece_size)
{
uint32_t b = piece_size;
auto constexpr MaxBlockSize = uint32_t{ 1024 * 16 };
while (b > MaxBlockSize)
{
b /= 2U;
}
if (b == 0 || piece_size % b != 0) // not cleanly divisible
{
return 0;
}
return b;
}
void tr_block_info::initSizes(uint64_t total_size_in, uint64_t piece_size_in)
{
total_size = total_size_in;
piece_size = piece_size_in;
block_size = bestBlockSize(piece_size);
if (piece_size == 0 || block_size == 0)
TR_ASSERT(piece_size == 0 || piece_size >= BlockSize);
if (piece_size == 0)
{
*this = {};
return;
@@ -48,30 +28,21 @@ void tr_block_info::initSizes(uint64_t total_size_in, uint64_t piece_size_in)
auto remainder = total_size % piece_size;
final_piece_size = remainder != 0U ? remainder : piece_size;
remainder = total_size % block_size;
final_block_size = remainder != 0U ? remainder : block_size;
remainder = total_size % BlockSize;
final_block_size = remainder != 0U ? remainder : BlockSize;
if (block_size != 0)
{
n_blocks = (total_size + block_size - 1) / block_size;
n_blocks_in_piece = piece_size / block_size;
n_blocks_in_final_piece = (final_piece_size + block_size - 1) / block_size;
}
n_blocks = (total_size + BlockSize - 1) / BlockSize;
n_blocks_in_piece = piece_size / BlockSize;
n_blocks_in_final_piece = (final_piece_size + BlockSize - 1) / BlockSize;
#ifdef TR_ENABLE_ASSERTS
// check our work
if (block_size != 0)
{
TR_ASSERT(piece_size % block_size == 0);
}
uint64_t t = n_pieces - 1;
t *= piece_size;
t += final_piece_size;
TR_ASSERT(t == total_size);
t = n_blocks - 1;
t *= block_size;
t *= BlockSize;
t += final_block_size;
TR_ASSERT(t == total_size);
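
For reference, a minimal standalone sketch (not part of the diff) of the sizing arithmetic once the block size is a fixed 16 KiB; the 96 KiB torrent with 64 KiB pieces is a made-up example:

#include <cassert>
#include <cstdint>

int main()
{
    auto constexpr BlockSize = uint64_t{ 1024 * 16 };
    auto constexpr total_size = uint64_t{ 1024 * 96 }; // hypothetical torrent size
    auto constexpr piece_size = uint64_t{ 1024 * 64 }; // hypothetical piece size

    auto const n_pieces = (total_size + piece_size - 1) / piece_size; // 2
    auto const n_blocks = (total_size + BlockSize - 1) / BlockSize; // 6

    auto remainder = total_size % piece_size;
    auto const final_piece_size = remainder != 0 ? remainder : piece_size; // 32 KiB

    remainder = total_size % BlockSize;
    auto const final_block_size = remainder != 0 ? remainder : BlockSize; // 16 KiB

    // the same invariants the TR_ASSERT checks above express
    assert((n_pieces - 1) * piece_size + final_piece_size == total_size);
    assert((n_blocks - 1) * BlockSize + final_block_size == total_size);
    return 0;
}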

@@ -11,6 +11,8 @@
struct tr_block_info
{
static auto constexpr BlockSize = uint32_t{ 1024 * 16 };
uint64_t total_size = 0;
uint64_t piece_size = 0;
uint64_t n_pieces = 0;
@@ -18,7 +20,6 @@ struct tr_block_info
tr_block_index_t n_blocks = 0;
tr_block_index_t n_blocks_in_piece = 0;
tr_block_index_t n_blocks_in_final_piece = 0;
uint32_t block_size = 0;
uint32_t final_block_size = 0;
uint32_t final_piece_size = 0;
@@ -35,15 +36,10 @@ struct tr_block_info
return n_blocks;
}
[[nodiscard]] constexpr auto blockSize() const
{
return block_size;
}
// return the number of bytes in `block`
[[nodiscard]] constexpr auto blockSize(tr_block_index_t block) const
{
return block + 1 == n_blocks ? final_block_size : blockSize();
return block + 1 == n_blocks ? final_block_size : BlockSize;
}
[[nodiscard]] constexpr auto pieceCount() const
@@ -103,7 +99,7 @@ struct tr_block_info
{
TR_ASSERT(block < n_blocks);
return byteLoc(uint64_t{ block } * blockSize());
return byteLoc(uint64_t{ block } * BlockSize);
}
// Location of the last byte in `block`.
@@ -114,7 +110,7 @@ struct tr_block_info
return {};
}
return byteLoc(uint64_t{ block } * blockSize() + blockSize(block) - 1);
return byteLoc(uint64_t{ block } * BlockSize + blockSize(block) - 1);
}
// Location of the first byte (+ optional offset and length) in `piece`
@@ -157,11 +153,11 @@ struct tr_block_info
}
else
{
loc.block = byte / blockSize();
loc.block = byte / BlockSize;
loc.piece = byte / pieceSize();
}
loc.block_offset = static_cast<uint32_t>(loc.byte - (uint64_t{ loc.block } * blockSize()));
loc.block_offset = static_cast<uint32_t>(loc.byte - (uint64_t{ loc.block } * BlockSize));
loc.piece_offset = static_cast<uint32_t>(loc.byte - (uint64_t{ loc.piece } * pieceSize()));
return loc;
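
A standalone sketch (not part of the diff) of the byteLoc()-style arithmetic with a constant 16 KiB block; the 64 KiB piece size and the offset 100000 are made-up values:

#include <cstdint>
#include <iostream>

int main()
{
    auto constexpr BlockSize = uint64_t{ 1024 * 16 };
    auto constexpr piece_size = uint64_t{ 1024 * 64 }; // hypothetical piece size
    auto constexpr byte = uint64_t{ 100000 }; // an arbitrary absolute offset

    auto const block = byte / BlockSize; // 6
    auto const piece = byte / piece_size; // 1
    auto const block_offset = byte - block * BlockSize; // 1696
    auto const piece_offset = byte - piece * piece_size; // 34464

    std::cout << "block " << block << " +" << block_offset << ", piece " << piece << " +" << piece_offset << '\n';
    return 0;
}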

@@ -200,11 +200,11 @@ uint64_t tr_completion::countHasBytesInBlocks(tr_block_span_t span) const
auto const [begin, end] = span;
uint64_t n = blocks_.count(begin, end);
n *= block_info_->block_size;
n *= tr_block_info::BlockSize;
if (end == block_info_->n_blocks && blocks_.test(end - 1))
{
n -= block_info_->block_size - block_info_->final_block_size;
n -= tr_block_info::BlockSize - block_info_->final_block_size;
}
return n;
@@ -237,7 +237,7 @@ uint64_t tr_completion::countHasBytesInSpan(tr_byte_span_t span) const
if (hasBlock(begin_block))
{
uint64_t u = begin_block + 1;
u *= block_info_->block_size;
u *= tr_block_info::BlockSize;
u -= begin_byte;
total += u;
}
@@ -246,7 +246,7 @@ uint64_t tr_completion::countHasBytesInSpan(tr_byte_span_t span) const
if (begin_block + 1 < final_block)
{
uint64_t u = blocks_.count(begin_block + 1, final_block);
u *= block_info_->block_size;
u *= tr_block_info::BlockSize;
total += u;
}
@@ -254,7 +254,7 @@ uint64_t tr_completion::countHasBytesInSpan(tr_byte_span_t span) const
if (hasBlock(final_block))
{
uint64_t u = final_block;
u *= block_info_->block_size;
u *= tr_block_info::BlockSize;
total += end_byte - u;
}
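
A standalone sketch (not part of the diff) of the final-block correction used in countHasBytesInBlocks(), assuming a hypothetical four-block torrent whose last block holds only 1000 bytes:

#include <cassert>
#include <cstdint>

int main()
{
    auto constexpr BlockSize = uint64_t{ 1024 * 16 };
    auto constexpr n_blocks = uint64_t{ 4 }; // hypothetical torrent: 4 blocks
    auto constexpr final_block_size = uint64_t{ 1000 }; // the last block is short

    // pretend the bitfield says all four blocks in the span are present
    auto const count = n_blocks;

    auto n = count * BlockSize;
    // the span reaches the final (short) block, so back out the overcount
    n -= BlockSize - final_block_size;

    assert(n == 3 * BlockSize + final_block_size);
    return 0;
}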

@@ -225,7 +225,7 @@ std::optional<tr_sha1_digest_t> recalculateHash(tr_torrent* tor, tr_piece_index_
tr_ioPrefetch(tor, loc, bytes_left);
auto sha = tr_sha1_init();
auto buffer = std::vector<uint8_t>(tor->blockSize());
auto buffer = std::vector<uint8_t>(tr_block_info::BlockSize);
while (bytes_left != 0)
{
size_t const len = std::min(bytes_left, std::size(buffer));
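
A standalone sketch (not part of the diff) of walking a piece in fixed 16 KiB chunks the way the rehash loop does; the 40000-byte remainder is a made-up value and the read/hash steps are stubbed out:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

int main()
{
    auto constexpr BlockSize = size_t{ 1024 * 16 };
    auto bytes_left = size_t{ 40000 }; // hypothetical bytes remaining in the piece
    auto buffer = std::vector<uint8_t>(BlockSize);

    auto chunks = size_t{ 0 };
    while (bytes_left != 0)
    {
        auto const len = std::min(bytes_left, std::size(buffer));
        // ...a real rehash would read `len` bytes here and feed them to the hasher...
        bytes_left -= len;
        ++chunks;
    }
    return chunks == 3 ? 0 : 1; // 16384 + 16384 + 7232 bytes
}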

@@ -541,7 +541,7 @@ static void updateEndgame(tr_swarm* s)
{
/* we consider ourselves to be in endgame if the number of bytes
we've got requested is >= the number of bytes left to download */
s->endgame = uint64_t(std::size(s->active_requests)) * s->tor->blockSize() >= s->tor->leftUntilDone();
s->endgame = uint64_t(std::size(s->active_requests)) * tr_block_info::BlockSize >= s->tor->leftUntilDone();
}
std::vector<tr_block_span_t> tr_peerMgrGetNextRequests(tr_torrent* torrent, tr_peer const* peer, size_t numwant)
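
A standalone sketch (not part of the diff) of the endgame test with made-up numbers for the outstanding requests and the bytes still missing:

#include <cstdint>
#include <iostream>

int main()
{
    auto constexpr BlockSize = uint64_t{ 1024 * 16 };
    auto const active_requests = uint64_t{ 40 }; // hypothetical outstanding block requests
    auto const left_until_done = uint64_t{ 600000 }; // hypothetical bytes still missing

    bool const endgame = active_requests * BlockSize >= left_until_done;
    std::cout << std::boolalpha << endgame << '\n'; // true: 655360 >= 600000
    return 0;
}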

@@ -2030,7 +2030,7 @@ static void updateDesiredRequestCount(tr_peerMsgsImpl* msgs)
* many requests we should send to this peer */
size_t constexpr Floor = 32;
size_t constexpr Seconds = RequestBufSecs;
size_t const estimated_blocks_in_period = (rate_Bps * Seconds) / torrent->blockSize();
size_t const estimated_blocks_in_period = (rate_Bps * Seconds) / tr_block_info::BlockSize;
size_t const ceil = msgs->reqq ? *msgs->reqq : 250;
msgs->desired_request_count = std::clamp(estimated_blocks_in_period, Floor, ceil);
}
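
A standalone sketch (not part of the diff) of the pipeline-depth estimate in updateDesiredRequestCount(); the rate, window, and reqq values here are made-up:

#include <algorithm>
#include <cstddef>
#include <iostream>

int main()
{
    auto constexpr BlockSize = size_t{ 1024 * 16 };
    auto constexpr Floor = size_t{ 32 };
    auto constexpr Seconds = size_t{ 10 }; // hypothetical request-buffer window
    auto const rate_Bps = size_t{ 500000 }; // hypothetical download rate from this peer
    auto const reqq = size_t{ 250 }; // hypothetical peer-advertised queue limit

    auto const estimated_blocks_in_period = (rate_Bps * Seconds) / BlockSize; // 305
    auto const desired_request_count = std::clamp(estimated_blocks_in_period, Floor, reqq); // 250

    std::cout << desired_request_count << '\n';
    return 0;
}
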
@@ -2194,7 +2194,7 @@ static size_t fillOutputBuffer(tr_peerMsgsImpl* msgs, time_t now)
*** Data Blocks
**/
if (tr_peerIoGetWriteBufferSpace(msgs->io, now) >= msgs->torrent->blockSize() && popNextRequest(msgs, &req))
if (tr_peerIoGetWriteBufferSpace(msgs->io, now) >= tr_block_info::BlockSize && popNextRequest(msgs, &req))
{
--msgs->prefetchCount;

@@ -56,10 +56,6 @@ public:
{
return blockInfo().pieceLoc(piece, offset, length);
}
[[nodiscard]] constexpr auto blockSize() const
{
return blockInfo().blockSize();
}
[[nodiscard]] constexpr auto blockSize(tr_block_index_t block) const
{
return blockInfo().blockSize(block);

@@ -160,10 +160,6 @@ public:
{
return metainfo_.pieceLoc(piece, offset, length);
}
[[nodiscard]] constexpr auto blockSize() const
{
return metainfo_.blockSize();
}
[[nodiscard]] constexpr auto blockSize(tr_block_index_t block) const
{
return metainfo_.blockSize(block);

@@ -24,7 +24,6 @@ TEST_F(BlockInfoTest, fieldsAreSet)
uint64_t constexpr TotalSize = PieceSize * PieceCount;
info.initSizes(TotalSize, PieceSize);
EXPECT_EQ(ExpectedBlockSize, info.block_size);
EXPECT_EQ(ExpectedBlockSize, info.final_block_size);
EXPECT_EQ(ExpectedBlocksPerPiece, info.n_blocks_in_final_piece);
EXPECT_EQ(ExpectedBlocksPerPiece, info.n_blocks_in_piece);
@@ -34,7 +33,6 @@ TEST_F(BlockInfoTest, fieldsAreSet)
EXPECT_EQ(TotalSize, info.total_size);
info.initSizes(0, 0);
EXPECT_EQ(0, info.block_size);
EXPECT_EQ(0, info.final_block_size);
EXPECT_EQ(0, info.n_blocks_in_final_piece);
EXPECT_EQ(0, info.n_blocks_in_piece);
@@ -58,7 +56,6 @@ TEST_F(BlockInfoTest, handlesOddSize)
EXPECT_EQ(1, info.final_block_size);
EXPECT_EQ(1, info.final_piece_size);
EXPECT_EQ(1, info.n_blocks_in_final_piece);
EXPECT_EQ(ExpectedBlockSize, info.block_size);
EXPECT_EQ(ExpectedBlocksPerPiece, info.n_blocks_in_piece);
EXPECT_EQ(PieceCount, info.n_pieces);
EXPECT_EQ(PieceSize, info.piece_size);

@@ -285,7 +285,7 @@ TEST_F(CompletionTest, leftUntilDone)
// check that adding a block adjusts by block_info.block_size
completion.addBlock(0);
EXPECT_EQ(block_info.total_size - block_info.block_size, completion.leftUntilDone());
EXPECT_EQ(block_info.total_size - tr_block_info::BlockSize, completion.leftUntilDone());
}
TEST_F(CompletionTest, sizeWhenDone)
@@ -367,7 +367,7 @@ TEST_F(CompletionTest, countMissingBytesInPiece)
EXPECT_EQ(block_info.pieceSize(0), completion.countMissingBytesInPiece(0));
completion.addBlock(0);
EXPECT_EQ(block_info.pieceSize(0) - block_info.block_size, completion.countMissingBytesInPiece(0));
EXPECT_EQ(block_info.pieceSize(0) - tr_block_info::BlockSize, completion.countMissingBytesInPiece(0));
completion.addPiece(0);
EXPECT_EQ(0, completion.countMissingBytesInPiece(0));

@@ -17,26 +17,39 @@
class FilePieceMapTest : public ::testing::Test
{
protected:
static constexpr size_t TotalSize{ 1001 };
static constexpr size_t PieceSize{ 100 };
static constexpr size_t PieceSize{ tr_block_info::BlockSize };
static constexpr size_t TotalSize{ 10 * PieceSize + 1 };
tr_block_info const block_info_{ TotalSize, PieceSize };
static constexpr std::array<uint64_t, 17> FileSizes{
500, // [offset 0] begins and ends on a piece boundary
0, // [offset 500] zero-sized files
0, 0, 0,
50, // [offset 500] begins on a piece boundary
100, // [offset 550] neither begins nor ends on a piece boundary, spans >1 piece
10, // [offset 650] small files all contained in a single piece
9, 8, 7, 6,
311, // [offset 690] ends end-of-torrent
0, // [offset 1001] zero-sized files at the end-of-torrent
0, 0, 0,
// sum is 1001 == TotalSize
5 * PieceSize, // [offset 0] begins and ends on a piece boundary
0, // [offset 5 P] zero-sized files
0,
0,
0,
PieceSize / 2, // [offset 5 P] begins on a piece boundary
PieceSize, // [offset 5.5 P] neither begins nor ends on a piece boundary, spans >1 piece
10, // [offset 6.5 P] small files all contained in a single piece
9,
8,
7,
6,
(3 * PieceSize + PieceSize / 2 + 1 - 10 - 9 - 8 - 7 - 6), // [offset 5.75P +10+9+8+7+6] ends end-of-torrent
0, // [offset 10P+1] zero-sized files at the end-of-torrent
0,
0,
0,
// sum is 10P + 1 == TotalSize
};
void SetUp() override
{
static_assert(
FileSizes[0] + FileSizes[1] + FileSizes[2] + FileSizes[3] + FileSizes[4] + FileSizes[5] + FileSizes[6] +
FileSizes[7] + FileSizes[8] + FileSizes[9] + FileSizes[10] + FileSizes[11] + FileSizes[12] + FileSizes[13] +
FileSizes[14] + FileSizes[15] + FileSizes[16] ==
TotalSize);
EXPECT_EQ(11, block_info_.n_pieces);
EXPECT_EQ(PieceSize, block_info_.piece_size);
EXPECT_EQ(TotalSize, block_info_.total_size);

@@ -81,7 +81,7 @@ TEST_P(IncompleteDirTest, incompleteDir)
data->session->cache,
data->tor,
data->tor->pieceLoc(0, data->offset),
data->tor->blockSize(),
tr_block_info::BlockSize,
data->buf);
tr_torrentGotBlock(data->tor, data->block);
data->done = true;
@@ -89,7 +89,7 @@ TEST_P(IncompleteDirTest, incompleteDir)
// now finish writing it
{
char* zero_block = tr_new0(char, tor->blockSize());
char* zero_block = tr_new0(char, tr_block_info::BlockSize);
struct TestIncompleteDirData data = {};
data.session = session_;
@@ -100,10 +100,10 @@ TEST_P(IncompleteDirTest, incompleteDir)
for (tr_block_index_t block_index = begin; block_index < end; ++block_index)
{
evbuffer_add(data.buf, zero_block, tor->blockSize());
evbuffer_add(data.buf, zero_block, tr_block_info::BlockSize);
data.block = block_index;
data.done = false;
data.offset = data.block * tor->blockSize();
data.offset = data.block * tr_block_info::BlockSize;
tr_runInEventThread(session_, test_incomplete_dir_threadfunc, &data);
auto const test = [&data]()