refactor: sonarcloud warnings about overly-complex looping logic (#1503)

* chore: simplify loop logic
This commit is contained in:
Charles Kerr 2020-11-03 18:59:19 -06:00 committed by GitHub
parent a6798adf2c
commit 46cb0b5e65
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 141 additions and 151 deletions

View File

@ -114,48 +114,36 @@ static void favicon_ready_cb(gpointer pixbuf, gpointer vreference)
static gboolean tracker_filter_model_update(gpointer gstore)
{
int all = 0;
int store_pos;
GtkTreeIter iter;
GObject* o = G_OBJECT(gstore);
GtkTreeStore* store = GTK_TREE_STORE(gstore);
GtkTreeModel* model = GTK_TREE_MODEL(gstore);
GPtrArray* hosts = g_ptr_array_new();
GStringChunk* strings = g_string_chunk_new(4096);
GHashTable* hosts_hash = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, g_free);
GtkTreeModel* tmodel = GTK_TREE_MODEL(g_object_get_qdata(o, TORRENT_MODEL_KEY));
int const first_tracker_pos = 2; /* offset past the "All" and the separator */
g_object_steal_qdata(o, DIRTY_KEY);
/* Walk through all the torrents, tallying how many matches there are
* for the various categories. Also make a sorted list of all tracker
* hosts s.t. we can merge it with the existing list */
int num_torrents = 0;
GPtrArray* hosts = g_ptr_array_new();
GStringChunk* strings = g_string_chunk_new(4096);
GHashTable* hosts_hash = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, g_free);
GtkTreeModel* tmodel = GTK_TREE_MODEL(g_object_get_qdata(o, TORRENT_MODEL_KEY));
GtkTreeIter iter;
if (gtk_tree_model_iter_nth_child(tmodel, &iter, NULL, 0))
{
do
{
tr_torrent* tor;
tr_info const* inf;
int keyCount;
char** keys;
tr_torrent const* tor;
gtk_tree_model_get(tmodel, &iter, MC_TORRENT, &tor, -1);
inf = tr_torrentInfo(tor);
keyCount = 0;
keys = g_new(char*, inf->trackerCount);
tr_info const* const inf = tr_torrentInfo(tor);
int keyCount = 0;
char** const keys = g_new(char*, inf->trackerCount);
for (unsigned int i = 0; i < inf->trackerCount; ++i)
{
int* count;
char buf[1024];
char* key;
gtr_get_host_from_url(buf, sizeof(buf), inf->trackers[i].announce);
key = g_string_chunk_insert_const(strings, buf);
count = g_hash_table_lookup(hosts_hash, key);
char name[1024];
gtr_get_host_from_url(name, sizeof(name), inf->trackers[i].announce);
char* const key = g_string_chunk_insert_const(strings, name);
int* count = g_hash_table_lookup(hosts_hash, key);
if (count == NULL)
{
count = tr_new0(int, 1);
@ -184,35 +172,36 @@ static gboolean tracker_filter_model_update(gpointer gstore)
g_free(keys);
++all;
++num_torrents;
}
while (gtk_tree_model_iter_next(tmodel, &iter));
}
qsort(hosts->pdata, hosts->len, sizeof(char*), pstrcmp);
/* update the "all" count */
// update the "all" count
GtkTreeStore* store = GTK_TREE_STORE(gstore);
GtkTreeModel* model = GTK_TREE_MODEL(gstore);
if (gtk_tree_model_iter_children(model, &iter, NULL))
{
tracker_model_update_count(store, &iter, all);
tracker_model_update_count(store, &iter, num_torrents);
}
store_pos = first_tracker_pos;
for (int i = 0, n = hosts->len;;)
int store_pos = 2; // offset past the "All" and the separator
guint i = 0;
for (;;)
{
gboolean const new_hosts_done = i >= n;
// are we done yet?
gboolean const new_hosts_done = i >= hosts->len;
gboolean const old_hosts_done = !gtk_tree_model_iter_nth_child(model, &iter, NULL, store_pos);
gboolean remove_row = FALSE;
gboolean insert_row = FALSE;
/* are we done yet? */
if (new_hosts_done && old_hosts_done)
{
break;
}
/* decide what to do */
// decide what to do
gboolean remove_row = FALSE;
gboolean insert_row = FALSE;
if (new_hosts_done)
{
remove_row = TRUE;
@ -223,10 +212,9 @@ static gboolean tracker_filter_model_update(gpointer gstore)
}
else
{
int cmp;
char* host;
gtk_tree_model_get(model, &iter, TRACKER_FILTER_COL_HOST, &host, -1);
cmp = g_strcmp0(host, hosts->pdata[i]);
int const cmp = g_strcmp0(host, hosts->pdata[i]);
if (cmp < 0)
{
@ -240,7 +228,7 @@ static gboolean tracker_filter_model_update(gpointer gstore)
g_free(host);
}
/* do something */
// do something
if (remove_row)
{
gtk_tree_store_remove(store, &iter);
@ -268,9 +256,9 @@ static gboolean tracker_filter_model_update(gpointer gstore)
++store_pos;
++i;
}
else /* update row */
else // update row
{
char const* host = hosts->pdata[i];
char const* const host = hosts->pdata[i];
int const count = *(int*)g_hash_table_lookup(hosts_hash, host);
tracker_model_update_count(store, &iter, count);
++store_pos;
@ -278,7 +266,7 @@ static gboolean tracker_filter_model_update(gpointer gstore)
}
}
/* cleanup */
// cleanup
g_ptr_array_free(hosts, TRUE);
g_hash_table_unref(hosts_hash);
g_string_chunk_free(strings);

View File

@ -46,7 +46,9 @@ static void base32_to_sha1(uint8_t* out, char const* in, size_t const inlen)
memset(out, 0, 20);
for (size_t i = 0, index = 0, offset = 0; i < inlen; ++i)
size_t index = 0;
size_t offset = 0;
for (size_t i = 0; i < inlen; ++i)
{
int digit;
int lookup = in[i] - '0';

View File

@ -377,7 +377,8 @@ static char const* getannounce(tr_info* inf, tr_variant* meta)
trackers = tr_new0(tr_tracker_info, n);
for (int i = 0, validTiers = 0; i < numTiers; i++)
int validTiers = 0;
for (int i = 0; i < numTiers; ++i)
{
tr_variant* tier = tr_variantListChild(tiers, i);
int const tierSize = tr_variantListSize(tier);

View File

@ -3287,13 +3287,15 @@ static void rechokeUploads(tr_swarm* s, uint64_t const now)
*
* If our bandwidth is maxed out, don't unchoke any more peers.
*/
int unchokedInterested = 0;
int checkedChokeCount = 0;
int unchokedInterested = 0;
for (int i = 0; i < size && unchokedInterested < session->uploadSlotsPerTorrent; ++i, ++checkedChokeCount)
for (int i = 0; i < size && unchokedInterested < session->uploadSlotsPerTorrent; ++i)
{
choke[i].isChoked = isMaxedOut ? choke[i].wasChoked : false;
++checkedChokeCount;
if (choke[i].isInterested)
{
++unchokedInterested;
@ -3622,16 +3624,13 @@ static int comparePeerLiveliness(void const* va, void const* vb)
static void sortPeersByLivelinessImpl(tr_peer** peers, void** clientData, int n, uint64_t now, tr_voidptr_compare_func compare)
{
struct peer_liveliness* lives;
struct peer_liveliness* l;
/* build a sortable array of peer + extra info */
lives = tr_new0(struct peer_liveliness, n);
l = lives;
for (int i = 0; i < n; ++i, ++l)
// build a sortable array of peer + extra info
struct peer_liveliness* lives = tr_new0(struct peer_liveliness, n);
for (int i = 0; i < n; ++i)
{
tr_peer* p = peers[i];
struct peer_liveliness* const l = &lives[i];
l->peer = p;
l->doPurge = p->doPurge;
l->pieceDataTime = p->atom->piece_data_time;
@ -3644,15 +3643,14 @@ static void sortPeersByLivelinessImpl(tr_peer** peers, void** clientData, int n,
}
}
/* sort 'em */
TR_ASSERT(n == l - lives);
// sort 'em
qsort(lives, n, sizeof(struct peer_liveliness), compare);
l = lives;
/* build the peer array */
for (int i = 0; i < n; ++i, ++l)
// build the peer array
for (int i = 0; i < n; ++i)
{
struct peer_liveliness* const l = &lives[i];
peers[i] = l->peer;
if (clientData != NULL)
@ -3661,9 +3659,7 @@ static void sortPeersByLivelinessImpl(tr_peer** peers, void** clientData, int n,
}
}
TR_ASSERT(n == l - lives);
/* cleanup */
// cleanup
tr_free(lives);
}
@ -3718,10 +3714,11 @@ static void enforceSessionPeerLimit(tr_session* session, uint64_t now)
{
tr_swarm* s = tor->swarm;
for (int i = 0, tn = tr_ptrArraySize(&s->peers); i < tn; ++i, ++n)
for (int i = 0, tn = tr_ptrArraySize(&s->peers); i < tn; ++i)
{
peers[n] = tr_ptrArrayNth(&s->peers, i);
swarms[n] = s;
++n;
}
}

View File

@ -206,6 +206,26 @@ void* tr_torrentGetMetadataPiece(tr_torrent* tor, int piece, size_t* len)
return ret;
}
/* Locate `piece` in the list of still-needed metadata pieces.
 * Returns its index into m->piecesNeeded, or -1 when the piece
 * is not (or is no longer) needed. */
static int getPieceNeededIndex(struct tr_incomplete_metadata const* m, int piece)
{
    int found = -1;

    for (int i = 0; found == -1 && i < m->piecesNeededCount; ++i)
    {
        if (m->piecesNeeded[i].piece == piece)
        {
            found = i;
        }
    }

    return found;
}
/* Size in bytes of metadata piece `piece`: every piece is
 * METADATA_PIECE_SIZE except the final one, which holds whatever
 * remains of m->metadata_size. */
static int getPieceLength(struct tr_incomplete_metadata const* m, int piece)
{
    if (piece + 1 == m->pieceCount) /* last piece */
    {
        return m->metadata_size - (piece * METADATA_PIECE_SIZE);
    }

    return METADATA_PIECE_SIZE;
}
void tr_torrentSetMetadataPiece(tr_torrent* tor, int piece, void const* data, int len)
{
TR_ASSERT(tr_isTorrent(tor));
@ -214,53 +234,36 @@ void tr_torrentSetMetadataPiece(tr_torrent* tor, int piece, void const* data, in
dbgmsg(tor, "got metadata piece %d of %d bytes", piece, len);
/* are we set up to download metadata? */
struct tr_incomplete_metadata* m = tor->incompleteMetadata;
// are we set up to download metadata?
struct tr_incomplete_metadata* const m = tor->incompleteMetadata;
if (m == NULL)
{
return;
}
/* does this data pass the smell test? */
if (piece < 0 || piece >= m->pieceCount)
// sanity test: is `piece` in range?
if ((piece < 0) || (piece >= m->pieceCount))
{
return;
}
if (piece < m->pieceCount - 1 ? len != METADATA_PIECE_SIZE : len > METADATA_PIECE_SIZE)
// sanity test: is `len` the right size?
if (getPieceLength(m, piece) != len)
{
return;
}
int const offset = piece * METADATA_PIECE_SIZE;
TR_ASSERT(offset <= m->metadata_size);
if (len == 0 || len > m->metadata_size - offset)
{
return;
}
int neededPieceIndex = 0;
/* do we need this piece? */
for (int i = 0; i < m->piecesNeededCount; ++i, ++neededPieceIndex)
{
if (m->piecesNeeded[i].piece == piece)
{
break;
}
}
if (neededPieceIndex == m->piecesNeededCount)
// do we need this piece?
int const idx = getPieceNeededIndex(m, piece);
if (idx == -1)
{
return;
}
size_t const offset = piece * METADATA_PIECE_SIZE;
memcpy(m->metadata + offset, data, len);
tr_removeElementFromArray(m->piecesNeeded, neededPieceIndex, sizeof(struct metadata_node), m->piecesNeededCount);
tr_removeElementFromArray(m->piecesNeeded, idx, sizeof(struct metadata_node), m->piecesNeededCount);
--m->piecesNeededCount;
dbgmsg(tor, "saving metainfo piece %d... %d remain", piece, m->piecesNeededCount);

View File

@ -655,39 +655,24 @@ static bool pieceHasFile(tr_piece_index_t piece, tr_file const* file)
return file->firstPiece <= piece && piece <= file->lastPiece;
}
static tr_priority_t calculatePiecePriority(tr_torrent const* tor, tr_piece_index_t piece, int fileHint)
static tr_priority_t calculatePiecePriority(tr_torrent const* tor, tr_piece_index_t piece, tr_file_index_t file_hint)
{
tr_file_index_t firstFileIndex;
// safeguard against a bad arg
tr_info const* const inf = tr_torrentInfo(tor);
file_hint = MIN(file_hint, inf->fileCount - 1);
// find the first file with data in this piece
tr_file_index_t first = file_hint;
while (first > 0 && pieceHasFile(piece, &inf->files[first - 1]))
{
--first;
}
// the priority is the max of all the file priorities in the piece
tr_priority_t priority = TR_PRI_LOW;
/* find the first file that has data in this piece */
if (fileHint >= 0)
for (tr_file_index_t i = first; i < inf->fileCount; ++i)
{
firstFileIndex = fileHint;
while (firstFileIndex > 0 && pieceHasFile(piece, &tor->info.files[firstFileIndex - 1]))
{
--firstFileIndex;
}
}
else
{
firstFileIndex = 0;
for (tr_file_index_t i = 0; i < tor->info.fileCount; ++i, ++firstFileIndex)
{
if (pieceHasFile(piece, &tor->info.files[i]))
{
break;
}
}
}
/* the piece's priority is the max of the priorities
* of all the files in that piece */
for (tr_file_index_t i = firstFileIndex; i < tor->info.fileCount; ++i)
{
tr_file const* file = &tor->info.files[i];
tr_file const* file = &inf->files[i];
if (!pieceHasFile(piece, file))
{
@ -696,7 +681,7 @@ static tr_priority_t calculatePiecePriority(tr_torrent const* tor, tr_piece_inde
priority = MAX(priority, file->priority);
/* when dealing with multimedia files, getting the first and
/* When dealing with multimedia files, getting the first and
last pieces can sometimes allow you to preview it a bit
before it's fully downloaded... */
if ((file->priority >= TR_PRI_NORMAL) && (file->firstPiece == piece || file->lastPiece == piece))
@ -722,7 +707,7 @@ static void tr_torrentInitFilePieces(tr_torrent* tor)
}
/* build the array of first-file hints to give calculatePiecePriority */
int* firstFiles = tr_new(int, inf->pieceCount);
tr_file_index_t* firstFiles = tr_new(tr_file_index_t, inf->pieceCount);
tr_file_index_t f = 0;
for (tr_piece_index_t p = 0; p < inf->pieceCount; ++p)

View File

@ -446,44 +446,58 @@ void tr_dhtUninit(tr_session* ss)
}
else
{
tr_variant benc;
struct sockaddr_in sins[300];
struct sockaddr_in6 sins6[300];
char compact[300 * 6];
char compact6[300 * 18];
char* dat_file;
int num = 300;
int num6 = 300;
int n = dht_get_nodes(sins, &num, sins6, &num6);
enum
{
MAX_NODES = 300,
PORT_LEN = 2,
COMPACT_ADDR_LEN = 4,
COMPACT_LEN = (COMPACT_ADDR_LEN + PORT_LEN),
COMPACT6_ADDR_LEN = 16,
COMPACT6_LEN = (COMPACT6_ADDR_LEN + PORT_LEN),
};
struct sockaddr_in sins[MAX_NODES];
struct sockaddr_in6 sins6[MAX_NODES];
int num = MAX_NODES;
int num6 = MAX_NODES;
int n = dht_get_nodes(sins, &num, sins6, &num6);
tr_logAddNamedInfo("DHT", "Saving %d (%d + %d) nodes", n, num, num6);
for (int i = 0, j = 0; i < num; ++i, j += 6)
{
memcpy(compact + j, &sins[i].sin_addr, 4);
memcpy(compact + j + 4, &sins[i].sin_port, 2);
}
for (int i = 0, j = 0; i < num6; ++i, j += 18)
{
memcpy(compact6 + j, &sins6[i].sin6_addr, 16);
memcpy(compact6 + j + 16, &sins6[i].sin6_port, 2);
}
tr_variant benc;
tr_variantInitDict(&benc, 3);
tr_variantDictAddRaw(&benc, TR_KEY_id, myid, 20);
if (num > 0)
{
tr_variantDictAddRaw(&benc, TR_KEY_nodes, compact, num * 6);
char compact[MAX_NODES * COMPACT_LEN];
char* out = compact;
for (struct sockaddr_in const* in = sins; in < sins + num; ++in)
{
memcpy(out, &in->sin_addr, COMPACT_ADDR_LEN);
out += COMPACT_ADDR_LEN;
memcpy(out, &in->sin_port, PORT_LEN);
out += PORT_LEN;
}
tr_variantDictAddRaw(&benc, TR_KEY_nodes, compact, out - compact);
}
if (num6 > 0)
{
tr_variantDictAddRaw(&benc, TR_KEY_nodes6, compact6, num6 * 18);
char compact6[MAX_NODES * COMPACT6_LEN];
char* out6 = compact6;
for (struct sockaddr_in6 const* in = sins6; in < sins6 + num6; ++in)
{
memcpy(out6, &in->sin6_addr, COMPACT6_ADDR_LEN);
out6 += COMPACT6_ADDR_LEN;
memcpy(out6, &in->sin6_port, PORT_LEN);
out6 += PORT_LEN;
}
tr_variantDictAddRaw(&benc, TR_KEY_nodes6, compact6, out6 - compact6);
}
dat_file = tr_buildPath(ss->configDir, "dht.dat", NULL);
char* const dat_file = tr_buildPath(ss->configDir, "dht.dat", NULL);
tr_variantToFile(&benc, TR_VARIANT_FMT_BENC, dat_file);
tr_variantFree(&benc);
tr_free(dat_file);