relevance 4 | ../test/test_dht.cpp:1281 | pass in the actual salt as a parameter |
pass in the actual salt as a parameter../test/test_dht.cpp:1281
key_desc_t const desc_error[] =
{
{ "e", bdecode_node::list_t, 2, 0 },
{ "y", bdecode_node::string_t, 1, 0},
};
bdecode_node desc_error_keys[2];
// ==== get / put mutable items ===
span<char const> itemv;
signature sig;
char buffer[1200];
sequence_number seq(4);
public_key pk;
secret_key sk;
get_test_keypair(pk, sk);
for (int with_salt = 0; with_salt < 2; ++with_salt)
{
seq = sequence_number(4);
std::printf("\nTEST GET/PUT%s \ngenerating ed25519 keys\n\n"
, with_salt ? " with-salt" : " no-salt");
std::array<char, 32> seed = ed25519_create_seed();
std::tie(pk, sk) = ed25519_create_keypair(seed);
std::printf("pub: %s priv: %s\n"
, aux::to_hex(pk.bytes).c_str()
, aux::to_hex(sk.bytes).c_str());
std::string salt;
if (with_salt) salt = "foobar";
hasher h(pk.bytes);
if (with_salt) h.update(salt);
sha1_hash target_id = h.final();
std::printf("target_id: %s\n"
, aux::to_hex(target_id).c_str());
send_dht_request(t.dht_node, "get", t.source, &response
, msg_args().target(target_id));
key_desc_t const desc[] =
{
{ "r", bdecode_node::dict_t, 0, key_desc_t::parse_children },
{ "id", bdecode_node::string_t, 20, 0},
{ "token", bdecode_node::string_t, 0, 0},
{ "ip", bdecode_node::string_t, 0, key_desc_t::optional | key_desc_t::last_child},
| ||
relevance 4 | ../test/test_dht.cpp:2172 | pass in the actual salt as the argument |
pass in the actual salt as the argument../test/test_dht.cpp:2172 }
}
g_got_peers.clear();
}
} // anonymous namespace
// run the get_peers test over IPv4 (random v4 addresses)
TORRENT_TEST(get_peers_v4)
{
test_get_peers(rand_v4);
}
// run the get_peers test over IPv6; skipped when the host
// does not support v6
TORRENT_TEST(get_peers_v6)
{
if (supports_ipv6())
test_get_peers(rand_v6);
}
namespace {
void test_mutable_get(address(&rand_addr)(), bool const with_salt)
{
dht_test_setup t(udp::endpoint(rand_addr(), 20));
public_key pk;
secret_key sk;
get_test_keypair(pk, sk);
char buffer[1200];
sequence_number seq(4);
span<char const> itemv;
bdecode_node response;
std::string salt;
if (with_salt) salt = "foobar";
// mutable get
g_sent_packets.clear();
udp::endpoint const initial_node(rand_addr(), 1234);
dht::node_id const initial_node_id = to_hash("1111111111222222222233333333334444444444");
t.dht_node.m_table.add_node(node_entry{initial_node_id, initial_node, 10, true});
g_put_item.assign(items[0].ent, salt, seq, pk, sk);
t.dht_node.put_item(pk, std::string()
, std::bind(&put_mutable_item_cb, _1, _2, 0)
, put_mutable_item_data_cb);
TEST_EQUAL(g_sent_packets.size(), 1);
| ||
relevance 4 | ../src/disk_io_thread.cpp:1173 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a std::mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
call. Each disk thread could hold its most recent understanding of the settings
in a shared_ptr, and update it every time it wakes up from a job. That way
each access to the settings won't require a std::mutex to be held.../src/disk_io_thread.cpp:1173
DLOG("perform_job job: %s ( %s) piece: %d offset: %d outstanding: %d\n"
, job_name(j->action)
, (j->flags & disk_io_job::fence) ? "fence ": ""
, static_cast<int>(j->piece), j->d.io.offset
, j->storage ? j->storage->num_outstanding_jobs() : -1);
}
#endif
std::shared_ptr<storage_interface> storage = j->storage;
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
if (j->storage)
{
std::unique_lock<std::mutex> l(m_cache_mutex);
auto const& pieces = j->storage->cached_pieces();
for (auto const& p : pieces)
TORRENT_ASSERT(p.storage == j->storage);
}
#endif
if (storage && storage->m_settings == nullptr)
storage->m_settings = &m_settings;
TORRENT_ASSERT(static_cast<int>(j->action) < int(job_functions.size()));
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
// call disk function
| ||
relevance 3 | ../test/test_dht.cpp:111 | make the mock_socket hold a reference to the list of where to record packets instead of having a global variable |
make the mock_socket hold a reference to the list of where to record
packets instead of having a global variable../test/test_dht.cpp:111}
// add `add` into `dst` in place, treating both node IDs as 160-bit
// big-endian integers (byte 0 is most significant). Any carry out of
// the most significant byte is silently discarded, i.e. the sum wraps.
void add_and_replace(node_id& dst, node_id const& add)
{
	int carry = 0;
	for (int i = 19; i >= 0; --i)
	{
		int const total = dst[i] + add[i] + carry;
		dst[i] = total & 0xff;
		carry = (total > 255) ? 1 : 0;
	}
}
void node_push_back(std::vector<node_entry>* nv, node_entry const& n)
{
nv->push_back(n);
}
// intentional no-op, used where a callback must be supplied but nothing needs doing
void nop_node() {}
std::list<std::pair<udp::endpoint, entry>> g_sent_packets;
struct mock_socket final : socket_manager
{
bool has_quota() override { return true; }
bool send_packet(aux::listen_socket_handle const&, entry& msg, udp::endpoint const& ep) override
{
| ||
relevance 3 | ../test/test_dht.cpp:120 | ideally the mock_socket would contain this queue of packets, to make tests independent |
ideally the mock_socket would contain this queue of packets, to
make tests independent../test/test_dht.cpp:120 int sum = dst[k] + add[k] + (carry ? 1 : 0);
dst[k] = sum & 255;
carry = sum > 255;
}
}
void node_push_back(std::vector<node_entry>* nv, node_entry const& n)
{
nv->push_back(n);
}
void nop_node() {}
std::list<std::pair<udp::endpoint, entry>> g_sent_packets;
// test double for socket_manager: instead of sending packets over the
// network it records each (destination, message) pair in the global
// g_sent_packets list so tests can inspect outgoing DHT traffic
struct mock_socket final : socket_manager
{
// never rate-limit in tests
bool has_quota() override { return true; }
bool send_packet(aux::listen_socket_handle const&, entry& msg, udp::endpoint const& ep) override
{
// capture the packet for later assertions; always report success
g_sent_packets.push_back(std::make_pair(ep, msg));
return true;
}
};
// build a listen_socket_t whose local endpoint mirrors `src`, and cast
// an external-address vote for src's address (attributed to a random
// v4 voter) so the socket has a usable external address in tests
std::shared_ptr<aux::listen_socket_t> dummy_listen_socket(udp::endpoint src)
{
auto ret = std::make_shared<aux::listen_socket_t>();
ret->local_endpoint = tcp::endpoint(src.address(), src.port());
ret->external_address.cast_vote(src.address()
, aux::session_interface::source_dht, rand_v4());
return ret;
}
// fixed IPv4 test socket: local endpoint 192.168.4.1:6881 with an
// external-address vote for 236.0.0.1 from a random v4 voter
std::shared_ptr<aux::listen_socket_t> dummy_listen_socket4()
{
auto ret = std::make_shared<aux::listen_socket_t>();
ret->local_endpoint = tcp::endpoint(addr4("192.168.4.1"), 6881);
ret->external_address.cast_vote(addr4("236.0.0.1")
, aux::session_interface::source_dht, rand_v4());
return ret;
}
// fixed IPv6 test socket: local endpoint [2002::1]:6881, voting its own
// address as the external address (voter is a random v6 address)
std::shared_ptr<aux::listen_socket_t> dummy_listen_socket6()
{
auto ret = std::make_shared<aux::listen_socket_t>();
ret->local_endpoint = tcp::endpoint(addr6("2002::1"), 6881);
ret->external_address.cast_vote(addr6("2002::1")
, aux::session_interface::source_dht, rand_v6());
return ret;
}
| ||
relevance 3 | ../test/test_dht.cpp:1238 | split this up into smaller tests |
split this up into smaller tests../test/test_dht.cpp:1238 }
// construct nine node entries that all share the same node ID (`target`)
// but have distinct v4 endpoints 1.1.1.1:1231 through 9.9.9.9:1239,
// each with rtt 10 and marked verified
lt::aux::array<node_entry, 9> build_nodes(sha1_hash target)
{
return lt::aux::array<node_entry, 9>(
std::array<node_entry, 9> {
{ { target, udp::endpoint(addr4("1.1.1.1"), 1231), 10, true}
, { target, udp::endpoint(addr4("2.2.2.2"), 1232), 10, true}
, { target, udp::endpoint(addr4("3.3.3.3"), 1233), 10, true}
, { target, udp::endpoint(addr4("4.4.4.4"), 1234), 10, true}
, { target, udp::endpoint(addr4("5.5.5.5"), 1235), 10, true}
, { target, udp::endpoint(addr4("6.6.6.6"), 1236), 10, true}
, { target, udp::endpoint(addr4("7.7.7.7"), 1237), 10, true}
, { target, udp::endpoint(addr4("8.8.8.8"), 1238), 10, true}
, { target, udp::endpoint(addr4("9.9.9.9"), 1239), 10, true} }
});
}
span<char const> const empty_salt;
void test_put(address(&rand_addr)())
{
dht_test_setup t(udp::endpoint(rand_addr(), 20));
bdecode_node response;
bool ret;
// ====== put ======
init_rand_address();
udp::endpoint eps[1000];
for (int i = 0; i < 1000; ++i)
eps[i] = udp::endpoint(rand_addr(), (rand() % 16534) + 1);
announce_immutable_items(t.dht_node, eps, items, sizeof(items)/sizeof(items[0]));
key_desc_t const desc2[] =
{
{ "y", bdecode_node::string_t, 1, 0 }
};
bdecode_node desc2_keys[1];
key_desc_t const desc_error[] =
{
{ "e", bdecode_node::list_t, 2, 0 },
{ "y", bdecode_node::string_t, 1, 0},
};
bdecode_node desc_error_keys[2];
| ||
relevance 3 | ../test/test_dht.cpp:2641 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:2641 args.nodes({nodes[8]});
send_dht_response(t.dht_node, response, nodes[i].ep(), args);
g_sent_packets.erase(packet);
// once we've sent the response from the farthest node, we're done
if (i == 0) break;
}
TEST_EQUAL(g_put_count, 1);
// k nodes should now have outstanding put requests
TEST_EQUAL(g_sent_packets.size(), 8);
g_sent_packets.clear();
g_put_item.clear();
g_put_count = 0;
}
TORRENT_TEST(dht_dual_stack)
{
dht::settings sett = test_settings();
mock_socket s;
auto sock4 = dummy_listen_socket4();
auto sock6 = dummy_listen_socket6();
obs observer;
counters cnt;
node* node4p = nullptr, *node6p = nullptr;
auto get_foreign_node = [&](node_id const&, std::string const& family)
{
if (family == "n4") return node4p;
if (family == "n6") return node6p;
TEST_CHECK(false);
return static_cast<node*>(nullptr);
};
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node4(sock4, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node, *dht_storage);
dht::node node6(sock6, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node, *dht_storage);
node4p = &node4;
node6p = &node6;
// DHT should be running on port 48199 now
bdecode_node response;
char error_string[200];
bool ret;
node_id id = to_hash("3123456789abcdef01232456789abcdef0123456");
node4.m_table.node_seen(id, udp::endpoint(addr("4.4.4.4"), 4440), 10);
node6.m_table.node_seen(id, udp::endpoint(addr("4::4"), 4441), 10);
// v4 node requesting v6 nodes
| ||
relevance 3 | ../test/test_dht.cpp:3186 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:3186
bdecode_node response;
send_dht_request(t.dht_node, "ping", t.source, &response);
dht::key_desc_t const pong_desc[] = {
{ "y", bdecode_node::string_t, 1, 0 },
{ "t", bdecode_node::string_t, 2, 0 },
{ "r", bdecode_node::dict_t, 0, key_desc_t::parse_children },
{ "id", bdecode_node::string_t, 20, key_desc_t::last_child },
};
bdecode_node pong_keys[4];
bool ret = dht::verify_message(response, pong_desc, pong_keys, t.error_string);
TEST_CHECK(ret);
if (!ret) return;
TEST_EQUAL(node_id(pong_keys[3].string_ptr()), t.dht_node.nid());
}
TORRENT_TEST(read_only_node)
{
dht::settings sett = test_settings();
sett.read_only = true;
mock_socket s;
auto ls = dummy_listen_socket4();
obs observer;
counters cnt;
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node(ls, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node_stub, *dht_storage);
udp::endpoint source(addr("10.0.0.1"), 20);
bdecode_node response;
msg_args args;
// for incoming requests, read_only node won't response.
send_dht_request(node, "ping", source, &response, args, "10", false);
TEST_EQUAL(response.type(), bdecode_node::none_t);
args.target(sha1_hash("01010101010101010101"));
send_dht_request(node, "get", source, &response, args, "10", false);
TEST_EQUAL(response.type(), bdecode_node::none_t);
// also, the sender shouldn't be added to routing table.
TEST_EQUAL(std::get<0>(node.size()), 0);
// for outgoing requests, read_only node will add 'ro' key (value == 1)
// in top-level of request.
bdecode_node parsed[7];
char error_string[200];
udp::endpoint initial_node(addr("4.4.4.4"), 1234);
dht::node_id const initial_node_id = to_hash("1111111111222222222233333333334444444444");
| ||
relevance 3 | ../test/test_dht.cpp:3285 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:3285 // both of them shouldn't have a 'ro' key.
node_from_entry(g_sent_packets.front().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
node_from_entry(g_sent_packets.back().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
#endif
}
#ifndef TORRENT_DISABLE_LOGGING
// these tests rely on logging being enabled
TORRENT_TEST(invalid_error_msg)
{
dht::settings sett = test_settings();
mock_socket s;
auto ls = dummy_listen_socket4();
obs observer;
counters cnt;
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node(ls, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node_stub, *dht_storage);
udp::endpoint source(addr("10.0.0.1"), 20);
entry e;
e["y"] = "e";
e["e"].string() = "Malformed Error";
char msg_buf[1500];
int size = bencode(msg_buf, e);
bdecode_node decoded;
error_code ec;
bdecode(msg_buf, msg_buf + size, decoded, ec);
if (ec) std::printf("bdecode failed: %s\n", ec.message().c_str());
dht::msg m(decoded, source);
node.incoming(node.m_sock, m);
bool found = false;
for (auto const& log : observer.m_log)
{
if (log.find("INCOMING ERROR") != std::string::npos
&& log.find("(malformed)") != std::string::npos)
found = true;
| ||
relevance 3 | ../test/test_dht.cpp:3378 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:3378 TEST_CHECK(algo->num_sorted_results() == 0);
auto results = algo->results();
TEST_CHECK(results.size() == eps.size());
for (std::size_t i = 0; i < eps.size(); ++i)
TEST_CHECK(eps[i] == results[i]->target_ep());
// setting the node ID, regardless of what we set it to, should cause this
// observer to become sorted. i.e. be moved to the beginning of the result
// list.
results[5]->set_id(node_id("abababababababababab"));
TEST_CHECK(algo->num_sorted_results() == 1);
results = algo->results();
TEST_CHECK(results.size() == eps.size());
TEST_CHECK(eps[5] == results[0]->target_ep());
algo->done();
}
TORRENT_TEST(rpc_invalid_error_msg)
{
dht::settings sett = test_settings();
mock_socket s;
auto ls = dummy_listen_socket4();
obs observer;
counters cnt;
dht::routing_table table(node_id(), udp::v4(), 8, sett, &observer);
dht::rpc_manager rpc(node_id(), sett, table, ls, &s, &observer);
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node(ls, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node_stub, *dht_storage);
udp::endpoint source(addr("10.0.0.1"), 20);
// we need this to create an entry for this transaction ID, otherwise the
// incoming message will just be dropped
entry req;
req["y"] = "q";
req["q"] = "bogus_query";
req["t"] = "\0\0\0\0";
g_sent_packets.clear();
auto algo = std::make_shared<dht::traversal_algorithm>(node, node_id());
auto o = rpc.allocate_observer<null_observer>(std::move(algo), source, node_id());
#if TORRENT_USE_ASSERTS
o->m_in_constructor = false;
#endif
rpc.invoke(req, source, o);
// here's the incoming (malformed) error message
| ||
relevance 3 | ../src/ut_metadata.cpp:272 | use the aux::write_* functions and the span here instead, it will fit better with send_buffer() |
use the aux::write_* functions and the span here instead, it
will fit better with send_buffer()../src/ut_metadata.cpp:272 int metadata_piece_size = 0;
if (m_torrent.valid_metadata())
e["total_size"] = m_tp.get_metadata_size();
if (type == 1)
{
TORRENT_ASSERT(piece >= 0 && piece < (m_tp.get_metadata_size() + 16 * 1024 - 1) / (16 * 1024));
TORRENT_ASSERT(m_pc.associated_torrent().lock()->valid_metadata());
TORRENT_ASSERT(m_torrent.valid_metadata());
int const offset = piece * 16 * 1024;
metadata = m_tp.metadata().data() + offset;
metadata_piece_size = std::min(
m_tp.get_metadata_size() - offset, 16 * 1024);
TORRENT_ASSERT(metadata_piece_size > 0);
TORRENT_ASSERT(offset >= 0);
TORRENT_ASSERT(offset + metadata_piece_size <= m_tp.get_metadata_size());
}
char msg[200];
char* header = msg;
char* p = &msg[6];
int const len = bencode(p, e);
int const total_size = 2 + len + metadata_piece_size;
namespace io = detail;
io::write_uint32(total_size, header);
io::write_uint8(bt_peer_connection::msg_extended, header);
io::write_uint8(m_message_index, header);
m_pc.send_buffer({msg, len + 6});
| ||
relevance 3 | ../src/ConvertUTF.cpp:61 | replace this implementation with something maintained and/or robust. Perhaps std::codecvt<> |
replace this implementation with something maintained and/or robust.
Perhaps std::codecvt<>../src/ConvertUTF.cpp:61// ignore warnings in this file
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include "libtorrent/ConvertUTF.h"
#ifdef CVTUTF_DEBUG
#include <stdio.h>
#endif
static const int halfShift = 10; /* used for shifting by 10 bits */
static const UTF32 halfBase = 0x0010000UL;
static const UTF32 halfMask = 0x3FFUL;
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_HIGH_END (UTF32)0xDBFF
#define UNI_SUR_LOW_START (UTF32)0xDC00
#define UNI_SUR_LOW_END (UTF32)0xDFFF
/* --------------------------------------------------------------------- */
ConversionResult ConvertUTF32toUTF16 (
const UTF32** sourceStart, const UTF32* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
ConversionResult result = conversionOK;
const UTF32* source = *sourceStart;
UTF16* target = *targetStart;
while (source < sourceEnd) {
UTF32 ch;
if (target >= targetEnd) {
result = targetExhausted; break;
}
ch = *source++;
if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
/* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */
if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
if (flags == strictConversion) {
--source; /* return to the illegal value itself */
result = sourceIllegal;
break;
} else {
*target++ = UNI_REPLACEMENT_CHAR;
}
} else {
*target++ = (UTF16)ch; /* normal case */
}
} else if (ch > UNI_MAX_LEGAL_UTF32) {
if (flags == strictConversion) {
result = sourceIllegal;
} else {
*target++ = UNI_REPLACEMENT_CHAR;
| ||
relevance 3 | ../src/session_handle.cpp:625 | expose the sequence_number, public_key, secret_key and signature types to the client |
expose the sequence_number, public_key, secret_key and signature
types to the client../src/session_handle.cpp:625 void session_handle::dht_get_item(sha1_hash const& target)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_get_immutable_item, target);
#else
TORRENT_UNUSED(target);
#endif
}
// asynchronously look up the mutable DHT item published under the given
// ed25519 public key and (possibly empty) salt. No-op when DHT support
// is compiled out.
void session_handle::dht_get_item(std::array<char, 32> key
, std::string salt)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_get_mutable_item, key, salt);
#else
TORRENT_UNUSED(key);
TORRENT_UNUSED(salt);
#endif
}
// store `data` as an immutable item in the DHT. The target (returned to
// the caller) is the SHA-1 hash of the bencoded item; it is computed and
// returned even when DHT support is compiled out, in which case no store
// is initiated.
sha1_hash session_handle::dht_put_item(entry data)
{
std::vector<char> buf;
bencode(std::back_inserter(buf), data);
sha1_hash const ret = hasher(buf).final();
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_put_immutable_item, data, ret);
#endif
return ret;
}
// store a mutable item in the DHT under the given ed25519 public key and
// salt. `cb` is invoked to fill in the item's value, signature and
// sequence number before publishing. No-op when DHT support is compiled
// out.
void session_handle::dht_put_item(std::array<char, 32> key
, std::function<void(entry&, std::array<char,64>&
, std::int64_t&, std::string const&)> cb
, std::string salt)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_put_mutable_item, key, cb, salt);
#else
TORRENT_UNUSED(key);
TORRENT_UNUSED(cb);
TORRENT_UNUSED(salt);
#endif
}
void session_handle::dht_get_peers(sha1_hash const& info_hash)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_get_peers, info_hash);
#else
| ||
relevance 3 | ../src/peer_connection.cpp:1920 | replace this magic number with something that makes sense |
replace this magic number with something that makes sense../src/peer_connection.cpp:1920 // if this peer is choked, there's no point in sending suggest messages to
// it. They would just be out-of-date by the time we unchoke the peer
// anyway.
if (m_settings.get_int(settings_pack::suggest_mode) == settings_pack::suggest_read_cache
&& !is_choked()
&& std::any_of(m_suggest_pieces.begin(), m_suggest_pieces.end()
, [=](piece_index_t const idx) { return idx == index; }))
{
send_piece_suggestions(2);
}
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::incoming_message, "HAVE", "piece: %d"
, static_cast<int>(index));
#endif
if (is_disconnecting()) return;
if (!t->valid_metadata() && index >= m_have_piece.end_index())
{
if (index < piece_index_t(524288))
{
// if we don't have metadata
// and we might not have received a bitfield
// extend the bitmask to fit the new
// have message
m_have_piece.resize(static_cast<int>(index) + 1, false);
}
else
{
// unless the index > 64k, in which case
// we just ignore it
return;
}
}
// if we got an invalid message, abort
if (index >= m_have_piece.end_index() || index < piece_index_t(0))
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "ERROR", "have-metadata have_piece: %d size: %d"
, static_cast<int>(index), m_have_piece.size());
#endif
disconnect(errors::invalid_have, operation_t::bittorrent, peer_error);
return;
}
#ifndef TORRENT_DISABLE_SUPERSEEDING
if (t->super_seeding()
#if TORRENT_ABI_VERSION == 1
&& !m_settings.get_bool(settings_pack::strict_super_seeding)
| ||
relevance 3 | ../src/peer_connection.cpp:3056 | instead of having to ask the torrent whether it's in graceful pause mode or not, the peers should keep that state (and the torrent should update them when it enters graceful pause). When a peer enters graceful pause mode, it should cancel all outstanding requests and clear its request queue. |
instead of having to ask the torrent whether it's in graceful
pause mode or not, the peers should keep that state (and the torrent
should update them when it enters graceful pause). When a peer enters
graceful pause mode, it should cancel all outstanding requests and
clear its request queue.../src/peer_connection.cpp:3056 // to disk or are in the disk write cache
if (picker.is_piece_finished(p.piece) && !was_finished)
{
#if TORRENT_USE_INVARIANT_CHECKS
check_postcondition post_checker2_(t, false);
#endif
t->verify_piece(p.piece);
}
check_graceful_pause();
if (is_disconnecting()) return;
if (request_a_block(*t, *this))
m_counters.inc_stats_counter(counters::incoming_piece_picks);
send_block_requests();
}
// if the owning torrent is in graceful-pause mode and this peer has no
// bytes left in flight, the connection has served its purpose: close it
void peer_connection::check_graceful_pause()
{
// the torrent may already be gone; bail out if so, or if it isn't pausing
std::shared_ptr<torrent> t = m_torrent.lock();
if (!t || !t->graceful_pause()) return;
// still waiting for requested data to arrive; keep the connection open
if (m_outstanding_bytes > 0) return;
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "GRACEFUL_PAUSE", "NO MORE DOWNLOAD");
#endif
disconnect(errors::torrent_paused, operation_t::bittorrent);
}
void peer_connection::on_disk_write_complete(storage_error const& error
, peer_request const& p, std::shared_ptr<torrent> t)
{
TORRENT_ASSERT(is_single_thread());
#ifndef TORRENT_DISABLE_LOGGING
if (should_log(peer_log_alert::info))
{
peer_log(peer_log_alert::info, "FILE_ASYNC_WRITE_COMPLETE", "piece: %d s: %x l: %x e: %s"
, static_cast<int>(p.piece), p.start, p.length, error.ec.message().c_str());
}
#endif
m_counters.inc_stats_counter(counters::queued_write_bytes, -p.length);
m_outstanding_writing_bytes -= p.length;
TORRENT_ASSERT(m_outstanding_writing_bytes >= 0);
// every peer is entitled to allocate a disk buffer if it has no writes outstanding
// see the comment in incoming_piece
if (m_outstanding_writing_bytes == 0
| ||
relevance 3 | ../src/peer_connection.cpp:3943 | once peers are properly put in graceful pause mode, they can cancel all outstanding requests and this test can be removed. |
once peers are properly put in graceful pause mode, they can
cancel all outstanding requests and this test can be removed.../src/peer_connection.cpp:3943 std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
TORRENT_ASSERT(t->has_piece_passed(piece));
TORRENT_ASSERT(piece < t->torrent_file().end_piece());
}
#endif
write_suggest(piece);
}
void peer_connection::send_block_requests()
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
if (m_disconnecting) return;
if (t->graceful_pause()) return;
// we can't download pieces in these states
if (t->state() == torrent_status::checking_files
|| t->state() == torrent_status::checking_resume_data
|| t->state() == torrent_status::downloading_metadata
|| t->state() == torrent_status::allocating)
return;
if (int(m_download_queue.size()) >= m_desired_queue_size
|| t->upload_mode()) return;
bool const empty_download_queue = m_download_queue.empty();
while (!m_request_queue.empty()
&& (int(m_download_queue.size()) < m_desired_queue_size
|| m_queued_time_critical > 0))
{
pending_block block = m_request_queue.front();
m_request_queue.erase(m_request_queue.begin());
if (m_queued_time_critical) --m_queued_time_critical;
// if we're a seed, we don't have a piece picker
// so we don't have to worry about invariants getting
// out of sync with it
if (!t->has_picker()) continue;
// this can happen if a block times out, is re-requested and
// then arrives "unexpectedly"
if (t->picker().is_downloaded(block.block))
| ||
relevance 3 | ../src/peer_connection.cpp:4627 | new_piece should be an optional. piece index -1 should not be allowed |
new_piece should be an optional. piece index -1
should not be allowed../src/peer_connection.cpp:4627 }
else
{
#if TORRENT_NO_FPU
p.progress = 0.f;
#else
p.progress = float(p.pieces.count()) / float(p.pieces.size());
#endif
p.progress_ppm = int(std::int64_t(p.pieces.count()) * 1000000 / p.pieces.size());
}
#if TORRENT_ABI_VERSION == 1
p.estimated_reciprocation_rate = m_est_reciprocation_rate;
#endif
error_code ec;
p.local_endpoint = get_socket()->local_endpoint(ec);
}
#ifndef TORRENT_DISABLE_SUPERSEEDING
void peer_connection::superseed_piece(piece_index_t const replace_piece
, piece_index_t const new_piece)
{
TORRENT_ASSERT(is_single_thread());
if (is_connecting()) return;
if (in_handshake()) return;
if (new_piece == piece_index_t(-1))
{
if (m_superseed_piece[0] == piece_index_t(-1)) return;
m_superseed_piece[0] = piece_index_t(-1);
m_superseed_piece[1] = piece_index_t(-1);
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "SUPER_SEEDING", "ending");
#endif
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
// this will either send a full bitfield or
// a have-all message, effectively terminating
// super-seeding, since the peer may pick any piece
write_bitfield();
return;
}
TORRENT_ASSERT(!has_piece(new_piece));
#ifndef TORRENT_DISABLE_LOGGING
| ||
relevance 3 | ../src/upnp.cpp:99 | bind the broadcast socket. it would probably have to be changed to a vector of interfaces to bind to, since the broadcast socket opens one socket per local interface by default |
bind the broadcast socket. it would probably have to be changed to a vector of interfaces to
bind to, since the broadcast socket opens one socket per local
interface by default../src/upnp.cpp:99} // upnp_errors namespace
static error_code ignore_error;
// rootdevice special member functions. In assert-enabled builds the
// destructor validates and clears the `magic` sentinel, so that any
// use-after-destruction or double-destruction trips the assertion.
upnp::rootdevice::rootdevice() = default;
#if TORRENT_USE_ASSERTS
upnp::rootdevice::~rootdevice()
{
TORRENT_ASSERT(magic == 1337);
magic = 0;
}
#else
upnp::rootdevice::~rootdevice() = default;
#endif
// copy and move operations are the compiler-generated memberwise defaults
upnp::rootdevice::rootdevice(rootdevice const&) = default;
upnp::rootdevice& upnp::rootdevice::operator=(rootdevice const&) = default;
upnp::rootdevice::rootdevice(rootdevice&&) = default;
upnp::rootdevice& upnp::rootdevice::operator=(rootdevice&&) = default;
// construct the UPnP port-mapping service.
// `listen_address`/`netmask` identify the local interface whose gateway
// we will probe; `listen_device` names that interface; `cb` receives
// port-map results. Sockets and timers are created on `ios` but not
// opened here — start() does that.
upnp::upnp(io_service& ios
, aux::session_settings const& settings
, aux::portmap_callback& cb
, address_v4 const& listen_address
, address_v4 const& netmask
, std::string listen_device)
: m_settings(settings)
, m_callback(cb)
, m_io_service(ios)
, m_resolver(ios)
, m_multicast_socket(ios)
, m_unicast_socket(ios)
, m_broadcast_timer(ios)
, m_refresh_timer(ios)
, m_map_timer(ios)
, m_listen_address(listen_address)
, m_netmask(netmask)
, m_device(std::move(listen_device))
#ifdef TORRENT_USE_OPENSSL
, m_ssl_ctx(ssl::context::sslv23_client)
#endif
{
#ifdef TORRENT_USE_OPENSSL
// routers commonly present self-signed certificates; don't verify
m_ssl_ctx.set_verify_mode(ssl::context::verify_none);
#endif
}
void upnp::start()
{
TORRENT_ASSERT(is_single_thread());
| ||
relevance 3 | ../src/session_impl.cpp:971 | closing the udp sockets here means that the uTP connections cannot be closed gracefully |
closing the udp sockets here means that
the uTP connections cannot be closed gracefully../src/session_impl.cpp:971#ifndef TORRENT_DISABLE_LOGGING
session_log(" aborting all connections (%d)", int(m_connections.size()));
#endif
// abort all connections
for (auto i = m_connections.begin(); i != m_connections.end();)
{
peer_connection* p = (*i).get();
++i;
p->disconnect(errors::stopping_torrent, operation_t::bittorrent);
}
// close the listen sockets
for (auto const& l : m_listen_sockets)
{
if (l->sock)
{
l->sock->close(ec);
TORRENT_ASSERT(!ec);
}
if (l->udp_sock)
{
l->udp_sock->sock.close();
}
}
// we need to give all the sockets an opportunity to actually have their handlers
// called and cancelled before we continue the shutdown. This is a bit
// complicated, if there are no "undead" peers, it's safe to resume the
// shutdown, but if there are, we have to wait for them to be cleared out
// first. In session_impl::on_tick() we check them periodically. If we're
// shutting down and we remove the last one, we'll initiate
// shutdown_stage2 from there.
if (m_undead_peers.empty())
{
m_io_service.post(make_handler([this] { abort_stage2(); }
, m_abort_handler_storage, *this));
}
}
void session_impl::abort_stage2() noexcept
{
m_download_rate.close();
m_upload_rate.close();
// it's OK to detach the threads here. The disk_io_thread
// has an internal counter and won't release the network
// thread until they're all dead (via m_work).
m_disk_thread.abort(false);
// now it's OK for the network thread to exit
| ||
relevance 3 | ../src/session_impl.cpp:1417 | the logic in this if-block should be factored out into a separate function. At least most of it |
the logic in this if-block should be factored out into a
separate function. At least most of it../src/session_impl.cpp:1417 , (lep.flags & listen_socket_t::local_network) ? "local-network " : ""
, (lep.flags & listen_socket_t::accept_incoming) ? "accept-incoming " : "no-incoming "
, (lep.flags & listen_socket_t::was_expanded) ? "expanded-ip " : ""
, (lep.flags & listen_socket_t::proxy) ? "proxy " : "");
}
#endif
auto ret = std::make_shared<listen_socket_t>();
ret->ssl = lep.ssl;
ret->original_port = bind_ep.port();
ret->flags = lep.flags;
ret->netmask = lep.netmask;
operation_t last_op = operation_t::unknown;
socket_type_t const sock_type
= (lep.ssl == transport::ssl)
? socket_type_t::tcp_ssl
: socket_type_t::tcp;
// if we're in force-proxy mode, don't open TCP listen sockets. We cannot
// accept connections on our local machine in this case.
if (ret->flags & listen_socket_t::accept_incoming)
{
ret->sock = std::make_shared<tcp::acceptor>(m_io_service);
ret->sock->open(bind_ep.protocol(), ec);
last_op = operation_t::sock_open;
if (ec)
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
session_log("failed to open socket: %s"
, ec.message().c_str());
}
#endif
if (m_alerts.should_post<listen_failed_alert>())
m_alerts.emplace_alert<listen_failed_alert>(lep.device, bind_ep, last_op
, ec, sock_type);
return ret;
}
#ifdef TORRENT_WINDOWS
{
// this is best-effort. ignore errors
error_code err;
ret->sock->set_option(exclusive_address_use(true), err);
#ifndef TORRENT_DISABLE_LOGGING
if (err && should_log())
{
session_log("failed enable exclusive address use on listen socket: %s"
, err.message().c_str());
| ||
relevance 3 | ../src/session_impl.cpp:2388 | it would be neat if the utp socket manager would handle ICMP errors too |
it would be neat if the utp socket manager would
handle ICMP errors too../src/session_impl.cpp:2388 ssl == transport::ssl ? m_ssl_utp_socket_manager :
#endif
m_utp_socket_manager;
auto listen_socket = ls.lock();
if (listen_socket)
listen_socket->incoming_connection = true;
for (;;)
{
aux::array<udp_socket::packet, 50> p;
error_code err;
int const num_packets = s->sock.read(p, err);
for (int i = 0; i < num_packets; ++i)
{
udp_socket::packet& packet = p[i];
if (packet.error)
{
#ifndef TORRENT_DISABLE_DHT
if (m_dht)
m_dht->incoming_error(packet.error, packet.from);
#endif
m_tracker_manager.incoming_error(packet.error, packet.from);
continue;
}
span<char const> const buf = packet.data;
// give the uTP socket manager first dibs on the packet. Presumably
// the majority of packets are uTP packets.
if (!mgr.incoming_packet(ls, packet.from, buf))
{
// if it wasn't a uTP packet, try the other users of the UDP
// socket
bool handled = false;
#ifndef TORRENT_DISABLE_DHT
if (m_dht && buf.size() > 20
&& buf.front() == 'd'
&& buf.back() == 'e'
&& listen_socket)
{
handled = m_dht->incoming_packet(listen_socket, packet.from, buf);
}
#endif
if (!handled)
{
| ||
relevance 3 | ../src/session_impl.cpp:3896 | it would probably make sense to have a separate list of peers that are eligible for optimistic unchoke, similar to the torrents perhaps this could even iterate over the pool allocators of torrent_peer objects. It could probably be done in a single pass and collect the n best candidates. maybe just a queue of peers would make even more sense, just pick the next peer in the queue for unchoking. It would be O(1). |
it would probably make sense to have a separate list of peers
that are eligible for optimistic unchoke, similar to the torrents
perhaps this could even iterate over the pool allocators of
torrent_peer objects. It could probably be done in a single pass and
collect the n best candidates. maybe just a queue of peers would make
even more sense, just pick the next peer in the queue for unchoking. It
would be O(1).../src/session_impl.cpp:3896 }
void session_impl::recalculate_optimistic_unchoke_slots()
{
INVARIANT_CHECK;
TORRENT_ASSERT(is_single_thread());
if (m_stats_counters[counters::num_unchoke_slots] == 0) return;
// if we unchoke everyone, skip this logic
if (settings().get_int(settings_pack::choking_algorithm) == settings_pack::fixed_slots_choker
&& settings().get_int(settings_pack::unchoke_slots_limit) < 0)
return;
std::vector<opt_unchoke_candidate> opt_unchoke;
// collect the currently optimistically unchoked peers here, so we can
// choke them when we've found new optimistic unchoke candidates.
std::vector<torrent_peer*> prev_opt_unchoke;
for (auto& i : m_connections)
{
peer_connection* p = i.get();
TORRENT_ASSERT(p);
torrent_peer* pi = p->peer_info_struct();
if (!pi) continue;
if (pi->web_seed) continue;
if (pi->optimistically_unchoked)
{
prev_opt_unchoke.push_back(pi);
}
torrent const* t = p->associated_torrent().lock().get();
if (!t) continue;
| ||
relevance 3 | ../src/session_impl.cpp:3919 | peers should know whether their torrent is paused or not, instead of having to ask it over and over again |
peers should know whether their torrent is paused or not,
instead of having to ask it over and over again../src/session_impl.cpp:3919 // collect the currently optimistically unchoked peers here, so we can
// choke them when we've found new optimistic unchoke candidates.
std::vector<torrent_peer*> prev_opt_unchoke;
for (auto& i : m_connections)
{
peer_connection* p = i.get();
TORRENT_ASSERT(p);
torrent_peer* pi = p->peer_info_struct();
if (!pi) continue;
if (pi->web_seed) continue;
if (pi->optimistically_unchoked)
{
prev_opt_unchoke.push_back(pi);
}
torrent const* t = p->associated_torrent().lock().get();
if (!t) continue;
if (t->is_paused()) continue;
if (!p->is_connecting()
&& !p->is_disconnecting()
&& p->is_peer_interested()
&& t->free_upload_slots()
&& (p->is_choked() || pi->optimistically_unchoked)
&& !p->ignore_unchoke_slots()
&& t->valid_metadata())
{
opt_unchoke.emplace_back(&i);
}
}
// find the peers that has been waiting the longest to be optimistically
// unchoked
int num_opt_unchoke = m_settings.get_int(settings_pack::num_optimistic_unchoke_slots);
int const allowed_unchoke_slots = int(m_stats_counters[counters::num_unchoke_slots]);
if (num_opt_unchoke == 0) num_opt_unchoke = std::max(1, allowed_unchoke_slots / 5);
if (num_opt_unchoke > int(opt_unchoke.size())) num_opt_unchoke =
int(opt_unchoke.size());
// find the n best optimistic unchoke candidates
std::partial_sort(opt_unchoke.begin()
, opt_unchoke.begin() + num_opt_unchoke
, opt_unchoke.end()
#ifndef TORRENT_DISABLE_EXTENSIONS
, last_optimistic_unchoke_cmp(m_ses_extensions[plugins_optimistic_unchoke_idx])
#else
, last_optimistic_unchoke_cmp()
| ||
relevance 3 | ../src/session_impl.cpp:4165 | there should be a pre-calculated list of all peers eligible for unchoking |
there should be a pre-calculated list of all peers eligible for
unchoking../src/session_impl.cpp:4165 }
void session_impl::recalculate_unchoke_slots()
{
TORRENT_ASSERT(is_single_thread());
time_point const now = aux::time_now();
time_duration const unchoke_interval = now - m_last_choke;
m_last_choke = now;
// if we unchoke everyone, skip this logic
if (settings().get_int(settings_pack::choking_algorithm) == settings_pack::fixed_slots_choker
&& settings().get_int(settings_pack::unchoke_slots_limit) < 0)
{
m_stats_counters.set_value(counters::num_unchoke_slots, std::numeric_limits<int>::max());
return;
}
// build list of all peers that are
// unchokable.
std::vector<peer_connection*> peers;
for (auto i = m_connections.begin(); i != m_connections.end();)
{
std::shared_ptr<peer_connection> p = *i;
TORRENT_ASSERT(p);
++i;
torrent* const t = p->associated_torrent().lock().get();
torrent_peer* const pi = p->peer_info_struct();
if (p->ignore_unchoke_slots() || t == nullptr || pi == nullptr
|| pi->web_seed || t->is_paused())
{
p->reset_choke_counters();
continue;
}
if (!p->is_peer_interested()
|| p->is_disconnecting()
|| p->is_connecting())
{
// this peer is not unchokable. So, if it's unchoked
// already, make sure to choke it.
if (p->is_choked())
{
p->reset_choke_counters();
continue;
}
if (pi && pi->optimistically_unchoked)
{
m_stats_counters.inc_stats_counter(counters::num_peers_up_unchoked_optimistic, -1);
pi->optimistically_unchoked = false;
| ||
relevance 3 | ../src/session_impl.cpp:4740 | perhaps params could be moved into the torrent object, instead of it being copied by the torrent constructor |
perhaps params could be moved into the torrent object, instead
of it being copied by the torrent constructor../src/session_impl.cpp:4740 for (auto& e : m_ses_extensions[plugins_all_idx])
{
std::shared_ptr<torrent_plugin> tp(e->new_torrent(
torrent_ptr->get_handle(), userdata));
if (tp) torrent_ptr->add_extension(std::move(tp));
}
}
#endif
torrent_handle session_impl::add_torrent(add_torrent_params&& params
, error_code& ec)
{
// params is updated by add_torrent_impl()
std::shared_ptr<torrent> torrent_ptr;
// in case there's an error, make sure to abort the torrent before leaving
// the scope
auto abort_torrent = aux::scope_end([&]{ if (torrent_ptr) torrent_ptr->abort(); });
bool added;
std::tie(torrent_ptr, added) = add_torrent_impl(params, ec);
torrent_handle const handle(torrent_ptr);
m_alerts.emplace_alert<add_torrent_alert>(handle, params, ec);
if (!torrent_ptr) return handle;
// params.info_hash should have been initialized by add_torrent_impl()
TORRENT_ASSERT(params.info_hash != sha1_hash(nullptr));
#ifndef TORRENT_DISABLE_DHT
if (params.ti)
{
for (auto const& n : params.ti->nodes())
add_dht_node_name(n);
}
#endif
#if TORRENT_ABI_VERSION == 1
if (m_alerts.should_post<torrent_added_alert>())
m_alerts.emplace_alert<torrent_added_alert>(handle);
#endif
// if this was an existing torrent, we can't start it again, or add
// another set of plugins etc. we're done
if (!added)
{
abort_torrent.disarm();
return handle;
}
| ||
relevance 3 | ../src/session_impl.cpp:6007 | use public_key here instead of std::array |
use public_key here instead of std::array../src/session_impl.cpp:6007
// start an asynchronous DHT lookup for an immutable item stored under
// the given SHA-1 target. This is a no-op when the DHT is not running.
// The result is delivered via get_immutable_callback; the original
// target is bound into the callback so the alert can identify which
// lookup completed.
void session_impl::dht_get_immutable_item(sha1_hash const& target)
{
if (!m_dht) return;
m_dht->get_item(target, std::bind(&session_impl::get_immutable_callback
, this, target, _1));
}
// completion callback for a mutable DHT item lookup. It may fire more
// than once: first with cached (non-authoritative) results and finally
// with the authoritative one. Forwards the item's fields to the alert
// queue as a dht_mutable_item_alert.
void session_impl::get_mutable_callback(dht::item const& i
	, bool const authoritative)
{
	TORRENT_ASSERT(i.is_mutable());
	auto const& key = i.pk();
	auto const& sig = i.sig();
	auto const& seq = i.seq();
	m_alerts.emplace_alert<dht_mutable_item_alert>(key.bytes, sig.bytes
		, seq.value, i.salt(), i.value(), authoritative);
}
// key is a 32-byte binary string, the public key to look up.
// the salt is optional
// starts an asynchronous DHT lookup for the mutable item published
// under (key, salt). No-op when the DHT is not running. Results are
// delivered through get_mutable_callback, possibly more than once
// (non-authoritative results first).
void session_impl::dht_get_mutable_item(std::array<char, 32> key
, std::string salt)
{
if (!m_dht) return;
m_dht->get_item(dht::public_key(key.data()), std::bind(&session_impl::get_mutable_callback
, this, _1, _2), std::move(salt));
}
namespace {
// completion handler for an immutable DHT put. "num" is the number of
// nodes the item was stored on. Posts a dht_put_alert if anyone is
// listening for it.
void on_dht_put_immutable_item(alert_manager& alerts, sha1_hash target, int num)
{
	if (!alerts.should_post<dht_put_alert>()) return;
	alerts.emplace_alert<dht_put_alert>(target, num);
}
// completion handler for a mutable DHT put. Posts a dht_put_alert
// carrying the item's public key, signature, salt and sequence number,
// along with the number of nodes that stored the item.
void on_dht_put_mutable_item(alert_manager& alerts, dht::item const& i, int num)
{
	if (!alerts.should_post<dht_put_alert>()) return;
	dht::public_key const key = i.pk();
	dht::signature const signature = i.sig();
	dht::sequence_number const sequence = i.seq();
	std::string salt_copy = i.salt();
	alerts.emplace_alert<dht_put_alert>(key.bytes, signature.bytes
		, std::move(salt_copy), sequence.value, num);
}
void put_mutable_callback(dht::item& i
, std::function<void(entry&, std::array<char, 64>&
| ||
relevance 3 | ../src/torrent.cpp:239 | we could probably get away with just saving a few fields here |
we could probably get away with just saving a few fields here../src/torrent.cpp:239 | ||
relevance 3 | ../src/torrent.cpp:836 | assert there are no outstanding async operations on this torrent |
assert there are no outstanding async operations on this
torrent../src/torrent.cpp:836 // deprecated in 1.2
// if we don't have the metadata, and we're waiting
// for a web server to serve it to us, no need to announce
// because the info-hash is just the URL hash
if (!m_torrent_file->is_valid() && !m_url.empty()) return false;
#endif
// don't announce private torrents
if (m_torrent_file->is_valid() && m_torrent_file->priv()) return false;
if (m_trackers.empty()) return true;
if (!settings().get_bool(settings_pack::use_dht_as_fallback)) return true;
return std::none_of(m_trackers.begin(), m_trackers.end()
, [](announce_entry const& tr) { return bool(tr.verified); });
}
#endif
// destructor: by this point all peer connections are expected to be
// closed and the torrent unlinked from the session's bookkeeping lists.
torrent::~torrent()
{
#if TORRENT_USE_ASSERTS
// unlink ourselves from every session-level torrent list we are still
// a member of, so the lists don't end up with dangling entries
for (torrent_list_index_t i{}; i != m_links.end_index(); ++i)
{
if (!m_links[i].in_list()) continue;
m_links[i].unlink(m_ses.torrent_list(i), i);
}
#endif
// The invariant can't be maintained here, since the torrent
// is being destructed, all weak references to it have been
// reset, which means that all its peers already have an
// invalidated torrent pointer (so it cannot be verified to be correct)
// i.e. the invariant can only be maintained if all connections have
// been closed by the time the torrent is destructed. And they are
// supposed to be closed. So we can still do the invariant check.
// however, the torrent object may be destructed from the main
// thread when shutting down, if the disk cache has references to it.
// this means that the invariant check that this is called from the
// network thread cannot be maintained
TORRENT_ASSERT(m_peer_class == peer_class_t{0});
TORRENT_ASSERT(m_connections.empty());
// just in case, make sure the session accounting is kept right
// (defensive: the assert above says m_connections should already be
// empty, so in release builds this loop cleans up any stragglers)
for (auto p : m_connections)
m_ses.close_connection(p);
}
void torrent::read_piece(piece_index_t const piece)
| ||
relevance 3 | ../src/torrent.cpp:1379 | there's some duplication between this function and peer_connection::incoming_piece(). is there a way to merge something? |
there's some duplication between this function and
peer_connection::incoming_piece(). is there a way to merge something?../src/torrent.cpp:1379 piece_refcount(piece_picker& p, piece_index_t piece)
: m_picker(p)
, m_piece(piece)
{
m_picker.inc_refcount(m_piece, nullptr);
}
piece_refcount(piece_refcount const&) = delete;
piece_refcount& operator=(piece_refcount const&) = delete;
~piece_refcount()
{
m_picker.dec_refcount(m_piece, nullptr);
}
private:
piece_picker& m_picker;
piece_index_t m_piece;
};
void torrent::add_piece(piece_index_t const piece, char const* data
, add_piece_flags_t const flags)
{
TORRENT_ASSERT(is_single_thread());
int const piece_size = m_torrent_file->piece_size(piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
if (m_deleted) return;
// avoid crash trying to access the picker when there is none
if (m_have_all && !has_picker()) return;
need_picker();
if (picker().have_piece(piece)
&& !(flags & torrent_handle::overwrite_existing))
return;
peer_request p;
p.piece = piece;
p.start = 0;
piece_refcount refcount{picker(), piece};
for (int i = 0; i < blocks_in_piece; ++i, p.start += block_size())
{
piece_block const block(piece, i);
if (!(flags & torrent_handle::overwrite_existing)
&& picker().is_finished(block))
continue;
p.length = std::min(piece_size - p.start, block_size());
| ||
relevance 3 | ../src/torrent.cpp:3733 | this could probably be pulled out into a free function |
this could probably be pulled out into a free function../src/torrent.cpp:3733 {
// it's an impossible combination to have 0 pieces, but still have one of them be the last piece
TORRENT_ASSERT(!(pc.num_pieces == 0 && pc.last_piece == true));
// if we have 0 pieces, we can't have any pad blocks either
TORRENT_ASSERT(!(pc.num_pieces == 0 && pc.pad_blocks > 0));
// if we have all pieces, we must also have the last one
TORRENT_ASSERT(!(pc.num_pieces == fs.num_pieces() && pc.last_piece == false));
int const block_size = std::min(default_block_size, fs.piece_length());
// every block should not be a pad block
TORRENT_ASSERT(pc.pad_blocks <= std::int64_t(pc.num_pieces) * fs.piece_length() / block_size);
return std::int64_t(pc.num_pieces) * fs.piece_length()
- (pc.last_piece ? fs.piece_length() - fs.piece_size(fs.last_piece()) : 0)
- std::int64_t(pc.pad_blocks) * block_size;
}
// fills in total_wanted, total_wanted_done and total_done
void torrent::bytes_done(torrent_status& st, status_flags_t const flags) const
{
INVARIANT_CHECK;
st.total_done = 0;
st.total_wanted_done = 0;
st.total_wanted = m_torrent_file->total_size();
TORRENT_ASSERT(st.total_wanted >= m_padding_blocks * default_block_size);
TORRENT_ASSERT(st.total_wanted >= 0);
TORRENT_ASSERT(!valid_metadata() || m_torrent_file->num_pieces() > 0);
if (!valid_metadata()) return;
TORRENT_ASSERT(st.total_wanted >= std::int64_t(m_torrent_file->piece_length())
* (m_torrent_file->num_pieces() - 1));
// if any piece hash fails, we'll be taken out of seed mode
// and m_seed_mode will be false
if (m_seed_mode || is_seed())
{
st.total_done = m_torrent_file->total_size()
- m_padding_blocks * default_block_size;
st.total_wanted_done = st.total_done;
st.total_wanted = st.total_done;
return;
}
else if (!has_picker())
{
st.total_done = 0;
st.total_wanted_done = 0;
| ||
relevance 3 | ../src/torrent.cpp:4490 | should this alert have an error code in it? |
should this alert have an error code in it?../src/torrent.cpp:4490 on_remove_peers();
TORRENT_ASSERT(m_connections.empty());
// post a message to the main thread to destruct
// the torrent object from there
if (m_storage)
{
try {
m_ses.disk_thread().async_stop_torrent(m_storage
, std::bind(&torrent::on_torrent_aborted, shared_from_this()));
}
catch (std::exception const& e)
{
TORRENT_UNUSED(e);
m_storage.reset();
#ifndef TORRENT_DISABLE_LOGGING
debug_log("Failed to flush disk cache: %s", e.what());
#endif
// clients may rely on this alert to be posted, so it's probably a
// good idea to post it here, even though we failed
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
}
}
else
{
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
}
| ||
relevance 3 | ../src/torrent.cpp:4557 | this should return optional<>. piece index -1 should not be allowed |
this should return optional<>. piece index -1 should not be
allowed../src/torrent.cpp:4557 }
#ifndef TORRENT_DISABLE_SUPERSEEDING
// enable or disable super-seeding mode. Marks the resume data dirty and
// the state as updated. When the mode is turned off, every connected
// peer has its super-seeded piece cleared.
void torrent::set_super_seeding(bool on)
{
	if (on == m_super_seeding) return;

	m_super_seeding = on;
	set_need_save_resume();
	state_updated();

	if (!m_super_seeding)
	{
		// super seeding was just switched off; tell each peer
		// connection to stop super-seeding its assigned pieces
		for (auto conn : *this)
			conn->superseed_piece(piece_index_t(-1), piece_index_t(-1));
	}
}
piece_index_t torrent::get_piece_to_super_seed(typed_bitfield<piece_index_t> const& bits)
{
// return a piece with low availability that is not in
// the bitfield and that is not currently being super
// seeded by any peer
TORRENT_ASSERT(m_super_seeding);
// do a linear search from the first piece
int min_availability = 9999;
std::vector<piece_index_t> avail_vec;
for (auto const i : m_torrent_file->piece_range())
{
if (bits[i]) continue;
int availability = 0;
for (auto pc : *this)
{
if (pc->super_seeded_piece(i))
{
// avoid super-seeding the same piece to more than one
// peer if we can avoid it. Do this by artificially
// increase the availability
availability = 999;
break;
}
if (pc->has_piece(i)) ++availability;
}
if (availability > min_availability) continue;
if (availability == min_availability)
{
avail_vec.push_back(i);
| ||
relevance 3 | ../src/path.cpp:327 | find out what error code is reported when the filesystem does not support hard links. |
find out what error code is reported when the filesystem
does not support hard links.../src/path.cpp:327 int ret = ::mkdir(n.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
if (ret < 0 && errno != EEXIST)
ec.assign(errno, system_category());
#endif
}
void hard_link(std::string const& file, std::string const& link
, error_code& ec)
{
native_path_string n_exist = convert_to_native_path_string(file);
native_path_string n_link = convert_to_native_path_string(link);
#ifdef TORRENT_WINDOWS
BOOL ret = CreateHardLinkW(n_link.c_str(), n_exist.c_str(), nullptr);
if (ret)
{
ec.clear();
return;
}
// something failed. Does the filesystem not support hard links?
DWORD const error = GetLastError();
if (error != ERROR_NOT_SUPPORTED && error != ERROR_ACCESS_DENIED)
{
// it's possible CreateHardLink will copy the file internally too,
// if the filesystem does not support it.
ec.assign(GetLastError(), system_category());
return;
}
// fall back to making a copy
#else
// assume posix's link() function exists
int ret = ::link(n_exist.c_str(), n_link.c_str());
if (ret == 0)
{
ec.clear();
return;
}
// most errors are passed through, except for the ones that indicate that
// hard links are not supported and require a copy.
| ||
relevance 3 | ../src/web_peer_connection.cpp:187 | this should be an optional, piece index -1 should not be allowed |
this should be an optional, piece index -1 should
not be allowed../src/web_peer_connection.cpp:187 if (m_web->have_files.get_bit(i) || fs.pad_file_at(i)) continue;
auto const range = aux::file_piece_range_inclusive(fs, i);
for (piece_index_t k = std::get<0>(range); k < std::get<1>(range); ++k)
have.clear_bit(k);
}
if (have.none_set())
{
incoming_have_none();
m_web->interesting = false;
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "WEB-SEED", "have no pieces, not interesting. %s", m_url.c_str());
#endif
}
else
{
incoming_bitfield(have);
}
}
if (m_web->restart_request.piece != piece_index_t(-1))
{
// increase the chances of requesting the block
// we have partial data for already, to finish it
incoming_suggest(m_web->restart_request.piece);
}
web_connection_base::on_connected();
}
void web_peer_connection::disconnect(error_code const& ec
, operation_t op, disconnect_severity_t const error)
{
if (is_disconnecting()) return;
if (op == operation_t::sock_write && ec == boost::system::errc::broken_pipe)
{
#ifndef TORRENT_DISABLE_LOGGING
// a write operation failed with broken-pipe. This typically happens
// with HTTP 1.0 servers that close their incoming channel of the TCP
// stream whenever they're done reading one full request. Instead of
// us bailing out and failing the entire request just because our
// write-end was closed, ignore it and keep reading until the read-end
// also is closed.
peer_log(peer_log_alert::info, "WRITE_DIRECTION", "CLOSED");
#endif
// prevent the peer from trying to send anything more
m_send_buffer.clear();
// when the web server closed our write-end of the socket (i.e. its
// read-end), if it's an HTTP 1.0 server. we will stop sending more
| ||
relevance 3 | ../src/web_peer_connection.cpp:401 | do we really need a special case here? wouldn't the multi-file case handle single file torrents correctly too? |
do we really need a special case here? wouldn't the multi-file
case handle single file torrents correctly too?../src/web_peer_connection.cpp:401 size -= pr.length;
}
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::outgoing_message, "REQUESTING", "(piece: %d start: %d) - (piece: %d end: %d)"
, static_cast<int>(r.piece), r.start
, static_cast<int>(pr.piece), pr.start + pr.length);
#endif
bool const single_file_request = t->torrent_file().num_files() == 1;
int const proxy_type = m_settings.get_int(settings_pack::proxy_type);
bool const using_proxy = (proxy_type == settings_pack::http
|| proxy_type == settings_pack::http_pw) && !m_ssl;
// the number of pad files that have been "requested". In case we _only_
// request padfiles, we can't rely on handling them in the on_receive()
// callback (because we won't receive anything), instead we have to post a
// pretend read callback where we can deliver the zeroes for the partfile
int num_pad_files = 0;
if (single_file_request)
{
file_request_t file_req;
file_req.file_index = file_index_t(0);
file_req.start = std::int64_t(static_cast<int>(req.piece)) * info.piece_length()
+ req.start;
file_req.length = req.length;
request += "GET ";
// do not encode single file paths, they are
// assumed to be encoded in the torrent file
request += using_proxy ? m_url : m_path;
request += " HTTP/1.1\r\n";
add_headers(request, m_settings, using_proxy);
request += "\r\nRange: bytes=";
request += to_string(file_req.start).data();
request += "-";
request += to_string(file_req.start + file_req.length - 1).data();
request += "\r\n\r\n";
m_first_request = false;
m_file_requests.push_back(file_req);
}
else
{
std::vector<file_slice> files = info.orig_files().map_block(req.piece, req.start
, req.length);
for (auto const &f : files)
{
file_request_t file_req;
| ||
relevance 3 | ../src/web_peer_connection.cpp:486 | file_index_t should not allow negative values |
file_index_t should not allow negative values../src/web_peer_connection.cpp:486 // with the correct slashes. Don't encode it again
request += m_path;
}
request += escape_file_path(info.orig_files(), f.file_index);
}
request += " HTTP/1.1\r\n";
add_headers(request, m_settings, using_proxy);
request += "\r\nRange: bytes=";
request += to_string(f.offset).data();
request += "-";
request += to_string(f.offset + f.size - 1).data();
request += "\r\n\r\n";
m_first_request = false;
#if 0
std::cerr << this << " SEND-REQUEST: f: " << f.file_index
<< " s: " << f.offset
<< " e: " << (f.offset + f.size - 1) << std::endl;
#endif
TORRENT_ASSERT(f.file_index >= file_index_t(0));
m_file_requests.push_back(file_req);
}
}
if (num_pad_files == int(m_file_requests.size()))
{
get_io_service().post(std::bind(
&web_peer_connection::on_receive_padfile,
std::static_pointer_cast<web_peer_connection>(self())));
return;
}
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::outgoing_message, "REQUEST", "%s", request.c_str());
#endif
send_buffer(request);
}
namespace {
std::string get_peer_name(http_parser const& p, std::string const& host)
{
std::string ret = "URL seed @ ";
ret += host;
std::string const& server_version = p.header("server");
if (!server_version.empty())
{
| ||
relevance 3 | ../src/web_peer_connection.cpp:665 | this could be made more efficient for the case when we use an HTTP proxy. Then we wouldn't need to add new web seeds to the torrent, we could just make the redirect table contain full URLs. |
this could be made more efficient for the case when we use an
HTTP proxy. Then we wouldn't need to add new web seeds to the torrent,
we could just make the redirect table contain full URLs.../src/web_peer_connection.cpp:665 // we should not try this server again.
t->remove_web_seed_conn(this, errors::missing_location, operation_t::bittorrent, peer_error);
m_web = nullptr;
TORRENT_ASSERT(is_disconnecting());
return;
}
bool const single_file_request = !m_path.empty()
&& m_path[m_path.size() - 1] != '/';
// add the redirected url and remove the current one
if (!single_file_request)
{
TORRENT_ASSERT(!m_file_requests.empty());
file_index_t const file_index = m_file_requests.front().file_index;
location = resolve_redirect_location(m_url, location);
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "LOCATION", "%s", location.c_str());
#endif
std::string redirect_base;
std::string redirect_path;
error_code ec;
std::tie(redirect_base, redirect_path) = split_url(location, ec);
if (ec)
{
// we should not try this server again.
disconnect(errors::missing_location, operation_t::bittorrent, failure);
return;
}
// add_web_seed won't add duplicates. If we have already added an entry
// with this URL, we'll get back the existing entry
// "ephemeral" flag should be set to avoid "web_seed_t" saving in resume data.
// E.g. original "web_seed_t" request url points to "http://example1.com/file1" and
// web server responses with redirect location "http://example2.com/subpath/file2".
// "handle_redirect" process this location to create new "web_seed_t"
// with base url=="http://example2.com/" and redirects[0]=="/subpath/file2").
// If we try to load resume with such "web_seed_t" then "web_peer_connection" will send
// request with wrong path "http://example2.com/file1" (cause "redirects" map is not serialized in resume)
web_seed_t* web = t->add_web_seed(redirect_base, web_seed_entry::url_seed
, m_external_auth, m_extra_headers, torrent::ephemeral);
web->have_files.resize(t->torrent_file().num_files(), false);
// the new web seed we're adding only has this file for now
// we may add more files later
web->redirects[file_index] = redirect_path;
if (web->have_files.get_bit(file_index) == false)
{
| ||
relevance 3 | ../src/file_storage.cpp:665 | this is a hack to retain ABI compatibility with 1.2.1 in next major release, make this return by value |
this is a hack to retain ABI compatibility with 1.2.1
in next major release, make this return by value../src/file_storage.cpp:665 m_mtime[last_file()] = std::time_t(mtime);
}
m_total_size += e.size;
}
// returns the per-file SHA-1 hash recorded for the given file, or an
// all-zero hash when no hash is stored for that index
sha1_hash file_storage::hash(file_index_t const index) const
{
	return index < m_file_hashes.end_index()
		? sha1_hash(m_file_hashes[index])
		: sha1_hash();
}
// returns the symlink target path for the given file, prefixed with the
// torrent name. NOTE(review): returning a reference into a small pool of
// static buffers is a deliberate hack to retain the by-reference ABI (see
// the TODO above); the returned reference is only valid until four
// subsequent calls have been made, and concurrent calls from multiple
// threads may race on the same slot — confirm callers copy the result
// promptly.
std::string const& file_storage::symlink(file_index_t const index) const
{
TORRENT_ASSERT_PRECOND(index >= file_index_t(0) && index < end_file());
internal_file_entry const& fe = m_files[index];
TORRENT_ASSERT(fe.symlink_index < int(m_symlinks.size()));
auto const& link = m_symlinks[fe.symlink_index];
// rotate through 4 static buffers so up to 4 most-recent return values
// stay valid at a time; the atomic counter picks the next slot
static std::string storage[4];
static std::atomic<size_t> counter{0};
std::string& ret = storage[(counter++) % 4];
ret.reserve(m_name.size() + link.size() + 1);
ret.assign(m_name);
append_path(ret, link);
return ret;
}
// returns the recorded modification time of the given file, or 0 when
// no mtime is stored for that index
std::time_t file_storage::mtime(file_index_t const index) const
{
	return index < m_mtime.end_index() ? m_mtime[index] : 0;
}
namespace {
template <class CRC>
void process_string_lowercase(CRC& crc, string_view str)
{
for (char const c : str)
crc.process_byte(to_lower(c) & 0xff);
}
template <class CRC>
void process_path_lowercase(
std::unordered_set<std::uint32_t>& table
, CRC crc, string_view str)
{
if (str.empty()) return;
for (char const c : str)
| ||
relevance 3 | ../src/kademlia/rpc_manager.cpp:65 | move this into its own .cpp file |
move this into its own .cpp file../src/kademlia/rpc_manager.cpp:65#include <libtorrent/kademlia/get_item.hpp>
#include <libtorrent/kademlia/sample_infohashes.hpp>
#include <libtorrent/kademlia/dht_settings.hpp>
#include <libtorrent/socket_io.hpp> // for print_endpoint
#include <libtorrent/aux_/time.hpp> // for aux::time_now
#include <libtorrent/aux_/aligned_union.hpp>
#include <libtorrent/broadcast_socket.hpp> // for is_v6
#include <type_traits>
#include <functional>
#ifndef TORRENT_DISABLE_LOGGING
#include <cinttypes> // for PRId64 et.al.
#endif
using namespace std::placeholders;
namespace libtorrent { namespace dht {
// out-of-line definitions of the observer flag constants
// (presumably required because the static constexpr members are
// odr-used and the codebase predates C++17 inline variables — confirm
// against the project's minimum language standard)
constexpr observer_flags_t observer::flag_queried;
constexpr observer_flags_t observer::flag_initial;
constexpr observer_flags_t observer::flag_no_id;
constexpr observer_flags_t observer::flag_short_timeout;
constexpr observer_flags_t observer::flag_failed;
constexpr observer_flags_t observer::flag_ipv6_address;
constexpr observer_flags_t observer::flag_alive;
constexpr observer_flags_t observer::flag_done;
// reach through the owning traversal algorithm to its node, and return
// that node's dht_observer
dht_observer* observer::get_observer() const
{
return m_algorithm->get_node().observer();
}
// record the endpoint this request is being sent to, and timestamp the
// send. The address is stored in a compact v4/v6 union, with
// flag_ipv6_address acting as the discriminator.
void observer::set_target(udp::endpoint const& ep)
{
	m_sent = clock_type::now();

	if (is_v6(ep))
	{
		flags |= flag_ipv6_address;
		m_addr.v6 = ep.address().to_v6().to_bytes();
	}
	else
	{
		flags &= ~flag_ipv6_address;
		m_addr.v4 = ep.address().to_v4().to_bytes();
	}
	m_port = ep.port();
}
| ||
relevance 3 | ../include/libtorrent/utf8.hpp:80 | take a string_view here |
take a string_view here../include/libtorrent/utf8.hpp:80
// source sequence is illegal/malformed
source_illegal
};
// hidden
TORRENT_EXPORT error_code make_error_code(error_code_enum e);
}
TORRENT_EXPORT boost::system::error_category const& utf8_category();
// ``utf8_wchar`` converts a UTF-8 string (``utf8``) to a wide character
// string (``wide``). ``wchar_utf8`` converts a wide character string
// (``wide``) to a UTF-8 string (``utf8``). The return value is one of
// the enumeration values from utf8_conv_result_t.
TORRENT_EXTRA_EXPORT std::wstring utf8_wchar(string_view utf8, error_code& ec);
TORRENT_EXTRA_EXPORT std::wstring utf8_wchar(string_view utf8);
TORRENT_EXTRA_EXPORT std::string wchar_utf8(wstring_view wide, error_code& ec);
TORRENT_EXTRA_EXPORT std::string wchar_utf8(wstring_view wide);
TORRENT_EXTRA_EXPORT std::pair<std::int32_t, int>
parse_utf8_codepoint(string_view str);
}
#endif
| ||
relevance 3 | ../include/libtorrent/enum_net.hpp:116 | use string_view for device_name |
use string_view for device_name../include/libtorrent/enum_net.hpp:116
// return a netmask with the specified address family and the specified
// number of prefix bit set, of the most significant bits in the resulting
// netmask
TORRENT_EXTRA_EXPORT address build_netmask(int bits, int family);
// return the gateway for the given ip_interface, if there is one. Otherwise
// return nullopt.
TORRENT_EXTRA_EXPORT boost::optional<address> get_gateway(
ip_interface const& iface, span<ip_route const> routes);
TORRENT_EXTRA_EXPORT bool has_default_route(char const* device, int family
, span<ip_route const> routes);
// attempt to bind socket to the device with the specified name. For systems
// that don't support SO_BINDTODEVICE the socket will be bound to one of the
// IP addresses of the specified device. In this case it is necessary to
// verify the local endpoint of the socket once the connection is established.
// the returned address is the ip the socket was bound to (or address_v4::any()
// in case SO_BINDTODEVICE succeeded and we don't need to verify it).
template <class Socket>
address bind_socket_to_device(io_service& ios, Socket& sock
, tcp const& protocol
, char const* device_name, int port, error_code& ec)
{
tcp::endpoint bind_ep(address_v4::any(), std::uint16_t(port));
address ip = make_address(device_name, ec);
if (!ec)
{
// this is to cover the case where "0.0.0.0" is considered any IPv4 or
// IPv6 address. If we're asking to be bound to an IPv6 address and
// providing 0.0.0.0 as the device, turn it into "::"
if (ip == address_v4::any() && protocol == boost::asio::ip::tcp::v6())
ip = address_v6::any();
bind_ep.address(ip);
// it appears to be an IP. Just bind to that address
sock.bind(bind_ep, ec);
return bind_ep.address();
}
ec.clear();
#if TORRENT_HAS_BINDTODEVICE
// try to use SO_BINDTODEVICE here, if that exists. If it fails,
// fall back to the mechanism we have below
aux::bind_device(sock, device_name, ec);
if (ec)
#endif
{
ec.clear();
| ||
relevance 3 | ../include/libtorrent/stat.hpp:255 | everything but payload counters and rates could probably be removed from here |
everything but payload counters and rates could probably be
removed from here../include/libtorrent/stat.hpp:255 // peer_connection is opened and have some previous
// transfers from earlier connections.
void add_stat(std::int64_t downloaded, std::int64_t uploaded)
{
m_stat[download_payload].offset(downloaded);
m_stat[upload_payload].offset(uploaded);
}
int last_payload_downloaded() const
{ return m_stat[download_payload].counter(); }
int last_payload_uploaded() const
{ return m_stat[upload_payload].counter(); }
int last_protocol_downloaded() const
{ return m_stat[download_protocol].counter(); }
int last_protocol_uploaded() const
{ return m_stat[upload_protocol].counter(); }
// these are the channels we keep stats for
enum
{
upload_payload,
upload_protocol,
download_payload,
download_protocol,
upload_ip_protocol,
download_ip_protocol,
num_channels
};
void clear()
{
for (int i = 0; i < num_channels; ++i)
m_stat[i].clear();
}
stat_channel const& operator[](int i) const
{
TORRENT_ASSERT(i >= 0 && i < num_channels);
return m_stat[i];
}
private:
stat_channel m_stat[num_channels];
};
}
#endif // TORRENT_STAT_HPP_INCLUDED
| ||
relevance 3 | ../include/libtorrent/torrent_handle.hpp:491 | unify url_seed and http_seed with just web_seed, using the web_seed_entry. |
unify url_seed and http_seed with just web_seed, using the
web_seed_entry.../include/libtorrent/torrent_handle.hpp:491 // announce url for the tracker as well as an int ``tier``, which is
// specifies the order in which this tracker is tried. If you want
// libtorrent to use another list of trackers for this torrent, you can
// use ``replace_trackers()`` which takes a list of the same form as the
// one returned from ``trackers()`` and will replace it. If you want an
// immediate effect, you have to call force_reannounce(). See
// announce_entry.
//
// ``add_tracker()`` will look if the specified tracker is already in the
// set. If it is, it doesn't do anything. If it's not in the current set
// of trackers, it will insert it in the tier specified in the
// announce_entry.
//
// The updated set of trackers will be saved in the resume data, and when
// a torrent is started with resume data, the trackers from the resume
// data will replace the original ones.
std::vector<announce_entry> trackers() const;
void replace_trackers(std::vector<announce_entry> const&) const;
void add_tracker(announce_entry const&) const;
// ``add_url_seed()`` adds another url to the torrent's list of url
// seeds. If the given url already exists in that list, the call has no
// effect. The torrent will connect to the server and try to download
// pieces from it, unless it's paused, queued, checking or seeding.
// ``remove_url_seed()`` removes the given url if it exists already.
// ``url_seeds()`` return a set of the url seeds currently in this
// torrent. Note that URLs that fails may be removed automatically from
// the list.
//
// See http-seeding_ for more information.
void add_url_seed(std::string const& url) const;
void remove_url_seed(std::string const& url) const;
std::set<std::string> url_seeds() const;
// These functions are identical as the ``*_url_seed()`` variants, but
// they operate on `BEP 17`_ web seeds instead of `BEP 19`_.
//
// See http-seeding_ for more information.
void add_http_seed(std::string const& url) const;
void remove_http_seed(std::string const& url) const;
std::set<std::string> http_seeds() const;
// add the specified extension to this torrent. The ``ext`` argument is
// a function that will be called from within libtorrent's context
// passing in the internal torrent object and the specified userdata
// pointer. The function is expected to return a shared pointer to
// a torrent_plugin instance.
void add_extension(
std::function<std::shared_ptr<torrent_plugin>(torrent_handle const&, void*)> const& ext
, void* userdata = nullptr);
| ||
relevance 3 | ../include/libtorrent/tracker_manager.hpp:388 | make sure the udp_socket supports passing on string-hostnames too, and that this function is used |
make sure the udp_socket supports passing on string-hostnames
too, and that this function is used../include/libtorrent/tracker_manager.hpp:388 io_service& ios
, tracker_request const& r
, aux::session_settings const& sett
, std::weak_ptr<request_callback> c
= std::weak_ptr<request_callback>()) = delete;
void abort_all_requests(bool all = false);
void remove_request(http_tracker_connection const* c);
void remove_request(udp_tracker_connection const* c);
bool empty() const;
int num_requests() const;
void sent_bytes(int bytes);
void received_bytes(int bytes);
void incoming_error(error_code const& ec, udp::endpoint const& ep);
bool incoming_packet(udp::endpoint const& ep, span<char const> buf);
// this is only used for SOCKS packets, since
// they may be addressed to hostname
bool incoming_packet(char const* hostname, span<char const> buf);
void update_transaction_id(
std::shared_ptr<udp_tracker_connection> c
, std::uint32_t tid);
aux::session_settings const& settings() const { return m_settings; }
resolver_interface& host_resolver() { return m_host_resolver; }
void send_hostname(aux::listen_socket_handle const& sock
, char const* hostname, int port, span<char const> p
, error_code& ec, udp_send_flags_t flags = {});
void send(aux::listen_socket_handle const& sock
, udp::endpoint const& ep, span<char const> p
, error_code& ec, udp_send_flags_t flags = {});
private:
// maps transactionid to the udp_tracker_connection
// These must use shared_ptr to avoid a dangling reference
// if a connection is erased while a timeout event is in the queue
std::unordered_map<std::uint32_t, std::shared_ptr<udp_tracker_connection>> m_udp_conns;
std::vector<std::shared_ptr<http_tracker_connection>> m_http_conns;
std::deque<std::shared_ptr<http_tracker_connection>> m_queued;
send_fun_t m_send_fun;
send_fun_hostname_t m_send_fun_hostname;
resolver_interface& m_host_resolver;
aux::session_settings const& m_settings;
| ||
relevance 3 | ../include/libtorrent/pe_crypto.hpp:71 | dh_key_exchange should probably move into its own file |
dh_key_exchange should probably move into its own file../include/libtorrent/pe_crypto.hpp:71
#include <list>
#include <array>
#include <cstdint>
namespace libtorrent {
namespace mp = boost::multiprecision;
using key_t = mp::number<mp::cpp_int_backend<768, 768, mp::unsigned_magnitude, mp::unchecked, void>>;
TORRENT_EXTRA_EXPORT std::array<char, 96> export_key(key_t const& k);
// RC4 state from libtomcrypt
struct rc4 {
int x;
int y;
aux::array<std::uint8_t, 256> buf;
};
class TORRENT_EXTRA_EXPORT dh_key_exchange
{
public:
dh_key_exchange();
bool good() const { return true; }
// Get local public key
key_t const& get_local_key() const { return m_dh_local_key; }
// read remote_pubkey, generate and store shared secret in
// m_dh_shared_secret.
void compute_secret(std::uint8_t const* remote_pubkey);
void compute_secret(key_t const& remote_pubkey);
key_t const& get_secret() const { return m_dh_shared_secret; }
sha1_hash const& get_hash_xor_mask() const { return m_xor_mask; }
private:
key_t m_dh_local_key;
key_t m_dh_local_secret;
key_t m_dh_shared_secret;
sha1_hash m_xor_mask;
};
struct TORRENT_EXTRA_EXPORT encryption_handler
{
std::tuple<int, span<span<char const>>>
encrypt(span<span<char>> iovec);
| ||
relevance 3 | ../include/libtorrent/torrent.hpp:1362 | factor out predictive pieces and all operations on it into a separate class (to use as member here instead) |
factor out predictive pieces and all operations on it into a
separate class (to use as member here instead)../include/libtorrent/torrent.hpp:1362 std::string m_url;
// if this was added from an RSS feed, this is the unique
// identifier in the feed.
std::string m_uuid;
// if this torrent was added by an RSS feed, this is the
// URL to that feed
std::string m_source_feed_url;
#endif
#ifndef TORRENT_DISABLE_PREDICTIVE_PIECES
// this is a list of all pieces that we have announced
// as having, without actually having yet. If we receive
// a request for a piece in this list, we need to hold off
// on responding until we have completed the piece and
// verified its hash. If the hash fails, send reject to
// peers with outstanding requests, and dont_have to other
// peers. This vector is ordered, to make lookups fast.
std::vector<piece_index_t> m_predictive_pieces;
#endif
// the performance counters of this session
counters& m_stats_counters;
// each bit represents a piece. a set bit means
// the piece has had its hash verified. This
// is only used in seed mode (when m_seed_mode
// is true)
typed_bitfield<piece_index_t> m_verified;
// this means there is an outstanding, async, operation
// to verify each piece that has a 1
typed_bitfield<piece_index_t> m_verifying;
// set if there's an error on this torrent
error_code m_error;
// used if there is any resume data. Some of the information from the
// add_torrent_params struct are needed later in the torrent object's life
// cycle, and not in the constructor. So we need to save if away here
std::unique_ptr<add_torrent_params> m_add_torrent_params;
// if the torrent is started without metadata, it may
// still be given a name until the metadata is received
// once the metadata is received this field will no
// longer be used and will be reset
std::unique_ptr<std::string> m_name;
storage_constructor_type m_storage_constructor;
| ||
relevance 3 | ../include/libtorrent/torrent.hpp:1421 | factor out the links (as well as update_list() to a separate class that torrent can inherit) |
factor out the links (as well as update_list() to a separate
class that torrent can inherit)../include/libtorrent/torrent.hpp:1421
// this was the last time _we_ saw a seed in this swarm
std::time_t m_last_seen_complete = 0;
// this is the time last any of our peers saw a seed
// in this swarm
std::time_t m_swarm_last_seen_complete = 0;
// keep a copy if the info-hash here, so it can be accessed from multiple
// threads, and be cheap to access from the client
sha1_hash m_info_hash;
public:
// these are the lists this torrent belongs to. For more
// details about each list, see session_impl.hpp. Each list
// represents a group this torrent belongs to and makes it
// efficient to enumerate only torrents belonging to a specific
// group. Such as torrents that want peer connections or want
// to be ticked etc.
aux::array<link, aux::session_interface::num_torrent_lists, torrent_list_index_t>
m_links;
private:
// m_num_verified = m_verified.count()
std::uint32_t m_num_verified = 0;
// if this torrent is running, this was the time
// when it was started. This is used to have a
// bias towards keeping seeding torrents that
// recently was started, to avoid oscillation
// this is specified at a second granularity
// in session-time. see session_impl for details.
// the reference point is stepped forward every 4
// hours to keep the timestamps fit in 16 bits
time_point32 m_started = aux::time_now32();
// if we're a seed, this is the session time
// timestamp of when we became one
time_point32 m_became_seed = aux::time_now32();
// if we're finished, this is the session time
// timestamp of when we finished
time_point32 m_became_finished = aux::time_now32();
// when checking, this is the first piece we have not
// issued a hash job for
piece_index_t m_checking_piece{0};
// the number of pieces we completed the check of
| ||
relevance 3 | ../include/libtorrent/web_peer_connection.hpp:117 | if we make this be a disk_buffer_holder instead we would save a copy use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
we would save a copy
use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:117 piece_block_progress downloading_piece_progress() const override;
void handle_padfile();
// this has one entry per http-request
// (might be more than the bt requests)
struct file_request_t
{
file_index_t file_index;
int length;
std::int64_t start;
};
std::deque<file_request_t> m_file_requests;
std::string m_url;
web_seed_t* m_web;
// this is used for intermediate storage of pieces to be delivered to the
// bittorrent engine
aux::vector<char> m_piece;
// the number of bytes we've forwarded to the incoming_payload() function
// in the current HTTP response. used to know where in the buffer the
// next response starts
int m_received_body;
// this is the offset inside the current receive
// buffer where the next chunk header will be.
// this is updated for each chunk header that's
// parsed. It does not necessarily point to a valid
// offset in the receive buffer, if we haven't received
// it yet. This offset never includes the HTTP header
int m_chunk_pos;
// this is the number of bytes we've already received
// from the next chunk header we're waiting for
int m_partial_chunk_header;
// the number of responses we've received so far on
// this connection
int m_num_responses;
};
}
#endif // TORRENT_WEB_PEER_CONNECTION_HPP_INCLUDED
| ||
relevance 3 | ../include/libtorrent/kademlia/routing_table.hpp:141 | to improve memory locality and scanning performance, turn the routing table into a single vector with boundaries for the nodes instead. Perhaps replacement nodes should be in a separate vector. |
to improve memory locality and scanning performance, turn the
routing table into a single vector with boundaries for the nodes instead.
Perhaps replacement nodes should be in a separate vector.../include/libtorrent/kademlia/routing_table.hpp:141// the paper:
//
// * Nodes are not marked as being stale, they keep a counter
// that tells how many times in a row they have failed. When
// a new node is to be inserted, the node that has failed
// the most times is replaced. If none of the nodes in the
// bucket has failed, then it is put in the replacement
// cache (just like in the paper).
// * The routing table bucket sizes are larger towards the "top" of the routing
// table. This is to get closer to the target in fewer round-trips.
// * Nodes with lower RTT are preferred and may replace nodes with higher RTT
// * Nodes that are "verified" (i.e. use a node-ID derived from their IP) are
// preferred and may replace nodes that are not verified.
TORRENT_EXTRA_EXPORT bool mostly_verified_nodes(bucket_t const&);
TORRENT_EXTRA_EXPORT bool compare_ip_cidr(address const& lhs, address const& rhs);
class TORRENT_EXTRA_EXPORT routing_table
{
public:
using table_t = aux::vector<routing_table_node>;
routing_table(node_id const& id, udp proto
, int bucket_size
, dht::settings const& settings
, dht_logger* log);
routing_table(routing_table const&) = delete;
routing_table& operator=(routing_table const&) = delete;
#if TORRENT_ABI_VERSION == 1
void status(session_status& s) const;
#endif
void status(std::vector<dht_routing_bucket>& s) const;
void node_failed(node_id const& id, udp::endpoint const& ep);
// adds an endpoint that will never be added to
// the routing table
void add_router_node(udp::endpoint const& router);
// iterates over the router nodes added
using router_iterator = std::set<udp::endpoint>::const_iterator;
router_iterator begin() const { return m_router_nodes.begin(); }
router_iterator end() const { return m_router_nodes.end(); }
enum add_node_status_t {
failed_to_add = 0,
node_added,
need_bucket_split
| ||
relevance 3 | ../include/libtorrent/aux_/storage_utils.hpp:52 | remove this typedef, and use span for disk write operations |
remove this typedef, and use span for disk write
operations../include/libtorrent/aux_/storage_utils.hpp:52
#ifndef TORRENT_STORAGE_UTILS_HPP_INCLUDE
#define TORRENT_STORAGE_UTILS_HPP_INCLUDE
#include <cstdint>
#include <string>
#include "libtorrent/config.hpp"
#include "libtorrent/fwd.hpp"
#include "libtorrent/span.hpp"
#include "libtorrent/span.hpp"
#include "libtorrent/units.hpp"
#include "libtorrent/storage_defs.hpp" // for status_t
#include "libtorrent/session_types.hpp"
namespace libtorrent {
struct part_file;
struct stat_cache;
using iovec_t = span<char>;
namespace aux {
TORRENT_EXTRA_EXPORT int copy_bufs(span<iovec_t const> bufs
, int bytes, span<iovec_t> target);
TORRENT_EXTRA_EXPORT span<iovec_t> advance_bufs(span<iovec_t> bufs, int bytes);
TORRENT_EXTRA_EXPORT void clear_bufs(span<iovec_t const> bufs);
// this is a read or write operation so that readwritev() knows
// what to do when it's actually touching the file
using fileop = std::function<int(file_index_t, std::int64_t, span<iovec_t const>, storage_error&)>;
// this function is responsible for turning read and write operations in the
// torrent space (pieces) into read and write operations in the filesystem
// space (files on disk).
TORRENT_EXTRA_EXPORT int readwritev(file_storage const& files
, span<iovec_t const> bufs, piece_index_t piece, int offset
, storage_error& ec, fileop op);
// moves the files in file_storage f from ``save_path`` to
// ``destination_save_path`` according to the rules defined by ``flags``.
// returns the status code and the new save_path.
TORRENT_EXTRA_EXPORT std::pair<status_t, std::string>
move_storage(file_storage const& f
, std::string const& save_path
, std::string const& destination_save_path
, part_file* pf
, move_flags_t flags, storage_error& ec);
// deletes the files on fs from save_path according to options. Options may
| ||
relevance 2 | ../test/test_piece_picker.cpp:2367 | test picking with partial pieces and other peers present so that both backup_pieces and backup_pieces2 are used |
test picking with partial pieces and other peers present so that both
backup_pieces and backup_pieces2 are used../test/test_piece_picker.cpp:2367 | ||
relevance 2 | ../test/test_storage.cpp:533 | split this test up into smaller parts |
split this test up into smaller parts../test/test_storage.cpp:533 aux::vector<std::string, file_index_t> links;
io.async_check_files(st, &frd, links
, std::bind(&on_check_resume_data, _1, _2, &done));
io.submit_jobs();
ios.reset();
run_until(ios, done);
for (auto const i : info->piece_range())
{
done = false;
io.async_hash(st, i, disk_interface::sequential_access | disk_interface::volatile_read
, std::bind(&on_piece_checked, _1, _2, _3, &done));
io.submit_jobs();
ios.reset();
run_until(ios, done);
}
io.abort(true);
}
void run_test(bool unbuffered)
{
std::string test_path = current_working_directory();
std::cout << "\n=== " << test_path << " ===\n" << std::endl;
std::shared_ptr<torrent_info> info;
std::vector<char> piece0 = new_piece(piece_size);
std::vector<char> piece1 = new_piece(piece_size);
std::vector<char> piece2 = new_piece(piece_size);
std::vector<char> piece3 = new_piece(piece_size);
{
error_code ec;
remove_all(combine_path(test_path, "temp_storage"), ec);
if (ec && ec != boost::system::errc::no_such_file_or_directory)
std::cout << "remove_all '" << combine_path(test_path, "temp_storage")
<< "': " << ec.message() << std::endl;
file_storage fs;
fs.add_file("temp_storage/test1.tmp", 17);
fs.add_file("temp_storage/test2.tmp", 612);
fs.add_file("temp_storage/test3.tmp", 0);
fs.add_file("temp_storage/test4.tmp", 0);
fs.add_file("temp_storage/test5.tmp", 3253);
fs.add_file("temp_storage/test6.tmp", 841);
int const last_file_size = 4 * int(piece_size) - int(fs.total_size());
fs.add_file("temp_storage/test7.tmp", last_file_size);
// File layout
// +-+--+++-------+-------+----------------------------------------------------------------------------------------+
// |1| 2||| file5 | file6 | file7 |
| ||
relevance 2 | ../test/test_dht.cpp:1674 | test num_global_nodes |
test num_global_nodes../test/test_dht.cpp:1674 | ||
relevance 2 | ../test/test_dht.cpp:1675 | test need_refresh |
test need_refresh../test/test_dht.cpp:1675
s.restrict_routing_ips = false;
{
auto const ep = rand_udp_ep(rand_addr);
auto const id = generate_id_impl(ep.address(), 2);
table.node_seen(id, ep, 10);
}
nodes.clear();
for (int i = 0; i < 10000; ++i)
{
auto const ep = rand_udp_ep(rand_addr);
auto const id = generate_id_impl(ep.address(), 6);
table.node_seen(id, ep, 20 + (id[19] & 0xff));
}
std::printf("active buckets: %d\n", table.num_active_buckets());
TEST_CHECK(table.num_active_buckets() == 11
|| table.num_active_buckets() == 12);
TEST_CHECK(std::get<0>(table.size()) >= bucket_size * 10);
print_state(std::cout, table);
table.for_each_node(std::bind(node_push_back, &nodes, _1), nullptr);
std::printf("nodes: %d\n", int(nodes.size()));
std::vector<node_entry> temp;
{
node_id const id = generate_random_id();
table.find_node(id, temp, 0, int(nodes.size()) * 2);
std::printf("returned-all: %d\n", int(temp.size()));
TEST_EQUAL(temp.size(), nodes.size());
}
// This makes sure enough of the nodes returned are actually
// part of the closest nodes
std::set<node_id> duplicates;
const int reps = 50;
for (int r = 0; r < reps; ++r)
{
node_id const id = generate_random_id();
table.find_node(id, temp, 0, bucket_size * 2);
TEST_EQUAL(int(temp.size()), std::min(bucket_size * 2, int(nodes.size())));
std::sort(nodes.begin(), nodes.end(), std::bind(&compare_ref
, std::bind(&node_entry::id, _1)
, std::bind(&node_entry::id, _2), id));
| ||
relevance 2 | ../test/test_dht.cpp:2862 | split this up into smaller test cases |
split this up into smaller test cases../test/test_dht.cpp:2862
TEST_EQUAL(aux::to_hex(sig.bytes)
, "6834284b6b24c3204eb2fea824d82f88883a3d95e8b4a21b8c0ded553d17d17d"
"df9a8a7104b1258f30bed3787e6cb896fca78c58f8e03b5f18f14951a87d9a08");
sha1_hash target_id = item_target_id(test_salt, pk);
TEST_EQUAL(aux::to_hex(target_id), "411eba73b6f087ca51a3795d9c8c938d365e32c1");
}
TORRENT_TEST(signing_test3)
{
// test vector 3
// test content
span<char const> test_content("12:Hello World!", 15);
sha1_hash target_id = item_target_id(test_content);
TEST_EQUAL(aux::to_hex(target_id), "e5f96f6f38320f0f33959cb4d3d656452117aadb");
}
TORRENT_TEST(verify_message)
{
char error_string[200];
// test verify_message
static const key_desc_t msg_desc[] = {
{"A", bdecode_node::string_t, 4, 0},
{"B", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
{"B1", bdecode_node::string_t, 0, 0},
{"B2", bdecode_node::string_t, 0, key_desc_t::last_child},
{"C", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
{"C1", bdecode_node::string_t, 0, 0},
{"C2", bdecode_node::string_t, 0, key_desc_t::last_child},
};
bdecode_node msg_keys[7];
bdecode_node ent;
error_code ec;
char const test_msg[] = "d1:A4:test1:Bd2:B15:test22:B25:test3ee";
bdecode(test_msg, test_msg + sizeof(test_msg)-1, ent, ec);
std::printf("%s\n", print_entry(ent).c_str());
bool ret = verify_message(ent, msg_desc, msg_keys, error_string);
TEST_CHECK(ret);
TEST_CHECK(msg_keys[0]);
if (msg_keys[0]) TEST_EQUAL(msg_keys[0].string_value(), "test");
TEST_CHECK(msg_keys[1]);
TEST_CHECK(msg_keys[2]);
if (msg_keys[2]) TEST_EQUAL(msg_keys[2].string_value(), "test2");
| ||
relevance 2 | ../src/ut_metadata.cpp:103 | if we were to initialize m_metadata_size lazily instead, we would probably be more efficient initialize m_metadata_size |
if we were to initialize m_metadata_size lazily instead,
we would probably be more efficient
initialize m_metadata_size../src/ut_metadata.cpp:103
int div_round_up(int numerator, int denominator)
{
return (numerator + denominator - 1) / denominator;
}
struct ut_metadata_peer_plugin;
struct ut_metadata_plugin final
: torrent_plugin
{
explicit ut_metadata_plugin(torrent& t) : m_torrent(t)
{
// initialize m_metadata_size
if (m_torrent.valid_metadata())
metadata();
}
void on_files_checked() override
{
metadata();
}
std::shared_ptr<peer_plugin> new_connection(
peer_connection_handle const& pc) override;
int get_metadata_size() const
{
TORRENT_ASSERT(m_metadata_size > 0);
return m_metadata_size;
}
span<char const> metadata() const
{
TORRENT_ASSERT(m_torrent.valid_metadata());
if (!m_metadata)
{
m_metadata = m_torrent.torrent_file().metadata();
m_metadata_size = m_torrent.torrent_file().metadata_size();
TORRENT_ASSERT(hasher(m_metadata.get(), m_metadata_size).final()
== m_torrent.torrent_file().info_hash());
}
return {m_metadata.get(), m_metadata_size};
}
bool received_metadata(ut_metadata_peer_plugin& source
, span<char const> buf, int piece, int total_size);
// returns a piece of the metadata that
// we should request.
// returns -1 if we should hold off the request
| ||
relevance 2 | ../src/instantiate_connection.cpp:40 | peer_connection and tracker_connection should probably be flags |
peer_connection and tracker_connection should probably be flags../src/instantiate_connection.cpp:40AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/socket.hpp"
#include "libtorrent/aux_/socket_type.hpp"
#include "libtorrent/utp_socket_manager.hpp"
#include "libtorrent/aux_/instantiate_connection.hpp"
namespace libtorrent { namespace aux {
bool instantiate_connection(io_service& ios
, aux::proxy_settings const& ps, aux::socket_type& s
, void* ssl_context
, utp_socket_manager* sm
, bool peer_connection
, bool tracker_connection)
{
#ifndef TORRENT_USE_OPENSSL
TORRENT_UNUSED(ssl_context);
#endif
if (sm)
{
utp_stream* str;
#ifdef TORRENT_USE_OPENSSL
if (ssl_context)
{
s.instantiate<ssl_stream<utp_stream>>(ios, ssl_context);
str = &s.get<ssl_stream<utp_stream>>()->next_layer();
}
else
#endif
{
s.instantiate<utp_stream>(ios);
str = s.get<utp_stream>();
}
str->set_impl(sm->new_utp_socket(str));
}
#if TORRENT_USE_I2P
else if (ps.type == settings_pack::i2p_proxy)
{
| ||
relevance 2 | ../src/bdecode.cpp:785 | attempt to simplify this implementation by embracing the span |
attempt to simplify this implementation by embracing the span../src/bdecode.cpp:785 }
bdecode_node bdecode(span<char const> buffer
, error_code& ec, int* error_pos, int depth_limit, int token_limit)
{
bdecode_node ret;
ec.clear();
if (buffer.size() > bdecode_token::max_offset)
{
if (error_pos) *error_pos = 0;
ec = bdecode_errors::limit_exceeded;
return ret;
}
// this is the stack of bdecode_token indices, into m_tokens.
// sp is the stack pointer, as index into the array, stack
int sp = 0;
TORRENT_ALLOCA(stack, stack_frame, depth_limit);
char const* start = buffer.data();
char const* end = start + buffer.size();
char const* const orig_start = start;
if (start == end)
TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
while (start <= end)
{
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
if (sp >= depth_limit)
TORRENT_FAIL_BDECODE(bdecode_errors::depth_exceeded);
--token_limit;
if (token_limit < 0)
TORRENT_FAIL_BDECODE(bdecode_errors::limit_exceeded);
// look for a new token
char const t = *start;
int const current_frame = sp;
// if we're currently parsing a dictionary, assert that
// every other node is a string.
if (current_frame > 0
&& ret.m_tokens[stack[current_frame - 1].token].type == bdecode_token::dict)
{
if (stack[current_frame - 1].state == 0)
{
// the current parent is a dict and we are parsing a key.
| ||
relevance 2 | ../src/peer_connection.cpp:2497 | this should probably be based on time instead of number of request messages. For a very high throughput connection, 300 may be a legitimate number of requests to have in flight when getting choked |
this should probably be based on time instead of number
of request messages. For a very high throughput connection, 300
may be a legitimate number of requests to have in flight when
getting choked../src/peer_connection.cpp:2497 , "piece: %d s: %d l: %d invalid request"
, static_cast<int>(r.piece), r.start , r.length);
#endif
write_reject_request(r);
++m_num_invalid_requests;
if (t->alerts().should_post<invalid_request_alert>())
{
// msvc 12 appears to deduce the rvalue reference template
// incorrectly for bool temporaries. So, create a dummy instance
bool const peer_interested = bool(m_peer_interested);
t->alerts().emplace_alert<invalid_request_alert>(
t->get_handle(), m_remote, m_peer_id, r
, t->has_piece_passed(r.piece), peer_interested, false);
}
// every ten invalid request, remind the peer that it's choked
if (!m_peer_interested && m_num_invalid_requests % 10 == 0 && m_choked)
{
if (m_num_invalid_requests > 300 && !m_peer_choked
&& can_disconnect(errors::too_many_requests_when_choked))
{
disconnect(errors::too_many_requests_when_choked, operation_t::bittorrent, peer_error);
return;
}
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::outgoing_message, "CHOKE");
#endif
write_choke();
}
return;
}
// if we have choked the client
// ignore the request
int const blocks_per_piece =
(ti.piece_length() + t->block_size() - 1) / t->block_size();
// disconnect peers that downloads more than foo times an allowed
// fast piece
if (m_choked && fast_idx != -1 && m_accept_fast_piece_cnt[fast_idx] >= 3 * blocks_per_piece
&& can_disconnect(errors::too_many_requests_when_choked))
{
disconnect(errors::too_many_requests_when_choked, operation_t::bittorrent, peer_error);
return;
}
if (m_choked && fast_idx == -1)
{
| ||
relevance 2 | ../src/peer_connection.cpp:3233 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
since we throw away the queue entry once we issue
the disk job, this may happen. Instead, we should keep the
queue entry around, mark it as having been requested from
disk and once the disk job comes back, discard it if it has
been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:3233#endif
auto const i = std::find(m_requests.begin(), m_requests.end(), r);
if (i != m_requests.end())
{
m_counters.inc_stats_counter(counters::cancelled_piece_requests);
m_requests.erase(i);
if (m_requests.empty())
m_counters.inc_stats_counter(counters::num_peers_up_requests, -1);
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::outgoing_message, "REJECT_PIECE", "piece: %d s: %x l: %x cancelled"
, static_cast<int>(r.piece), r.start , r.length);
#endif
write_reject_request(r);
}
else
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "INVALID_CANCEL", "got cancel not in the queue");
#endif
}
}
// -----------------------------
// --------- DHT PORT ----------
// -----------------------------
void peer_connection::incoming_dht_port(int const listen_port)
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::incoming_message, "DHT_PORT", "p: %d", listen_port);
#endif
#ifndef TORRENT_DISABLE_DHT
m_ses.add_dht_node({m_remote.address(), std::uint16_t(listen_port)});
#else
TORRENT_UNUSED(listen_port);
#endif
}
// -----------------------------
// --------- HAVE ALL ----------
// -----------------------------
void peer_connection::incoming_have_all()
{
| ||
relevance 2 | ../src/peer_connection.cpp:4869 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let us remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
Hook this up to connect timeout as well. This would improve performance
because of less work in second_tick(), and might let us remove ticking
entirely eventually../src/peer_connection.cpp:4869 connect_timeout += 20;
#endif
if (d > seconds(connect_timeout)
&& can_disconnect(errors::timed_out))
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "CONNECT_FAILED", "waited %d seconds"
, int(total_seconds(d)));
#endif
connect_failed(errors::timed_out);
return;
}
}
// if we can't read, it means we're blocked on the rate-limiter
// or the disk, not the peer itself. In this case, don't blame
// the peer and disconnect it
bool const may_timeout = bool(m_channel_state[download_channel] & peer_info::bw_network);
if (may_timeout && d > seconds(timeout()) && !m_connecting && m_reading_bytes == 0
&& can_disconnect(errors::timed_out_inactivity))
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "LAST_ACTIVITY", "%d seconds ago"
, int(total_seconds(d)));
#endif
disconnect(errors::timed_out_inactivity, operation_t::bittorrent);
return;
}
// do not stall waiting for a handshake
int timeout = m_settings.get_int (settings_pack::handshake_timeout);
#if TORRENT_USE_I2P
timeout *= is_i2p(*m_socket) ? 4 : 1;
#endif
if (may_timeout
&& !m_connecting
&& in_handshake()
&& d > seconds(timeout))
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "NO_HANDSHAKE", "waited %d seconds"
, int(total_seconds(d)));
#endif
disconnect(errors::timed_out_no_handshake, operation_t::bittorrent);
return;
}
// disconnect peers that we unchoked, but they didn't send a request in
// the last 60 seconds, and we haven't been working on servicing a request
| ||
relevance 2 | ../src/http_tracker_connection.cpp:392 | returning a bool here is redundant. Instead this function should return the peer_entry |
returning a bool here is redundant. Instead this function should
return the peer_entry../src/http_tracker_connection.cpp:392 {
cb->tracker_scrape_response(tracker_req(), resp.complete
, resp.incomplete, resp.downloaded, resp.downloaders);
}
else
{
std::list<address> ip_list;
if (m_tracker_connection)
{
for (auto const& endp : m_tracker_connection->endpoints())
{
ip_list.push_back(endp.address());
}
}
cb->tracker_response(tracker_req(), m_tracker_ip, ip_list, resp);
}
close();
}
bool extract_peer_info(bdecode_node const& info, peer_entry& ret, error_code& ec)
{
// extract peer id (if any)
if (info.type() != bdecode_node::dict_t)
{
ec = errors::invalid_peer_dict;
return false;
}
bdecode_node i = info.dict_find_string("peer id");
if (i && i.string_length() == 20)
{
std::copy(i.string_ptr(), i.string_ptr() + 20, ret.pid.begin());
}
else
{
// if there's no peer_id, just initialize it to a bunch of zeroes
ret.pid.clear();
}
// extract ip
i = info.dict_find_string("ip");
if (!i)
{
ec = errors::invalid_tracker_response;
return false;
}
ret.hostname = i.string_value().to_string();
// extract port
i = info.dict_find_int("port");
if (!i)
| ||
relevance 2 | ../src/session_impl.cpp:540 | is there a reason not to move all of this into init()? and just post it to the io_service? |
is there a reason not to move all of this into init()? and just
post it to the io_service?../src/session_impl.cpp:540 try
#endif
{
(this->*f)(std::forward<Args>(a)...);
}
#ifndef BOOST_NO_EXCEPTIONS
catch (system_error const& e) {
alerts().emplace_alert<session_error_alert>(e.code(), e.what());
pause();
} catch (std::exception const& e) {
alerts().emplace_alert<session_error_alert>(error_code(), e.what());
pause();
} catch (...) {
alerts().emplace_alert<session_error_alert>(error_code(), "unknown error");
pause();
}
#endif
// This function is called by the creating thread, not in the message loop's
// io_service thread.
void session_impl::start_session()
{
#ifndef TORRENT_DISABLE_LOGGING
session_log("start session");
#endif
#ifdef TORRENT_USE_OPENSSL
error_code ec;
m_ssl_ctx.set_verify_mode(boost::asio::ssl::context::verify_none, ec);
m_ssl_ctx.set_default_verify_paths(ec);
m_peer_ssl_ctx.set_verify_mode(boost::asio::ssl::context::verify_none, ec);
#if OPENSSL_VERSION_NUMBER >= 0x90812f
aux::openssl_set_tlsext_servername_callback(m_peer_ssl_ctx.native_handle()
, servername_callback);
aux::openssl_set_tlsext_servername_arg(m_peer_ssl_ctx.native_handle(), this);
#endif // OPENSSL_VERSION_NUMBER
#endif
#ifndef TORRENT_DISABLE_DHT
m_next_dht_torrent = m_torrents.begin();
#endif
m_next_lsd_torrent = m_torrents.begin();
m_global_class = m_classes.new_peer_class("global");
m_tcp_peer_class = m_classes.new_peer_class("tcp");
m_local_peer_class = m_classes.new_peer_class("local");
// local peers are always unchoked
m_classes.at(m_local_peer_class)->ignore_unchoke_slots = true;
// local peers are allowed to exceed the normal connection
// limit by 50%
m_classes.at(m_local_peer_class)->connection_limit_factor = 150;
| ||
relevance 2 | ../src/session_impl.cpp:662 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../src/session_impl.cpp:662 m_lsd_announce_timer.async_wait([this](error_code const& e) {
this->wrap(&session_impl::on_lsd_announce, e); } );
TORRENT_ASSERT(!ec);
#ifndef TORRENT_DISABLE_LOGGING
session_log(" done starting session");
#endif
// this applies unchoke settings from m_settings
recalculate_unchoke_slots();
// apply all m_settings to this session
run_all_updates(*this);
reopen_listen_sockets(false);
#if TORRENT_USE_INVARIANT_CHECKS
check_invariant();
#endif
}
void session_impl::save_state(entry* eptr, save_state_flags_t const flags) const
{
TORRENT_ASSERT(is_single_thread());
entry& e = *eptr;
// make it a dict
e.dict();
if (flags & session::save_settings)
{
entry::dictionary_type& sett = e["settings"].dict();
save_settings_to_dict(m_settings, sett);
}
#ifndef TORRENT_DISABLE_DHT
if (flags & session::save_dht_settings)
{
e["dht"] = dht::save_dht_settings(m_dht_settings);
}
if (m_dht && (flags & session::save_dht_state))
{
e["dht state"] = dht::save_dht_state(m_dht->state());
}
#endif
#ifndef TORRENT_DISABLE_EXTENSIONS
for (auto const& ext : m_ses_extensions[plugins_all_idx])
{
ext->save_state(*eptr);
}
| ||
relevance 2 | ../src/session_impl.cpp:3622 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
make a list for torrents that want to be announced on the DHT so we
don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3622 if (!m_dht_torrents.empty())
{
std::shared_ptr<torrent> t;
do
{
t = m_dht_torrents.front().lock();
m_dht_torrents.pop_front();
} while (!t && !m_dht_torrents.empty());
if (t)
{
t->dht_announce();
return;
}
}
if (m_torrents.empty()) return;
if (m_next_dht_torrent == m_torrents.end())
m_next_dht_torrent = m_torrents.begin();
m_next_dht_torrent->second->dht_announce();
++m_next_dht_torrent;
if (m_next_dht_torrent == m_torrents.end())
m_next_dht_torrent = m_torrents.begin();
}
#endif
void session_impl::on_lsd_announce(error_code const& e)
{
COMPLETE_ASYNC("session_impl::on_lsd_announce");
m_stats_counters.inc_stats_counter(counters::on_lsd_counter);
TORRENT_ASSERT(is_single_thread());
if (e) return;
if (m_abort) return;
ADD_OUTSTANDING_ASYNC("session_impl::on_lsd_announce");
// announce on local network every 5 minutes
int const delay = std::max(m_settings.get_int(settings_pack::local_service_announce_interval)
/ std::max(int(m_torrents.size()), 1), 1);
error_code ec;
m_lsd_announce_timer.expires_from_now(seconds(delay), ec);
m_lsd_announce_timer.async_wait([this](error_code const& err) {
this->wrap(&session_impl::on_lsd_announce, err); });
if (m_torrents.empty()) return;
if (m_next_lsd_torrent == m_torrents.end())
m_next_lsd_torrent = m_torrents.begin();
m_next_lsd_torrent->second->lsd_announce();
++m_next_lsd_torrent;
if (m_next_lsd_torrent == m_torrents.end())
| ||
relevance 2 | ../src/session_impl.cpp:5409 | this function should be removed and users need to deal with the more generic case of having multiple listen ports |
this function should be removed and users need to deal with the
more generic case of having multiple listen ports../src/session_impl.cpp:5409
void session_impl::update_dht_settings()
{
#ifndef TORRENT_DISABLE_DHT
bool const prefer_verified_nodes = m_settings.get_bool(
settings_pack::dht_prefer_verified_node_ids);
m_dht_settings.prefer_verified_node_ids = prefer_verified_nodes;
#endif
}
void session_impl::update_count_slow()
{
error_code ec;
for (auto const& tp : m_torrents)
{
tp.second->on_inactivity_tick(ec);
}
}
std::uint16_t session_impl::listen_port() const
{
return listen_port(nullptr);
}
std::uint16_t session_impl::listen_port(listen_socket_t* sock) const
{
if (m_listen_sockets.empty()) return 0;
if (sock)
{
// if we're using a proxy, we won't be able to accept any TCP
// connections. We may be able to accept uTP connections though, so
// announce the UDP port instead
if (sock->flags & listen_socket_t::proxy)
return std::uint16_t(sock->udp_external_port());
if (!(sock->flags & listen_socket_t::accept_incoming))
return 0;
return std::uint16_t(sock->tcp_external_port());
}
#ifdef TORRENT_USE_OPENSSL
for (auto const& s : m_listen_sockets)
{
if (!(s->flags & listen_socket_t::accept_incoming)) continue;
if (s->ssl == transport::plaintext)
return std::uint16_t(s->tcp_external_port());
}
return 0;
#else
| ||
relevance 2 | ../src/session_impl.cpp:5448 | this function should be removed and users need to deal with the more generic case of having multiple ssl ports |
this function should be removed and users need to deal with the
more generic case of having multiple ssl ports../src/session_impl.cpp:5448 return 0;
return std::uint16_t(sock->tcp_external_port());
}
#ifdef TORRENT_USE_OPENSSL
for (auto const& s : m_listen_sockets)
{
if (!(s->flags & listen_socket_t::accept_incoming)) continue;
if (s->ssl == transport::plaintext)
return std::uint16_t(s->tcp_external_port());
}
return 0;
#else
sock = m_listen_sockets.front().get();
if (!(sock->flags & listen_socket_t::accept_incoming)) return 0;
return std::uint16_t(sock->tcp_external_port());
#endif
}
std::uint16_t session_impl::ssl_listen_port() const
{
return ssl_listen_port(nullptr);
}
std::uint16_t session_impl::ssl_listen_port(listen_socket_t* sock) const
{
#ifdef TORRENT_USE_OPENSSL
if (sock)
{
if (!(sock->flags & listen_socket_t::accept_incoming)) return 0;
return std::uint16_t(sock->tcp_external_port());
}
if (m_settings.get_int(settings_pack::proxy_type) != settings_pack::none)
return 0;
for (auto const& s : m_listen_sockets)
{
if (!(s->flags & listen_socket_t::accept_incoming)) continue;
if (s->ssl == transport::ssl)
return std::uint16_t(s->tcp_external_port());
}
#else
TORRENT_UNUSED(sock);
#endif
return 0;
}
int session_impl::get_listen_port(transport const ssl, aux::listen_socket_handle const& s)
{
| ||
relevance 2 | ../src/session_impl.cpp:6282 | this should be factored into the udp socket, so we only have the code once |
this should be factored into the udp socket, so we only have the
code once../src/session_impl.cpp:6282 int session_impl::download_rate_limit_depr() const
{
return download_rate_limit(m_global_class);
}
#endif // DEPRECATE
namespace {
template <typename Socket>
void set_tos(Socket& s, int v, error_code& ec)
{
#if defined IPV6_TCLASS
if (is_v6(s.local_endpoint(ec)))
s.set_option(traffic_class(char(v)), ec);
else if (!ec)
#endif
s.set_option(type_of_service(char(v)), ec);
}
}
void session_impl::update_peer_tos()
{
int const tos = m_settings.get_int(settings_pack::peer_tos);
for (auto const& l : m_listen_sockets)
{
if (l->sock)
{
error_code ec;
set_tos(*l->sock, tos, ec);
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
session_log(">>> SET_TOS [ tcp (%s %d) tos: %x e: %s ]"
, l->sock->local_endpoint().address().to_string().c_str()
, l->sock->local_endpoint().port(), tos, ec.message().c_str());
}
#endif
}
if (l->udp_sock)
{
error_code ec;
set_tos(l->udp_sock->sock, tos, ec);
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
session_log(">>> SET_TOS [ udp (%s %d) tos: %x e: %s ]"
, l->udp_sock->sock.local_endpoint().address().to_string().c_str()
, l->udp_sock->sock.local_port()
| ||
relevance 2 | ../src/torrent.cpp:240 | p should probably be moved in here |
p should probably be moved in here../src/torrent.cpp:240 , m_connect_boost_counter(static_cast<std::uint8_t>(settings().get_int(settings_pack::torrent_connect_boost)))
, m_incomplete(0xffffff)
, m_announce_to_dht(!(p.flags & torrent_flags::paused))
, m_ssl_torrent(false)
, m_deleted(false)
, m_last_download(seconds32(p.last_download))
, m_last_upload(seconds32(p.last_upload))
, m_auto_managed(p.flags & torrent_flags::auto_managed)
, m_current_gauge_state(static_cast<std::uint32_t>(no_gauge_state))
, m_moving_storage(false)
, m_inactive(false)
, m_downloaded(0xffffff)
, m_progress_ppm(0)
, m_torrent_initialized(false)
, m_outstanding_file_priority(false)
, m_complete_sent(false)
{
// we cannot log in the constructor, because it relies on shared_from_this
// being initialized, which happens after the constructor returns.
m_add_torrent_params.reset(new add_torrent_params(p));
#if TORRENT_USE_UNC_PATHS
m_save_path = canonicalize_path(m_save_path);
#endif
if (!m_apply_ip_filter)
{
inc_stats_counter(counters::non_filter_torrents);
}
if (!p.ti || !p.ti->is_valid())
{
// we don't have metadata for this torrent. We'll download
// it either through the URL passed in, or through a metadata
// extension. Make sure that when we save resume data for this
// torrent, we also save the metadata
m_magnet_link = true;
}
if (!m_torrent_file)
m_torrent_file = (p.ti ? p.ti : std::make_shared<torrent_info>(m_info_hash));
// in case we added the torrent via magnet link, make sure to preserve any
// DHT nodes passed in on the URI in the torrent file itself
if (!m_torrent_file->is_valid())
{
for (auto const& n : p.dht_nodes)
m_torrent_file->add_node(n);
}
| ||
relevance 2 | ../src/torrent.cpp:353 | set_merkle_tree should probably take the vector as && |
set_merkle_tree should probably take the vector as &&../src/torrent.cpp:353 // torrent_info object.
if (!m_torrent_file->is_valid())
m_torrent_file->add_tracker(e.url, e.tier, announce_entry::tracker_source(e.source));
}
}
std::sort(m_trackers.begin(), m_trackers.end()
, [] (announce_entry const& lhs, announce_entry const& rhs)
{ return lhs.tier < rhs.tier; });
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
// --- MERKLE TREE ---
if (m_torrent_file->is_valid()
&& m_torrent_file->is_merkle_torrent())
{
if (p.merkle_tree.size() == m_torrent_file->merkle_tree().size())
{
std::vector<sha1_hash> tree(p.merkle_tree);
m_torrent_file->set_merkle_tree(tree);
}
else
{
| ||
relevance 2 | ../src/torrent.cpp:594 | post alert |
post alert../src/torrent.cpp:594 if (m_current_gauge_state != no_gauge_state)
inc_stats_counter(m_current_gauge_state + counters::num_checking_torrents, -1);
if (new_gauge_state != no_gauge_state)
inc_stats_counter(new_gauge_state + counters::num_checking_torrents, 1);
TORRENT_ASSERT(new_gauge_state >= 0);
TORRENT_ASSERT(new_gauge_state <= no_gauge_state);
m_current_gauge_state = static_cast<std::uint32_t>(new_gauge_state);
}
void torrent::leave_seed_mode(seed_mode_t const checking)
{
if (!m_seed_mode) return;
if (checking == seed_mode_t::check_files)
{
// this means the user promised we had all the
// files, but it turned out we didn't. This is
// an error.
#ifndef TORRENT_DISABLE_LOGGING
debug_log("*** FAILED SEED MODE, rechecking");
#endif
}
#ifndef TORRENT_DISABLE_LOGGING
debug_log("*** LEAVING SEED MODE (%s)"
, checking == seed_mode_t::skip_checking ? "as seed" : "as non-seed");
#endif
m_seed_mode = false;
// seed is false if we turned out not
// to be a seed after all
if (checking == seed_mode_t::check_files
&& state() != torrent_status::checking_resume_data)
{
m_have_all = false;
set_state(torrent_status::downloading);
force_recheck();
}
m_num_verified = 0;
m_verified.clear();
m_verifying.clear();
set_need_save_resume();
}
void torrent::verified(piece_index_t const piece)
{
TORRENT_ASSERT(!m_verified.get_bit(piece));
++m_num_verified;
| ||
relevance 2 | ../src/torrent.cpp:1859 | add a unit test where we don't have metadata, connect to a peer that sends a bitfield that's too large, then we get the metadata |
add a unit test where we don't have metadata, connect to a peer
that sends a bitfield that's too large, then we get the metadata../src/torrent.cpp:1859 for (auto const& f : m_add_torrent_params->renamed_files)
{
if (f.first < file_index_t(0) || f.first >= fs.end_file()) continue;
m_torrent_file->rename_file(file_index_t(f.first), f.second);
}
}
construct_storage();
#ifndef TORRENT_DISABLE_SHARE_MODE
if (m_share_mode && valid_metadata())
{
// in share mode, all pieces have their priorities initialized to 0
m_file_priority.clear();
m_file_priority.resize(m_torrent_file->num_files(), dont_download);
}
#endif
// it's important to initialize the peers early, because this is what will
// fix up their have-bitmasks to have the correct size
if (!m_connections_initialized)
{
m_connections_initialized = true;
// all peer connections have to initialize themselves now that the metadata
// is available
// copy the peer list since peers may disconnect and invalidate
// m_connections as we initialize them
for (auto c : m_connections)
{
auto pc = c->self();
if (pc->is_disconnecting()) continue;
pc->on_metadata_impl();
if (pc->is_disconnecting()) continue;
pc->init();
}
}
// in case file priorities were passed in via the add_torrent_params
// and also in the case of share mode, we need to update the priorities
// this has to be applied before piece priority
if (!m_file_priority.empty()) update_piece_priorities(m_file_priority);
if (m_add_torrent_params)
{
piece_index_t idx(0);
if (m_add_torrent_params->piece_priorities.size() > std::size_t(m_torrent_file->num_pieces()))
m_add_torrent_params->piece_priorities.resize(std::size_t(m_torrent_file->num_pieces()));
for (auto prio : m_add_torrent_params->piece_priorities)
{
if (has_picker() || prio != default_priority)
| ||
relevance 2 | ../src/torrent.cpp:4105 | use chrono type for time duration |
use chrono type for time duration../src/torrent.cpp:4105 // announcing a piece may invalidate the torrent_peer pointers
// so we can't use them anymore
downloaders.clear();
peers.clear();
// make the disk cache flush the piece to disk
if (m_storage)
m_ses.disk_thread().async_flush_piece(m_storage, index);
m_picker->piece_passed(index);
update_gauge();
we_have(index);
update_want_tick();
}
#ifndef TORRENT_DISABLE_PREDICTIVE_PIECES
// we believe we will complete this piece very soon
// announce it to peers ahead of time to eliminate the
// round-trip times involved in announcing it, requesting it
// and sending it
void torrent::predicted_have_piece(piece_index_t const index, int const milliseconds)
{
auto const i = std::lower_bound(m_predictive_pieces.begin()
, m_predictive_pieces.end(), index);
if (i != m_predictive_pieces.end() && *i == index) return;
for (auto p : m_connections)
{
TORRENT_INCREMENT(m_iterating_connections);
#ifndef TORRENT_DISABLE_LOGGING
p->peer_log(peer_log_alert::outgoing, "PREDICTIVE_HAVE", "piece: %d expected in %d ms"
, static_cast<int>(index), milliseconds);
#else
TORRENT_UNUSED(milliseconds);
#endif
p->announce_piece(index);
}
m_predictive_pieces.insert(i, index);
}
#endif
void torrent::piece_failed(piece_index_t const index)
{
// if the last piece fails the peer connection will still
// think that it has received all of it until this function
// resets the download queue. So, we cannot do the
// invariant check here since it assumes:
// (total_done == m_torrent_file->total_size()) => is_seed()
INVARIANT_CHECK;
TORRENT_ASSERT(is_single_thread());
| ||
relevance 2 | ../src/torrent.cpp:4501 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
session host resolver interface../src/torrent.cpp:4501 }
catch (std::exception const& e)
{
TORRENT_UNUSED(e);
m_storage.reset();
#ifndef TORRENT_DISABLE_LOGGING
debug_log("Failed to flush disk cache: %s", e.what());
#endif
// clients may rely on this alert to be posted, so it's probably a
// good idea to post it here, even though we failed
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
}
}
else
{
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
}
if (!m_apply_ip_filter)
{
inc_stats_counter(counters::non_filter_torrents, -1);
m_apply_ip_filter = true;
}
m_paused = false;
m_auto_managed = false;
update_state_list();
for (torrent_list_index_t i{}; i != m_links.end_index(); ++i)
{
if (!m_links[i].in_list()) continue;
m_links[i].unlink(m_ses.torrent_list(i), i);
}
// don't re-add this torrent to the state-update list
m_state_subscription = false;
}
// this is called when we're destructing non-gracefully. i.e. we're _just_
// destructing everything.
void torrent::panic()
{
m_storage.reset();
// if there are any other peers allocated still, we need to clear them
// now. They can't be cleared later because the allocator will already
// have been destructed
if (m_peer_list) m_peer_list->clear();
m_connections.clear();
m_outgoing_pids.clear();
m_peers_to_disconnect.clear();
| ||
relevance 2 | ../src/torrent.cpp:7134 | if peer is a really good peer, maybe we shouldn't disconnect it perhaps this logic should be disabled if we have too many idle peers (with some definition of idle) |
if peer is a really good peer, maybe we shouldn't disconnect it
perhaps this logic should be disabled if we have too many idle peers
(with some definition of idle)../src/torrent.cpp:7134
m_peers_to_disconnect.reserve(m_connections.size() + 1);
m_connections.reserve(m_connections.size() + 1);
#if TORRENT_USE_ASSERTS
error_code ec;
TORRENT_ASSERT(p->remote() == p->get_socket()->remote_endpoint(ec) || ec);
#endif
TORRENT_ASSERT(p->peer_info_struct() != nullptr);
// we need to do this after we've added the peer to the peer_list
// since that's when the peer is assigned its peer_info object,
// which holds the rank
if (maybe_replace_peer)
{
// now, find the lowest rank peer and disconnect that
// if it's lower rank than the incoming connection
peer_connection* peer = find_lowest_ranking_peer();
if (peer != nullptr && peer->peer_rank() < p->peer_rank())
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
debug_log("CLOSING CONNECTION \"%s\" peer list full (low peer rank) "
"connections: %d limit: %d"
, print_endpoint(peer->remote()).c_str()
, num_peers()
, m_max_connections);
}
#endif
peer->disconnect(errors::too_many_connections, operation_t::bittorrent);
p->peer_disconnected_other();
}
else
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
debug_log("CLOSING CONNECTION \"%s\" peer list full (low peer rank) "
"connections: %d limit: %d"
, print_endpoint(p->remote()).c_str()
, num_peers()
, m_max_connections);
}
#endif
p->disconnect(errors::too_many_connections, operation_t::bittorrent);
// we have to do this here because from the peer's point of view
// it wasn't really attached to the torrent, but we do need
// to let peer_list know we're removing it
| ||
relevance 2 | ../src/utp_stream.cpp:294 | it would be nice if not everything would have to be public here |
it would be nice if not everything would have to be public here../src/utp_stream.cpp:294 void do_ledbat(int acked_bytes, int delay, int in_flight);
int packet_timeout() const;
bool test_socket_state();
void maybe_trigger_receive_callback();
void maybe_trigger_send_callback();
bool cancel_handlers(error_code const& ec, bool shutdown);
bool consume_incoming_data(
utp_header const* ph, std::uint8_t const* ptr, int payload_size, time_point now);
void update_mtu_limits();
void experienced_loss(std::uint32_t seq_nr, time_point now);
void set_state(int s);
packet_ptr acquire_packet(int const allocate) { return m_sm.acquire_packet(allocate); }
void release_packet(packet_ptr p) { m_sm.release_packet(std::move(p)); }
// non-copyable
utp_socket_impl(utp_socket_impl const&) = delete;
utp_socket_impl const& operator=(utp_socket_impl const&) = delete;
#if TORRENT_USE_INVARIANT_CHECKS
void check_receive_buffers() const;
void check_invariant() const;
#endif
utp_socket_manager& m_sm;
std::weak_ptr<utp_socket_interface> m_sock;
// userdata pointer passed along
// with any callback. This is initialized to 0
// then set to point to the utp_stream when
// hooked up, and then reset to 0 once the utp_stream
// detaches. This is used to know whether or not
// the socket impl is still attached to a utp_stream
// object. When it isn't, we'll never be able to
// signal anything back to the client, and in case
// of errors, we just have to delete ourselves
// i.e. transition to the UTP_STATE_DELETED state
void* m_userdata;
// This is a platform-independent replacement
// for the regular iovec type in posix. Since
// it's not used in any system call, we might as
// well define our own type instead of wrapping
// the system's type.
struct iovec_t
{
iovec_t(void* b, std::size_t l): buf(b), len(l) {}
void* buf;
std::size_t len;
| ||
relevance 2 | ../src/udp_tracker_connection.cpp:77 | support authentication here. tracker_req().auth |
support authentication here. tracker_req().auth../src/udp_tracker_connection.cpp:77 udp_tracker_connection::m_connection_cache;
std::mutex udp_tracker_connection::m_cache_mutex;
udp_tracker_connection::udp_tracker_connection(
io_service& ios
, tracker_manager& man
, tracker_request const& req
, std::weak_ptr<request_callback> c)
: tracker_connection(man, req, ios, std::move(c))
, m_transaction_id(0)
, m_attempts(0)
, m_state(action_t::error)
, m_abort(false)
{
update_transaction_id();
}
void udp_tracker_connection::start()
{
std::string hostname;
std::string protocol;
int port;
error_code ec;
std::tie(protocol, std::ignore, hostname, port, std::ignore)
= parse_url_components(tracker_req().url, ec);
if (port == -1) port = protocol == "http" ? 80 : 443;
if (ec)
{
tracker_connection::fail(ec);
return;
}
aux::session_settings const& settings = m_man.settings();
int const proxy_type = settings.get_int(settings_pack::proxy_type);
if (settings.get_bool(settings_pack::proxy_hostnames)
&& (proxy_type == settings_pack::socks5
|| proxy_type == settings_pack::socks5_pw))
{
m_hostname = hostname;
m_target.port(std::uint16_t(port));
start_announce();
}
else
{
using namespace std::placeholders;
ADD_OUTSTANDING_ASYNC("udp_tracker_connection::name_lookup");
| ||
relevance 2 | ../src/alert_manager.cpp:75 | keep a count of the number of threads waiting. Only if it's > 0 notify them |
keep a count of the number of threads waiting. Only if it's
> 0 notify them../src/alert_manager.cpp:75 return m_alerts[m_generation].front();
// this call can be interrupted prematurely by other signals
m_condition.wait_for(lock, max_wait);
if (!m_alerts[m_generation].empty())
return m_alerts[m_generation].front();
return nullptr;
}
void alert_manager::maybe_notify(alert* a)
{
if (m_alerts[m_generation].size() == 1)
{
// we just posted to an empty queue. If anyone is waiting for
// alerts, we need to notify them. Also (potentially) call the
// user supplied m_notify callback to let the client wake up its
// message loop to poll for alerts.
if (m_notify) m_notify();
m_condition.notify_all();
}
#ifndef TORRENT_DISABLE_EXTENSIONS
for (auto& e : m_ses_extensions)
e->on_alert(a);
#else
TORRENT_UNUSED(a);
#endif
}
void alert_manager::set_notify_function(std::function<void()> const& fun)
{
std::unique_lock<std::recursive_mutex> lock(m_mutex);
m_notify = fun;
if (!m_alerts[m_generation].empty())
{
if (m_notify) m_notify();
}
}
#ifndef TORRENT_DISABLE_EXTENSIONS
void alert_manager::add_extension(std::shared_ptr<plugin> ext)
{
m_ses_extensions.push_back(ext);
}
#endif
void alert_manager::get_all(std::vector<alert*>& alerts)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
| ||
relevance 2 | ../src/path.cpp:352 | test this on a FAT volume to see what error we get! |
test this on a FAT volume to see what error we get!../src/path.cpp:352 // it's possible CreateHardLink will copy the file internally too,
// if the filesystem does not support it.
ec.assign(GetLastError(), system_category());
return;
}
// fall back to making a copy
#else
// assume posix's link() function exists
int ret = ::link(n_exist.c_str(), n_link.c_str());
if (ret == 0)
{
ec.clear();
return;
}
// most errors are passed through, except for the ones that indicate that
// hard links are not supported and require a copy.
if (errno != EMLINK && errno != EXDEV)
{
// some error happened, report up to the caller
ec.assign(errno, system_category());
return;
}
// fall back to making a copy
#endif
// if we get here, we should copy the file
copy_file(file, link, ec);
}
bool is_directory(std::string const& f, error_code& ec)
{
ec.clear();
error_code e;
file_status s;
stat_file(f, &s, e);
if (!e && s.mode & file_status::directory) return true;
ec = e;
return false;
}
void recursive_copy(std::string const& old_path, std::string const& new_path, error_code& ec)
{
TORRENT_ASSERT(!ec);
if (is_directory(old_path, ec))
{
| ||
relevance 2 | ../src/peer_list.cpp:471 | it would be nice if there was a way to iterate over these torrent_peer objects in the order they are allocated in the pool instead. It would probably be more efficient |
it would be nice if there was a way to iterate over these
torrent_peer objects in the order they are allocated in the pool
instead. It would probably be more efficient../src/peer_list.cpp:471 , int session_time, torrent_state* state)
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
const int candidate_count = 10;
peers.reserve(candidate_count);
int erase_candidate = -1;
if (bool(m_finished) != state->is_finished)
recalculate_connect_candidates(state);
external_ip const& external = state->ip;
int external_port = state->port;
if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
int max_peerlist_size = state->max_peerlist_size;
for (int iterations = std::min(int(m_peers.size()), 300);
iterations > 0; --iterations)
{
++state->loop_counter;
if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
torrent_peer& pe = *m_peers[m_round_robin];
TORRENT_ASSERT(pe.in_use);
int current = m_round_robin;
// if the number of peers is growing large
// we need to start weeding.
if (int(m_peers.size()) >= max_peerlist_size * 0.95
&& max_peerlist_size > 0)
{
if (is_erase_candidate(pe)
&& (erase_candidate == -1
|| !compare_peer_erase(*m_peers[erase_candidate], pe)))
{
if (should_erase_immediately(pe))
{
if (erase_candidate > current) --erase_candidate;
erase_peer(m_peers.begin() + current, state);
continue;
}
else
{
erase_candidate = current;
}
| ||
relevance 2 | ../src/piece_picker.cpp:1946 | make the 2048 limit configurable |
make the 2048 limit configurable../src/piece_picker.cpp:1946 // indicating which path thought the picker we took to arrive at the
// returned block picks.
picker_flags_t piece_picker::pick_pieces(typed_bitfield<piece_index_t> const& pieces
, std::vector<piece_block>& interesting_blocks, int num_blocks
, int prefer_contiguous_blocks, torrent_peer* peer
, picker_options_t options, std::vector<piece_index_t> const& suggested_pieces
, int num_peers
, counters& pc
) const
{
TORRENT_ASSERT(peer == nullptr || peer->in_use);
picker_flags_t ret;
// prevent the number of partial pieces to grow indefinitely
// make this scale by the number of peers we have. For large
// scale clients, we would have more peers, and allow a higher
// threshold for the number of partials
// the second condition is to make sure we cap the number of partial
// _bytes_. The larger the pieces are, the fewer partial pieces we want.
// 2048 corresponds to 32 MiB
const int num_partials = int(m_downloads[piece_pos::piece_downloading].size());
if (num_partials > num_peers * 3 / 2
|| num_partials * m_blocks_per_piece > 2048)
{
// if we have too many partial pieces, prioritize completing
// them. In order for this to have an effect, also disable
// prefer whole pieces (otherwise partial pieces would be de-prioritized)
options |= prioritize_partials;
prefer_contiguous_blocks = 0;
ret |= picker_log_alert::partial_ratio;
}
if (prefer_contiguous_blocks) ret |= picker_log_alert::prefer_contiguous;
// only one of rarest_first and sequential can be set.
TORRENT_ASSERT(((options & rarest_first) ? 1 : 0)
+ ((options & sequential) ? 1 : 0) <= 1);
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
#endif
TORRENT_ASSERT(num_blocks > 0);
TORRENT_ASSERT(pieces.size() == int(m_piece_map.size()));
TORRENT_ASSERT(!m_priority_boundaries.empty() || m_dirty);
// this will be filled with blocks that we should not request
// unless we can't find num_blocks among the other ones.
std::vector<piece_block> backup_blocks;
std::vector<piece_block> backup_blocks2;
static const std::vector<piece_index_t> empty_vector;
| ||
relevance 2 | ../src/piece_picker.cpp:2589 | the first_block returned here is the largest free range, not the first-fit range, which would be better |
the first_block returned here is the largest free range, not
the first-fit range, which would be better../src/piece_picker.cpp:2589 {
for (auto const& b : m_block_info)
{
TORRENT_ASSERT(b.peer == nullptr || static_cast<torrent_peer*>(b.peer)->in_use);
}
}
#endif
// detach the given peer from every block it is recorded against.
// used to make sure no dangling torrent_peer pointers are left in the
// block info table once a peer goes away
void piece_picker::clear_peer(torrent_peer* peer)
{
	for (block_info& info : m_block_info)
	{
		if (info.peer != peer) continue;
		info.peer = nullptr;
	}
}
// the first bool is true if this is the only peer that has requested and downloaded
// blocks from this piece.
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
std::tuple<bool, bool, int, int> piece_picker::requested_from(
piece_picker::downloading_piece const& p
, int const num_blocks_in_piece, torrent_peer* peer) const
{
bool exclusive = true;
bool exclusive_active = true;
int contiguous_blocks = 0;
int max_contiguous = 0;
int first_block = 0;
int idx = -1;
for (auto const& info : blocks_for_piece(p))
{
++idx;
TORRENT_ASSERT(info.peer == nullptr || info.peer->in_use);
TORRENT_ASSERT(info.piece_index == p.index);
if (info.state == piece_picker::block_info::state_none)
{
++contiguous_blocks;
continue;
}
if (contiguous_blocks > max_contiguous)
{
max_contiguous = contiguous_blocks;
first_block = idx - contiguous_blocks;
}
contiguous_blocks = 0;
if (info.peer != peer)
{
exclusive = false;
if (info.state == piece_picker::block_info::state_requested
&& info.peer != nullptr)
| ||
relevance 2 | ../src/piece_picker.cpp:3407 | it would be nice if this could be folded into lock_piece() the main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member Is there ever a case where we call write filed without also locking the piece? Perhaps write_failed() should imply locking it. |
it would be nice if this could be folded into lock_piece()
the main distinction is that this also maintains the m_num_passed
counter and the passed_hash_check member
Is there ever a case where we call write_failed() without also locking
the piece? Perhaps write_failed() should imply locking it.../src/piece_picker.cpp:3407 auto const state = m_piece_map[piece].download_queue();
if (state == piece_pos::piece_open) return;
auto const i = find_dl_piece(state, piece);
if (i == m_downloads[state].end()) return;
TORRENT_ASSERT(i->passed_hash_check == false);
if (i->passed_hash_check)
{
// it's not clear why this would happen,
// but it seems reasonable to not break the
// accounting over it.
i->passed_hash_check = false;
TORRENT_ASSERT(m_num_passed > 0);
--m_num_passed;
}
// prevent this piece from being picked until it's restored
i->locked = true;
}
void piece_picker::write_failed(piece_block const block)
{
INVARIANT_CHECK;
#if TORRENT_USE_INVARIANT_CHECKS
check_piece_state();
#endif
#ifdef TORRENT_PICKER_LOG
std::cerr << "[" << this << "] " << "write_failed( {" << block.piece_index << ", " << block.block_index << "} )" << std::endl;
#endif
auto const state = m_piece_map[block.piece_index].download_queue();
if (state == piece_pos::piece_open) return;
auto i = find_dl_piece(state, block.piece_index);
if (i == m_downloads[state].end()) return;
auto const binfo = mutable_blocks_for_piece(*i);
block_info& info = binfo[block.block_index];
TORRENT_ASSERT(&info >= &m_block_info[0]);
TORRENT_ASSERT(&info < &m_block_info[0] + m_block_info.size());
TORRENT_ASSERT(info.piece_index == block.piece_index);
TORRENT_ASSERT(info.state == block_info::state_writing);
TORRENT_ASSERT(info.num_peers == 0);
TORRENT_ASSERT(i->writing > 0);
TORRENT_ASSERT(info.state == block_info::state_writing);
if (info.state == block_info::state_finished) return;
if (info.state == block_info::state_writing) --i->writing;
| ||
relevance 2 | ../src/storage_utils.cpp:350 | technically, this is where the transaction of moving the files is completed. This is where the new save_path should be committed. If there is an error in the code below, that should not prevent the new save path to be set. Maybe it would make sense to make the save_path an in-out parameter |
technically, this is where the transaction of moving the files
is completed. This is where the new save_path should be committed. If
there is an error in the code below, that should not prevent the new
save path to be set. Maybe it would make sense to make the save_path
an in-out parameter../src/storage_utils.cpp:350 while (--file_index >= file_index_t(0))
{
// files moved out to absolute paths are not moved
if (f.file_absolute_path(file_index)) continue;
// if we ended up copying the file, don't do anything during
// roll-back
if (copied_files[file_index]) continue;
std::string const old_path = combine_path(save_path, f.file_path(file_index));
std::string const new_path = combine_path(new_save_path, f.file_path(file_index));
// ignore errors when rolling back
error_code ignore;
move_file(new_path, old_path, ignore);
}
return { status_t::fatal_disk_error, save_path };
}
std::set<std::string> subdirs;
for (auto const i : f.file_range())
{
// files moved out to absolute paths are not moved
if (f.file_absolute_path(i)) continue;
if (has_parent_path(f.file_path(i)))
subdirs.insert(parent_path(f.file_path(i)));
// if we ended up renaming the file instead of moving it, there's no
// need to delete the source.
if (copied_files[i] == false) continue;
std::string const old_path = combine_path(save_path, f.file_path(i));
// we may still have some files in old save_path
// eg. if (flags == dont_replace && exists(new_path))
// ignore errors when removing
error_code ignore;
remove(old_path, ignore);
}
for (std::string const& s : subdirs)
{
error_code err;
std::string subdir = combine_path(save_path, s);
while (!compare_path(subdir, save_path) && !err)
{
remove(subdir, err);
| ||
relevance 2 | ../src/storage_utils.cpp:496 | is this risky? The upper layer will assume we have the whole file. Perhaps we should verify that at least the size of the file is correct |
is this risky? The upper layer will assume we have the
whole file. Perhaps we should verify that at least the size
of the file is correct../src/storage_utils.cpp:496 if (!links.empty())
{
TORRENT_ASSERT(int(links.size()) == fs.num_files());
// if this is a mutable torrent, and we need to pick up some files
// from other torrents, do that now. Note that there is an inherent
// race condition here. We checked if the files existed on a different
// thread a while ago. These files may no longer exist or may have been
// moved. If so, we just fail. The user is responsible to not touch
// other torrents until a new mutable torrent has been completely
// added.
for (auto const idx : fs.file_range())
{
std::string const& s = links[idx];
if (s.empty()) continue;
error_code err;
std::string file_path = fs.file_path(idx, save_path);
hard_link(s, file_path, err);
// if the file already exists, that's not an error
if (!err || err == boost::system::errc::file_exists)
continue;
ec.ec = err;
ec.file(idx);
ec.operation = operation_t::file_hard_link;
return false;
}
}
#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
bool const seed = rd.have_pieces.all_set()
&& rd.have_pieces.size() >= fs.num_pieces();
// parse have bitmask. Verify that the files we expect to have
// actually do exist
for (piece_index_t i(0); i < piece_index_t(rd.have_pieces.size()); ++i)
{
if (rd.have_pieces.get_bit(i) == false) continue;
std::vector<file_slice> f = fs.map_block(i, 0, 1);
TORRENT_ASSERT(!f.empty());
file_index_t const file_index = f[0].file_index;
// files with priority zero may not have been saved to disk at their
// expected location, but is likely to be in a partfile. Just exempt it
// from checking
if (file_index < file_priority.end_index()
&& file_priority[file_index] == dont_download)
continue;
| ||
relevance 2 | ../src/web_peer_connection.cpp:615 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following pad files will make it complicated |
just make this peer not have the pieces
associated with the file we just requested. Only
when it doesn't have any of the file do the following
pad files will make it complicated../src/web_peer_connection.cpp:615
peer_connection::received_invalid_data(index, single_peer);
// if we don't think we have any of the files, allow banning the web seed
if (num_have_pieces() == 0) return true;
// don't disconnect, we won't request anything from this file again
return false;
}
// called when the payload being received belongs to a pad file
// request. Simply delegates to the shared pad file handling.
void web_peer_connection::on_receive_padfile()
{
	handle_padfile();
}
// called when the web server responded with an error status code.
// Schedules a retry of this web seed (honoring any "retry-after"
// header), posts a url_seed_alert if that alert category is enabled,
// and disconnects using the HTTP status code as the error.
// "bytes_left" is the number of received bytes not yet accounted for
// in the stats counters.
void web_peer_connection::handle_error(int const bytes_left)
{
	std::shared_ptr<torrent> t = associated_torrent().lock();
	TORRENT_ASSERT(t);

	// temporarily unavailable, retry later
	t->retry_web_seed(this, m_parser.header_duration("retry-after"));
	if (t->alerts().should_post<url_seed_alert>())
	{
		// e.g. "503 Service Unavailable"
		std::string const error_msg = to_string(m_parser.status_code()).data()
			+ (" " + m_parser.message());
		t->alerts().emplace_alert<url_seed_alert>(t->get_handle(), m_url
			, error_msg);
	}
	// account for the remaining bytes before tearing down the connection
	received_bytes(0, bytes_left);
	disconnect(error_code(m_parser.status_code(), http_category()), operation_t::bittorrent, failure);
}
void web_peer_connection::handle_redirect(int const bytes_left)
{
// this means we got a redirection request
// look for the location header
std::string location = m_parser.header("location");
received_bytes(0, bytes_left);
std::shared_ptr<torrent> t = associated_torrent().lock();
TORRENT_ASSERT(t);
if (location.empty())
{
// we should not try this server again.
t->remove_web_seed_conn(this, errors::missing_location, operation_t::bittorrent, peer_error);
m_web = nullptr;
TORRENT_ASSERT(is_disconnecting());
return;
| ||
relevance 2 | ../src/escape_string.cpp:199 | this should probably be moved into string_util.cpp |
this should probably be moved into string_util.cpp../src/escape_string.cpp:199 {
if (std::strchr(unreserved_chars, *str) == nullptr || *str == 0)
return true;
++str;
}
return false;
}
// rewrite "path" in place, turning every windows style directory
// separator ('\') into a posix style one ('/')
void convert_path_to_posix(std::string& path)
{
	for (char& c : path)
		if (c == '\\') c = '/';
}
#ifdef TORRENT_WINDOWS
// rewrite "path" in place, turning every posix style directory
// separator ('/') into a windows style one ('\')
void convert_path_to_windows(std::string& path)
{
	for (char& c : path)
		if (c == '/') c = '\\';
}
#endif
// reads characters from "str" until (but not including) the first
// occurrence of "delim" or until "end" is reached, and returns them
// as a string. On return, "str" is advanced past the matched
// delimiter and any immediately following repetitions of it.
// Precondition: str <= end.
std::string read_until(char const*& str, char const delim, char const* end)
{
	TORRENT_ASSERT(str <= end);

	// find the delimiter and build the result in one step, instead of
	// appending one character at a time
	char const* const start = str;
	str = std::find(str, end, delim);
	std::string ret(start, str);

	// skip the delimiter as well (and any run of repeated delimiters)
	while (str != end && *str == delim) ++str;
	return ret;
}
std::string maybe_url_encode(std::string const& url)
{
std::string protocol, host, auth, path;
int port;
error_code ec;
std::tie(protocol, auth, host, port, path) = parse_url_components(url, ec);
if (ec) return url;
// first figure out if this url contains unencoded characters
if (!need_encoding(path.c_str(), int(path.size())))
return url;
std::string msg;
std::string escaped_path { escape_path(path) };
// reserve enough space so further append will
// only copy values to existing location
| ||
relevance 2 | ../src/tracker_manager.cpp:373 | implement |
implement../src/tracker_manager.cpp:373#ifndef TORRENT_DISABLE_LOGGING
if (m_ses.should_log())
{
m_ses.session_log("incoming UDP tracker packet from %s has invalid "
"transaction ID (%x)", print_endpoint(ep).c_str()
, transaction);
}
#endif
return false;
}
std::shared_ptr<udp_tracker_connection> const p = i->second;
// on_receive() may remove the tracker connection from the list
return p->on_receive(ep, buf);
}
// handler for transport-level errors on incoming tracker traffic.
// Currently a no-op: the error and endpoint are ignored; only asserts
// that we're running on the network thread.
// TODO: implement (see the relevance note above)
void tracker_manager::incoming_error(error_code const&
	, udp::endpoint const&)
{
	TORRENT_ASSERT(is_single_thread());
}
bool tracker_manager::incoming_packet(char const* hostname
, span<char const> const buf)
{
TORRENT_ASSERT(is_single_thread());
// ignore packets smaller than 8 bytes
if (buf.size() < 16) return false;
// the first word is the action, if it's not [0, 3]
// it's not a valid udp tracker response
span<const char> ptr = buf;
std::uint32_t const action = aux::read_uint32(ptr);
if (action > 3) return false;
std::uint32_t const transaction = aux::read_uint32(ptr);
auto const i = m_udp_conns.find(transaction);
if (i == m_udp_conns.end())
{
#ifndef TORRENT_DISABLE_LOGGING
// now, this may not have been meant to be a tracker response,
// but chances are pretty good, so it's probably worth logging
m_ses.session_log("incoming UDP tracker packet from %s has invalid "
"transaction ID (%x)", hostname, int(transaction));
#endif
return false;
}
std::shared_ptr<udp_tracker_connection> const p = i->second;
// on_receive() may remove the tracker connection from the list
| ||
relevance 2 | ../src/alert.cpp:1688 | the salt here is allocated on the heap. It would be nice to allocate in the stack_allocator |
the salt here is allocated on the heap. It would be nice to
allocate in the stack_allocator../src/alert.cpp:1688 , operation_name(op)
, error.value()
, convert_from_native(error.message()).c_str());
return msg;
}
// "t" is the target hash the immutable item was looked up under, "i"
// is the decoded item content. The stack_allocator parameter is
// unused here; both members are stored by value.
dht_immutable_item_alert::dht_immutable_item_alert(aux::stack_allocator&
	, sha1_hash const& t, entry const& i)
	: target(t), item(i)
{}
// human readable description of this alert
std::string dht_immutable_item_alert::message() const
{
	char buf[1050];
	std::snprintf(buf, sizeof(buf), "DHT immutable item %s [ %s ]"
		, aux::to_hex(target).c_str()
		, item.to_string().c_str());
	return std::string(buf);
}
// "k": public key the item is stored under, "sig": its signature,
// "sequence": sequence number, "s": salt, "i": decoded item content,
// "a": whether the response was authoritative.
// NOTE: the salt string_view is copied into a heap-allocated
// std::string member; allocating it in the stack_allocator would
// avoid that (see the TODO for this file)
dht_mutable_item_alert::dht_mutable_item_alert(aux::stack_allocator&
	, std::array<char, 32> const& k
	, std::array<char, 64> const& sig
	, std::int64_t sequence
	, string_view s
	, entry const& i
	, bool a)
	: key(k), signature(sig), seq(sequence), salt(s), item(i), authoritative(a)
{}
// human readable description of this alert
std::string dht_mutable_item_alert::message() const
{
	char const* const auth_str = authoritative ? "auth" : "non-auth";
	char buf[1050];
	std::snprintf(buf, sizeof(buf), "DHT mutable item (key=%s salt=%s seq=%" PRId64 " %s) [ %s ]"
		, aux::to_hex(key).c_str()
		, salt.c_str()
		, seq
		, auth_str
		, item.to_string().c_str());
	return std::string(buf);
}
// constructor used for a put of an immutable item: only the target
// hash "t" and the number of successful responses "n" are known. The
// mutable-item fields (public_key, signature, salt, seq) are left
// zero/empty.
dht_put_alert::dht_put_alert(aux::stack_allocator&, sha1_hash const& t, int n)
	: target(t)
	, public_key()
	, signature()
	, salt()
	, seq(0)
	, num_success(n)
{}
| ||
relevance 2 | ../src/block_cache.cpp:1619 | turn these return values into enums returns -1: block not in cache -2: out of memory |
turn these return values into enums
returns
-1: block not in cache
-2: out of memory../src/block_cache.cpp:1619 else
{
TORRENT_PIECE_ASSERT(!p.blocks[k].dirty, &p);
TORRENT_PIECE_ASSERT(!p.blocks[k].pending, &p);
TORRENT_PIECE_ASSERT(p.blocks[k].refcount == 0, &p);
}
num_refcount += p.blocks[k].refcount;
}
TORRENT_PIECE_ASSERT(num_blocks == p.num_blocks, &p);
TORRENT_PIECE_ASSERT(num_pending <= p.refcount, &p);
TORRENT_PIECE_ASSERT(num_refcount == p.refcount, &p);
TORRENT_PIECE_ASSERT(num_dirty == p.num_dirty, &p);
}
TORRENT_ASSERT(m_read_cache_size == cached_read_blocks);
TORRENT_ASSERT(m_write_cache_size == cached_write_blocks);
TORRENT_ASSERT(m_pinned_blocks == num_pinned);
TORRENT_ASSERT(m_write_cache_size + m_read_cache_size <= in_use());
}
#endif
int block_cache::copy_from_piece(cached_piece_entry* const pe
, disk_io_job* const j, buffer_allocator_interface& allocator
, bool const expect_no_fail)
{
INVARIANT_CHECK;
TORRENT_UNUSED(expect_no_fail);
TORRENT_PIECE_ASSERT(pe->in_use, pe);
// copy from the cache and update the last use timestamp
int block = j->d.io.offset / default_block_size;
int block_offset = j->d.io.offset & (default_block_size - 1);
int buffer_offset = 0;
int size = j->d.io.buffer_size;
int const blocks_to_read = block_offset > 0 && (size > default_block_size - block_offset) ? 2 : 1;
TORRENT_PIECE_ASSERT(size <= default_block_size, pe);
int const start_block = block;
#if TORRENT_USE_ASSERTS
int const piece_size = j->storage->files().piece_size(j->piece);
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
TORRENT_PIECE_ASSERT(start_block < blocks_in_piece, pe);
#endif
// if there's no buffer, we don't have this block in
// the cache, and we're not currently reading it in either
// since it's not pending
if (inc_block_refcount(pe, start_block, ref_reading) == false)
{
| ||
relevance 2 | ../src/kademlia/node.cpp:681 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
are missing in the bucket../src/kademlia/node.cpp:681 node_entry const* ne = m_table.next_refresh();
if (ne == nullptr) return;
// this shouldn't happen
TORRENT_ASSERT(m_id != ne->id);
if (ne->id == m_id) return;
int const bucket = 159 - distance_exp(m_id, ne->id);
TORRENT_ASSERT(bucket < 160);
send_single_refresh(ne->ep(), bucket, ne->id);
}
// sends a single message to the node at "ep" to keep the routing
// table bucket "bucket" fresh. "id" is the known node-ID of that
// endpoint; "bucket" must be its bucket index in [0, 159]. If the
// bucket is full a plain ping is sent, otherwise a get_peers for a
// random target within the bucket (so the response can also yield
// candidate nodes for the bucket).
void node::send_single_refresh(udp::endpoint const& ep, int const bucket
	, node_id const& id)
{
	TORRENT_ASSERT(id != m_id);
	TORRENT_ASSERT(bucket >= 0);
	TORRENT_ASSERT(bucket <= 159);

	// generate a random node_id within the given bucket
	// (keep the top "bucket + 1" bits from our own ID, randomize the rest)
	node_id mask = generate_prefix_mask(bucket + 1);
	node_id target = generate_secret_id() & ~mask;
	target |= m_id & mask;

	// create a dummy traversal_algorithm
	// (the observer requires one, but this is a one-off message,
	// not a full lookup)
	auto algo = std::make_shared<traversal_algorithm>(*this, node_id());
	auto o = m_rpc.allocate_observer<ping_observer>(std::move(algo), ep, id);
	if (!o) return;
#if TORRENT_USE_ASSERTS
	o->m_in_constructor = false;
#endif
	entry e;
	e["y"] = "q";

	if (m_table.is_full(bucket))
	{
		// current bucket is full, just ping it.
		e["q"] = "ping";
		m_counters.inc_stats_counter(counters::dht_ping_out);
	}
	else
	{
		// use get_peers instead of find_node. We'll get nodes in the response
		// either way.
		e["q"] = "get_peers";
		e["a"]["info_hash"] = target.to_string();
		m_counters.inc_stats_counter(counters::dht_get_peers_out);
	}

	m_rpc.invoke(e, ep, o);
}
| ||
relevance 2 | ../src/kademlia/node.cpp:750 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/node.cpp:750 std::lock_guard<std::mutex> l(m_mutex);
m_table.status(table);
for (auto const& r : m_running_requests)
{
requests.emplace_back();
dht_lookup& lookup = requests.back();
r->status(lookup);
}
}
// returns (number of routing table nodes, number of replacement
// nodes, number of allocated rpc observers)
std::tuple<int, int, int> node::get_stats_counters() const
{
	int num_nodes;
	int num_replacements;
	std::tie(num_nodes, num_replacements, std::ignore) = size();
	return std::make_tuple(num_nodes, num_replacements
		, m_rpc.num_allocated_observers());
}
#if TORRENT_ABI_VERSION == 1
// legacy (ABI version 1) stats interface: fills in the DHT related
// fields of session_status, accumulating the observer allocations and
// appending one dht_lookup entry per running request. Holds m_mutex
// while reading the routing table and the request list.
void node::status(session_status& s)
{
	std::lock_guard<std::mutex> l(m_mutex);

	m_table.status(s);
	s.dht_total_allocations += m_rpc.num_allocated_observers();
	for (auto& r : m_running_requests)
	{
		s.active_requests.emplace_back();
		dht_lookup& lookup = s.active_requests.back();
		r->status(lookup);
	}
}
#endif
// look up peers for "info_hash" in the local DHT storage and write
// them into "reply". The observer (if any) is notified of the lookup
// first. Returns the storage's result.
bool node::lookup_peers(sha1_hash const& info_hash, entry& reply
	, bool noseed, bool scrape, address const& requester) const
{
	if (m_observer != nullptr)
		m_observer->get_peers(info_hash);

	return m_storage.get_peers(info_hash, noseed, scrape, requester, reply);
}
entry write_nodes_entry(std::vector<node_entry> const& nodes)
{
entry r;
std::back_insert_iterator<std::string> out(r.string());
for (auto const& n : nodes)
{
std::copy(n.id.begin(), n.id.end(), out);
| ||
relevance 2 | ../src/kademlia/routing_table.cpp:291 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/routing_table.cpp:291 static const aux::array<int, 4> size_exceptions{{{16, 8, 4, 2}}};
if (bucket < size_exceptions.end_index())
return m_bucket_size * size_exceptions[bucket];
return m_bucket_size;
}
void routing_table::status(std::vector<dht_routing_bucket>& s) const
{
if (s.size() > m_buckets.size()) return;
s.clear();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
s.push_back(b);
}
}
#if TORRENT_ABI_VERSION == 1
void routing_table::status(session_status& s) const
{
int dht_nodes;
int dht_node_cache;
int ignore;
std::tie(dht_nodes, dht_node_cache, ignore) = size();
s.dht_nodes += dht_nodes;
s.dht_node_cache += dht_node_cache;
| ||
relevance 2 | ../src/kademlia/routing_table.cpp:926 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:926 bucket_t& rb = m_buckets[bucket_index].replacements;
// move any node whose (160 - distance_exp(m_id, id)) >= (i - m_buckets.begin())
// to the new bucket
int const new_bucket_size = bucket_limit(bucket_index + 1);
for (auto j = b.begin(); j != b.end();)
{
int const d = distance_exp(m_id, j->id);
if (d >= 159 - bucket_index)
{
++j;
continue;
}
// this entry belongs in the new bucket
new_bucket.push_back(*j);
j = b.erase(j);
}
if (int(b.size()) > bucket_size_limit)
{
for (auto i = b.begin() + bucket_size_limit
, end(b.end()); i != end; ++i)
{
rb.push_back(*i);
}
b.resize(bucket_size_limit);
}
// split the replacement bucket as well. If the live bucket
// is not full anymore, also move the replacement entries
// into the main bucket
for (auto j = rb.begin(); j != rb.end();)
{
if (distance_exp(m_id, j->id) >= 159 - bucket_index)
{
if (!j->pinged() || int(b.size()) >= bucket_size_limit)
{
++j;
continue;
}
b.push_back(*j);
}
else
{
// this entry belongs in the new bucket
if (j->pinged() && int(new_bucket.size()) < new_bucket_size)
new_bucket.push_back(*j);
else
new_replacement_bucket.push_back(*j);
}
| ||
relevance 2 | ../src/kademlia/dht_storage.cpp:82 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/dht_storage.cpp:82 tcp::endpoint addr;
bool seed = 0;
};
// internal
// internal. Orders peer entries primarily by IP address and
// secondarily by port
bool operator<(peer_entry const& lhs, peer_entry const& rhs)
{
	if (lhs.addr.address() != rhs.addr.address())
		return lhs.addr.address() < rhs.addr.address();
	return lhs.addr.port() < rhs.addr.port();
}
// this is a group. It contains a set of group members
struct torrent_entry
{
	// name associated with this torrent, if one was announced
	std::string name;
	// announced peers, presumably split by address family
	// (peers4 = IPv4, peers6 = IPv6) -- confirm against the callers
	std::vector<peer_entry> peers4;
	std::vector<peer_entry> peers6;
};
constexpr time_duration announce_interval = minutes(30);
// an immutable item stored by this node's DHT storage
struct dht_immutable_item
{
	// the actual value
	std::unique_ptr<char[]> value;
	// this counts the number of IPs we have seen
	// announcing this item, this is used to determine
	// popularity if we reach the limit of items to store
	bloom_filter<128> ips;
	// the last time we heard about this item
	// the correct interpretation of this field
	// requires a time reference
	time_point last_seen;
	// number of IPs in the bloom filter
	int num_announcers = 0;
	// size of malloced space pointed to by value
	int size = 0;
};
// a mutable item extends the immutable one with the signature,
// sequence number, public key and salt it was published under
struct dht_mutable_item : dht_immutable_item
{
	// signature of the current value
	signature sig{};
	// sequence number of the current value
	sequence_number seq{};
	// public key the item is stored under
	public_key key{};
	// salt of the item; presumably part of the target-ID derivation
	// (BEP 44) -- confirm against the callers
	std::string salt;
};
void set_value(dht_immutable_item& item, span<char const> buf)
{
int const size = int(buf.size());
| ||
relevance 2 | ../include/libtorrent/chained_buffer.hpp:59 | this type should probably be renamed to send_buffer |
this type should probably be renamed to send_buffer../include/libtorrent/chained_buffer.hpp:59#include "libtorrent/debug.hpp"
#include "libtorrent/buffer.hpp"
#include <deque>
#include <vector>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/asio/buffer.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#ifdef _MSC_VER
// visual studio requires the value in a deque to be copyable. C++11
// has looser requirements depending on which functions are actually used.
#define TORRENT_CPP98_DEQUE 1
#else
#define TORRENT_CPP98_DEQUE 0
#endif
namespace libtorrent {
struct TORRENT_EXTRA_EXPORT chained_buffer : private single_threaded
{
chained_buffer(): m_bytes(0), m_capacity(0)
{
thread_started();
#if TORRENT_USE_ASSERTS
m_destructed = false;
#endif
}
private:
// destructs/frees the holder object
using destruct_holder_fun = void (*)(void*);
using move_construct_holder_fun = void (*)(void*, void*);
struct buffer_t
{
buffer_t() {}
#if TORRENT_CPP98_DEQUE
buffer_t(buffer_t&& rhs) noexcept
{
destruct_holder = rhs.destruct_holder;
move_holder = rhs.move_holder;
buf = rhs.buf;
size = rhs.size;
used_size = rhs.used_size;
move_holder(&holder, &rhs.holder);
}
buffer_t& operator=(buffer_t&& rhs) & noexcept
{
| ||
relevance 2 | ../include/libtorrent/peer_connection.hpp:955 | this should really be a circular buffer |
this should really be a circular buffer../include/libtorrent/peer_connection.hpp:955 // it just serves as a queue to remember what we've sent, to avoid
// re-sending suggests for the same piece
// i.e. outgoing suggest pieces
aux::vector<piece_index_t> m_suggest_pieces;
// the pieces we will send to the peer
// if requested (regardless of choke state)
std::vector<piece_index_t> m_accept_fast;
// a sent-piece counter for the allowed fast set
// to avoid exploitation. Each slot is a counter
// for one of the pieces from the allowed-fast set
aux::vector<std::uint16_t> m_accept_fast_piece_cnt;
// the pieces the peer will send us if
// requested (regardless of choke state)
std::vector<piece_index_t> m_allowed_fast;
// pieces that has been suggested to be downloaded from this peer
// i.e. incoming suggestions
aux::vector<piece_index_t> m_suggested_pieces;
// the time when this peer last saw a complete copy
// of this torrent
time_t m_last_seen_complete = 0;
// the block we're currently receiving. Or
// (-1, -1) if we're not receiving one
piece_block m_receiving_block = piece_block::invalid;
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
// before the connection completes, it means we want to
// force the connection to be bound to the specified interface.
// if it ends up being bound to a different local IP, the connection
// is closed.
tcp::endpoint m_local;
// remote peer's id
peer_id m_peer_id;
protected:
template <typename Fun, typename... Args>
void wrap(Fun f, Args&&... a);
// statistics about upload and download speeds
// and total amount of uploads and downloads for
// this peer
| ||
relevance 2 | ../include/libtorrent/peer_connection.hpp:1053 | rename this target queue size |
rename this target queue size../include/libtorrent/peer_connection.hpp:1053 // thread that hasn't yet been completely written.
int m_outstanding_writing_bytes = 0;
// max transfer rates seen on this peer
int m_download_rate_peak = 0;
int m_upload_rate_peak = 0;
#if TORRENT_ABI_VERSION == 1
// when using the BitTyrant choker, this is our
// estimated reciprocation rate. i.e. the rate
// we need to send to this peer for it to unchoke
// us
int m_est_reciprocation_rate;
#endif
// stop sending data after this many bytes, INT_MAX = inf
int m_send_barrier = INT_MAX;
// the number of request we should queue up
// at the remote end.
std::uint16_t m_desired_queue_size = 4;
// if set to non-zero, this peer will always prefer
// to request entire n pieces, rather than blocks.
// where n is the value of this variable.
// if it is 0, the download rate limit setting
// will be used to determine if whole pieces
// are preferred.
int m_prefer_contiguous_blocks = 0;
// this is the number of times this peer has had
// a request rejected because of a disk I/O failure.
// once this reaches a certain threshold, the
// peer is disconnected in order to avoid infinite
// loops of consistent failures
std::uint8_t m_disk_read_failures = 0;
// this is used in seed mode whenever we trigger a hash check
// for a piece, before we read it. It's used to throttle
// the hash checks to just a few per peer at a time.
std::uint8_t m_outstanding_piece_verification:3;
// is true if it was we that connected to the peer
// and false if we got an incoming connection
// could be considered: true = local, false = remote
bool m_outgoing:1;
// is true if we learn the incoming connections listening
// during the extended handshake
bool m_received_listen_port:1;
| ||
relevance 2 | ../include/libtorrent/piece_picker.hpp:611 | having 8 priority levels is probably excessive. It should probably be changed to 3 levels + dont-download |
having 8 priority levels is probably excessive. It should
probably be changed to 3 levels + dont-download../include/libtorrent/piece_picker.hpp:611 else if (state() == piece_full)
state(piece_full_reverse);
}
// the number of peers that has this piece
// (availability)
std::uint32_t peer_count : 26;
// one of the download_queue_t values. This indicates whether this piece
// is currently being downloaded or not, and what state it's in if
// it is. Specifically, as an optimization, pieces that have all blocks
// requested from them are separated out into separate lists to make
// lookups quicker. The main oddity is that whether a downloading piece
// has only been requested from peers that are reverse, that's
// recorded as piece_downloading_reverse, which really means the same
// as piece_downloading, it just saves space to also indicate that it
// has a bit lower priority. The reverse bit is only relevant if the
// state is piece_downloading.
std::uint32_t download_state : 3;
// is 0 if the piece is filtered (not to be downloaded)
// 1 is low priority
// 2 is low priority
// 3 is mid priority
// 4 is default priority
// 5 is mid priority
// 6 is high priority
// 7 is high priority
std::uint32_t piece_priority : 3;
// index in to the piece_info vector
prio_index_t index;
#ifdef TORRENT_DEBUG_REFCOUNTS
// all the peers that have this piece
std::set<const torrent_peer*> have_peers;
#endif
// index is set to this to indicate that we have the
// piece. There is no entry for the piece in the
// buckets if this is the case.
static constexpr prio_index_t we_have_index{-1};
// the priority value that means the piece is filtered
static constexpr std::uint32_t filter_priority = 0;
// the max number the peer count can hold
static constexpr std::uint32_t max_peer_count = 0xffff;
bool have() const { return index == we_have_index; }
| ||
relevance 2 | ../include/libtorrent/enum_net.hpp:148 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:148 // IPv6 address. If we're asking to be bound to an IPv6 address and
// providing 0.0.0.0 as the device, turn it into "::"
if (ip == address_v4::any() && protocol == boost::asio::ip::tcp::v6())
ip = address_v6::any();
bind_ep.address(ip);
// it appears to be an IP. Just bind to that address
sock.bind(bind_ep, ec);
return bind_ep.address();
}
ec.clear();
#if TORRENT_HAS_BINDTODEVICE
// try to use SO_BINDTODEVICE here, if that exists. If it fails,
// fall back to the mechanism we have below
aux::bind_device(sock, device_name, ec);
if (ec)
#endif
{
ec.clear();
std::vector<ip_interface> ifs = enum_net_interfaces(ios, ec);
if (ec) return bind_ep.address();
bool found = false;
for (auto const& iface : ifs)
{
// we're looking for a specific interface, and its address
// (which must be of the same family as the address we're
// connecting to)
if (std::strcmp(iface.name, device_name) != 0) continue;
if (iface.interface_address.is_v4() != (protocol == boost::asio::ip::tcp::v4()))
continue;
bind_ep.address(iface.interface_address);
found = true;
break;
}
if (!found)
{
ec = error_code(boost::system::errc::no_such_device, generic_category());
return bind_ep.address();
}
}
sock.bind(bind_ep, ec);
return bind_ep.address();
}
// returns the device name whose local address is ``addr``. If
// no such device is found, an empty string is returned.
| ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:274 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:274 return m_sock.lowest_layer();
}
next_layer_type& next_layer()
{
return m_sock;
}
bool is_open() const { return m_sock.is_open(); }
protected:
bool handle_error(error_code const& e, handler_type const& h);
tcp::socket m_sock;
std::string m_hostname; // proxy host
int m_port; // proxy port
endpoint_type m_remote_endpoint;
tcp::resolver m_resolver;
};
}
#endif
| ||
relevance 2 | ../include/libtorrent/broadcast_socket.hpp:50 | factor these functions out |
factor these functions out../include/libtorrent/broadcast_socket.hpp:50
*/
#ifndef TORRENT_BROADCAST_SOCKET_HPP_INCLUDED
#define TORRENT_BROADCAST_SOCKET_HPP_INCLUDED
#include "libtorrent/config.hpp"
#include "libtorrent/io_service_fwd.hpp"
#include "libtorrent/socket.hpp"
#include "libtorrent/address.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/string_view.hpp"
#include "libtorrent/span.hpp"
#include <memory>
#include <list>
#include <array>
namespace libtorrent {
TORRENT_EXTRA_EXPORT bool is_global(address const& a);
TORRENT_EXTRA_EXPORT bool is_local(address const& a);
TORRENT_EXTRA_EXPORT bool is_link_local(address const& addr);
TORRENT_EXTRA_EXPORT bool is_loopback(address const& addr);
TORRENT_EXTRA_EXPORT bool is_any(address const& addr);
TORRENT_EXTRA_EXPORT bool is_teredo(address const& addr);
TORRENT_EXTRA_EXPORT bool is_ip_address(std::string const& host);
// internal
| ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:137 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:137 TORRENT_ASSERT(!is_ip_address(host));
m_dst_name = host;
if (m_dst_name.size() > 255)
m_dst_name.resize(255);
}
void close(error_code& ec)
{
m_dst_name.clear();
proxy_base::close(ec);
}
#ifndef BOOST_NO_EXCEPTIONS
void close()
{
m_dst_name.clear();
proxy_base::close();
}
#endif
template <class Handler>
void async_connect(endpoint_type const& endpoint, Handler const& handler)
{
// make sure we don't try to connect to INADDR_ANY. binding is fine,
// and using a hostname is fine on SOCKS version 5.
TORRENT_ASSERT(endpoint.address() != address()
|| (!m_dst_name.empty() && m_version == 5));
m_remote_endpoint = endpoint;
// the connect is split up in the following steps:
// 1. resolve name of proxy server
// 2. connect to proxy server
// 3. if version == 5:
// 3.1 send SOCKS5 authentication method message
// 3.2 read SOCKS5 authentication response
// 3.3 send username+password
// 4. send SOCKS command message
using std::placeholders::_1;
using std::placeholders::_2;
ADD_OUTSTANDING_ASYNC("socks5_stream::name_lookup");
tcp::resolver::query q(m_hostname, to_string(m_port).data());
m_resolver.async_resolve(q, std::bind(
&socks5_stream::name_lookup, this, _1, _2, handler_type(std::move(handler))));
}
private:
void name_lookup(error_code const& e, tcp::resolver::iterator i
, handler_type h);
| ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:130 | make this interface a lot smaller. It could be split up into several smaller interfaces. Each subsystem could then limit the size of the mock object to test it. |
make this interface a lot smaller. It could be split up into
several smaller interfaces. Each subsystem could then limit the size
of the mock object to test it.../include/libtorrent/aux_/session_interface.hpp:130 // a release build with logging disabled (which is the default) will
// not have this class at all
struct TORRENT_EXTRA_EXPORT session_logger
{
#ifndef TORRENT_DISABLE_LOGGING
virtual bool should_log() const = 0;
virtual void session_log(char const* fmt, ...) const TORRENT_FORMAT(2,3) = 0;
#endif
#if TORRENT_USE_ASSERTS
virtual bool is_single_thread() const = 0;
virtual bool has_peer(peer_connection const* p) const = 0;
virtual bool any_torrent_has_peer(peer_connection const* p) const = 0;
virtual bool is_posting_torrent_updates() const = 0;
#endif
protected:
~session_logger() {}
};
#endif // TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
struct TORRENT_EXTRA_EXPORT session_interface
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
: session_logger
#endif
{
| ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:139 | the IP voting mechanism should be factored out to its own class, not part of the session and these constants should move too |
the IP voting mechanism should be factored out
to its own class, not part of the session
and these constants should move too../include/libtorrent/aux_/session_interface.hpp:139 virtual void session_log(char const* fmt, ...) const TORRENT_FORMAT(2,3) = 0;
#endif
#if TORRENT_USE_ASSERTS
virtual bool is_single_thread() const = 0;
virtual bool has_peer(peer_connection const* p) const = 0;
virtual bool any_torrent_has_peer(peer_connection const* p) const = 0;
virtual bool is_posting_torrent_updates() const = 0;
#endif
protected:
~session_logger() {}
};
#endif // TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
struct TORRENT_EXTRA_EXPORT session_interface
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
: session_logger
#endif
{
// the logic in ip_voter relies on more reliable sources are represented
// by more significant bits
static constexpr ip_source_t source_dht = 1_bit;
static constexpr ip_source_t source_peer = 2_bit;
static constexpr ip_source_t source_tracker = 3_bit;
static constexpr ip_source_t source_router = 4_bit;
virtual void set_external_address(tcp::endpoint const& local_endpoint
, address const& ip
, ip_source_t source_type, address const& source) = 0;
virtual external_ip external_address() const = 0;
virtual disk_interface& disk_thread() = 0;
virtual alert_manager& alerts() = 0;
virtual torrent_peer_allocator_interface& get_peer_allocator() = 0;
virtual io_service& get_io_service() = 0;
virtual resolver_interface& get_resolver() = 0;
virtual bool has_connection(peer_connection* p) const = 0;
virtual void insert_peer(std::shared_ptr<peer_connection> const& c) = 0;
virtual void remove_torrent(torrent_handle const& h, remove_flags_t options = {}) = 0;
virtual void remove_torrent_impl(std::shared_ptr<torrent> tptr, remove_flags_t options) = 0;
// port filter
virtual port_filter const& get_port_filter() const = 0;
virtual void ban_ip(address addr) = 0;
| ||
relevance 1 | ../src/session_impl.cpp:5624 | report the proper address of the router as the source IP of this vote of our external address, instead of the empty address |
report the proper address of the router as the source IP of
this vote of our external address, instead of the empty address../src/session_impl.cpp:5624
// look through our listen sockets to see if this mapping is for one of
// them (it could also be a user mapping)
auto ls
= std::find_if(m_listen_sockets.begin(), m_listen_sockets.end()
, std::bind(find_tcp_port_mapping, transport, mapping, _1));
bool tcp = true;
if (ls == m_listen_sockets.end())
{
ls = std::find_if(m_listen_sockets.begin(), m_listen_sockets.end()
, std::bind(find_udp_port_mapping, transport, mapping, _1));
tcp = false;
}
if (ls != m_listen_sockets.end())
{
if (!ec && ip != address())
{
(*ls)->external_address.cast_vote(ip, source_router, address());
}
if (tcp) (*ls)->tcp_port_mapping[transport].port = port;
else (*ls)->udp_port_mapping[transport].port = port;
}
if (!ec && m_alerts.should_post<portmap_alert>())
{
m_alerts.emplace_alert<portmap_alert>(mapping, port
, transport, proto);
}
}
#if TORRENT_ABI_VERSION == 1
session_status session_impl::status() const
{
// INVARIANT_CHECK;
TORRENT_ASSERT(is_single_thread());
session_status s;
s.optimistic_unchoke_counter = m_optimistic_unchoke_time_scaler;
s.unchoke_counter = m_unchoke_time_scaler;
s.num_dead_peers = int(m_undead_peers.size());
s.num_peers = int(m_stats_counters[counters::num_peers_connected]);
s.num_unchoked = int(m_stats_counters[counters::num_peers_up_unchoked_all]);
s.allowed_upload_slots = int(m_stats_counters[counters::num_unchoke_slots]);
s.num_torrents
| ||
relevance 1 | ../src/torrent.cpp:1220 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
files are being downloaded to. If the error is no_space_left_on_device
and the filesystem doesn't support sparse files, only zero the priorities
of the pieces that are at the tails of all files, leaving everything
up to the highest written piece in each file../src/torrent.cpp:1220
// notify the user of the error
if (alerts().should_post<file_error_alert>())
alerts().emplace_alert<file_error_alert>(error.ec
, resolve_filename(error.file()), error.operation, get_handle());
// if a write operation failed, and future writes are likely to
// fail, while reads may succeed, just set the torrent to upload mode
// if we make an incorrect assumption here, it's not the end of the
// world, if we ever issue a read request and it fails as well, we
// won't get in here and we'll actually end up pausing the torrent
if (rw == disk_class::write
&& (error.ec == boost::system::errc::read_only_file_system
|| error.ec == boost::system::errc::permission_denied
|| error.ec == boost::system::errc::operation_not_permitted
|| error.ec == boost::system::errc::no_space_on_device
|| error.ec == boost::system::errc::file_too_large))
{
// if we failed to write, stop downloading and just
// keep seeding.
set_upload_mode(true);
return;
}
// put the torrent in an error-state
set_error(error.ec, error.file());
// if the error appears to be more serious than a full disk, just pause the torrent
pause();
}
void torrent::on_piece_fail_sync(piece_index_t, piece_block) try
{
if (m_abort) return;
update_gauge();
// some peers that previously was no longer interesting may
// now have become interesting, since we lack this one piece now.
for (auto i = begin(); i != end();)
{
peer_connection* p = *i;
// update_interest may disconnect the peer and
// invalidate the iterator
++i;
// no need to do anything with peers that
// already are interested. Gaining a piece may
// only make uninteresting peers interesting again.
if (p->is_interesting()) continue;
p->update_interest();
if (!m_abort)
{
| ||
relevance 1 | ../src/torrent.cpp:7493 | should disconnect all peers that have the pieces we have, not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have,
not just seeds. It would be pretty expensive to check all pieces
for all peers though../src/torrent.cpp:7493
set_state(torrent_status::finished);
set_queue_position(no_pos);
m_became_finished = aux::time_now32();
// we have to call completed() before we start
// disconnecting peers, since there's an assert
// to make sure we're cleared the piece picker
if (is_seed()) completed();
send_upload_only();
state_updated();
if (m_completed_time == 0)
m_completed_time = time(nullptr);
// disconnect all seeds
if (settings().get_bool(settings_pack::close_redundant_connections))
{
std::vector<peer_connection*> seeds;
for (auto const p : m_connections)
{
TORRENT_INCREMENT(m_iterating_connections);
TORRENT_ASSERT(p->associated_torrent().lock().get() == this);
if (p->upload_only())
{
#ifndef TORRENT_DISABLE_LOGGING
p->peer_log(peer_log_alert::info, "SEED", "CLOSING CONNECTION");
#endif
seeds.push_back(p);
}
}
for (auto& p : seeds)
p->disconnect(errors::torrent_finished, operation_t::bittorrent
, peer_connection_interface::normal);
}
if (m_abort) return;
update_want_peers();
if (m_storage)
{
// we need to keep the object alive during this operation
m_ses.disk_thread().async_release_files(m_storage
, std::bind(&torrent::on_cache_flushed, shared_from_this(), false));
}
// this torrent just completed downloads, which means it will fall
// under a different limit with the auto-manager. Make sure we
| ||
relevance 1 | ../include/libtorrent/ip_voter.hpp:128 | have one instance per possible subnet, 192.168.x.x, 10.x.x.x, etc. |
have one instance per possible subnet, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:128 // stores one address for each combination of local/global and ipv4/ipv6
// use of this class should be avoided, get the IP from the appropriate
// listen interface wherever possible
struct TORRENT_EXTRA_EXPORT external_ip
{
external_ip()
: m_addresses{{address_v4(), address_v6()}, {address_v4(), address_v6()}}
{}
external_ip(address const& local4, address const& global4
, address const& local6, address const& global6);
// the external IP as it would be observed from `ip`
address external_address(address const& ip) const;
private:
// support one local and one global address per address family
// [0][n] = global [1][n] = local
// [n][0] = IPv4 [n][1] = IPv6
address m_addresses[2][2];
};
}
#endif
| ||
relevance 0 | ../test/test_resume.cpp:545 | test what happens when loading a resume file with both piece priorities and file priorities (file prio should take precedence) |
test what happens when loading a resume file with both piece priorities
and file priorities (file prio should take precedence)../test/test_resume.cpp:545 {
TEST_EQUAL(pieces[i], true);
}
}
}
} // anonymous namespace
TORRENT_TEST(piece_slots_seed)
{
test_piece_slots_seed(settings());
}
TORRENT_TEST(piece_slots_seed_suggest_cache)
{
settings_pack sett = settings();
sett.set_int(settings_pack::suggest_mode, settings_pack::suggest_read_cache);
test_piece_slots_seed(sett);
}
| ||
relevance 0 | ../test/test_resume.cpp:548 | make sure a resume file only ever contains file priorities OR piece priorities. Never both. |
make sure a resume file only ever contains file priorities OR piece
priorities. Never both.../test/test_resume.cpp:548 TEST_EQUAL(pieces[i], true);
}
}
}
} // anonymous namespace
TORRENT_TEST(piece_slots_seed)
{
test_piece_slots_seed(settings());
}
TORRENT_TEST(piece_slots_seed_suggest_cache)
{
settings_pack sett = settings();
sett.set_int(settings_pack::suggest_mode, settings_pack::suggest_read_cache);
test_piece_slots_seed(sett);
}
| ||
relevance 0 | ../test/test_resume.cpp:551 | generally save |
generally save../test/test_resume.cpp:551 }
}
}
} // anonymous namespace
TORRENT_TEST(piece_slots_seed)
{
test_piece_slots_seed(settings());
}
TORRENT_TEST(piece_slots_seed_suggest_cache)
{
settings_pack sett = settings();
sett.set_int(settings_pack::suggest_mode, settings_pack::suggest_read_cache);
test_piece_slots_seed(sett);
}
#if TORRENT_ABI_VERSION == 1
TORRENT_TEST(file_priorities_default_deprecated)
{
lt::session ses(settings());
std::vector<download_priority_t> file_priorities = test_resume_flags(ses
, {}, "", "", true).get_file_priorities();
TEST_EQUAL(file_priorities.size(), 3);
TEST_EQUAL(file_priorities[0], 4);
TEST_EQUAL(file_priorities[1], 4);
TEST_EQUAL(file_priorities[2], 4);
}
// As long as the add_torrent_params priorities are empty, the file_priorities
// from the resume data should take effect
TORRENT_TEST(file_priorities_in_resume_deprecated)
{
lt::session ses(settings());
std::vector<download_priority_t> file_priorities = test_resume_flags(ses, {}, "", "123").get_file_priorities();
TEST_EQUAL(file_priorities.size(), 3);
TEST_EQUAL(file_priorities[0], 1);
TEST_EQUAL(file_priorities[1], 2);
TEST_EQUAL(file_priorities[2], 3);
}
// if both resume data and add_torrent_params has file_priorities, the
// add_torrent_params one take precedence
TORRENT_TEST(file_priorities_in_resume_and_params_deprecated)
{
| ||
relevance 0 | ../test/test_resume.cpp:871 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
more than just the torrent_status from test_resume_flags. Also http seeds
and trackers for instance../test/test_resume.cpp:871 TEST_EQUAL(s.flags & flags_mask, torrent_flags_t{});
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
TORRENT_TEST(paused_deprecated)
{
lt::session ses(settings());
// resume data overrides the paused flag
torrent_status s = test_resume_flags(ses, torrent_flags::paused, "", "", true).status();
default_tests(s);
#ifdef TORRENT_WINDOWS
TEST_EQUAL(s.save_path, "c:\\add_torrent_params save_path");
#else
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
#endif
TEST_EQUAL(s.flags & flags_mask, torrent_flags_t{});
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
TORRENT_TEST(url_seed_resume_data_deprecated)
{
// merge url seeds with resume data
std::printf("flags: merge_resume_http_seeds\n");
lt::session ses(settings());
torrent_handle h = test_resume_flags(ses,
torrent_flags::merge_resume_http_seeds, "", "", true);
std::set<std::string> us = h.url_seeds();
std::set<std::string> ws = h.http_seeds();
TEST_EQUAL(us.size(), 3);
TEST_EQUAL(std::count(us.begin(), us.end()
, "http://add_torrent_params_url_seed.com/"), 1);
TEST_EQUAL(std::count(us.begin(), us.end()
, "http://torrent_file_url_seed.com/"), 1);
TEST_EQUAL(std::count(us.begin(), us.end()
, "http://resume_data_url_seed.com/"), 1);
TEST_EQUAL(ws.size(), 1);
TEST_EQUAL(std::count(ws.begin(), ws.end()
, "http://resume_data_http_seed.com"), 1);
}
TORRENT_TEST(resume_override_torrent_deprecated)
{
// resume data overrides the .torrent_file
std::printf("flags: no merge_resume_http_seed\n");
lt::session ses(settings());
torrent_handle h = test_resume_flags(ses,
| ||
relevance 0 | ../test/test_resume.cpp:1476 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
more than just the torrent_status from test_resume_flags. Also http seeds
and trackers for instance../test/test_resume.cpp:1476 TEST_EQUAL(s.flags & flags_mask, torrent_flags::auto_managed);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
TORRENT_TEST(paused)
{
lt::session ses(settings());
// resume data overrides the paused flag
torrent_status s = test_resume_flags(ses, torrent_flags::paused).status();
default_tests(s);
#ifdef TORRENT_WINDOWS
TEST_EQUAL(s.save_path, "c:\\add_torrent_params save_path");
#else
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
#endif
TEST_EQUAL(s.flags & flags_mask, torrent_flags::paused);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
| ||
relevance 0 | ../test/test_ssl.cpp:411 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:411 // in verifying peers
ctx.set_verify_mode(context::verify_none, ec);
if (ec)
{
std::printf("Failed to set SSL verify mode: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
}
std::string certificate = combine_path("..", combine_path("ssl", "peer_certificate.pem"));
std::string private_key = combine_path("..", combine_path("ssl", "peer_private_key.pem"));
std::string dh_params = combine_path("..", combine_path("ssl", "dhparams.pem"));
if (flags & invalid_certificate)
{
certificate = combine_path("..", combine_path("ssl", "invalid_peer_certificate.pem"));
private_key = combine_path("..", combine_path("ssl", "invalid_peer_private_key.pem"));
}
if (flags & (valid_certificate | invalid_certificate))
{
std::printf("set_password_callback\n");
ctx.set_password_callback(std::bind(&password_callback, _1, _2, "test"), ec);
if (ec)
{
std::printf("Failed to set certificate password callback: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
}
std::printf("use_certificate_file \"%s\"\n", certificate.c_str());
ctx.use_certificate_file(certificate, context::pem, ec);
if (ec)
{
std::printf("Failed to set certificate file: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
}
std::printf("use_private_key_file \"%s\"\n", private_key.c_str());
ctx.use_private_key_file(private_key, context::pem, ec);
if (ec)
{
std::printf("Failed to set private key: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
}
std::printf("use_tmp_dh_file \"%s\"\n", dh_params.c_str());
| ||
relevance 0 | ../test/test_ssl.cpp:509 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
but that differs from the SNI hash../test/test_ssl.cpp:509 print_alerts(ses1, "ses1", true, true, &on_alert);
if (ec)
{
std::printf("Failed SSL handshake: %s\n"
, ec.message().c_str());
return false;
}
char handshake[] = "\x13" "BitTorrent protocol\0\0\0\0\0\0\0\x04"
" " // space for info-hash
"aaaaaaaaaaaaaaaaaaaa" // peer-id
"\0\0\0\x01\x02"; // interested
// fill in the info-hash
if (flags & valid_bittorrent_hash)
{
std::memcpy(handshake + 28, &t->info_hash()[0], 20);
}
else
{
std::generate(handshake + 28, handshake + 48, &rand);
}
// fill in the peer-id
std::generate(handshake + 48, handshake + 68, &rand);
std::printf("bittorrent handshake\n");
boost::asio::write(ssl_sock, boost::asio::buffer(handshake, (sizeof(handshake) - 1)), ec);
print_alerts(ses1, "ses1", true, true, &on_alert);
if (ec)
{
std::printf("failed to write bittorrent handshake: %s\n"
, ec.message().c_str());
return false;
}
char buf[68];
std::printf("read bittorrent handshake\n");
boost::asio::read(ssl_sock, boost::asio::buffer(buf, sizeof(buf)), ec);
print_alerts(ses1, "ses1", true, true, &on_alert);
if (ec)
{
std::printf("failed to read bittorrent handshake: %s\n"
, ec.message().c_str());
return false;
}
if (memcmp(buf, "\x13" "BitTorrent protocol", 20) != 0)
{
std::printf("invalid bittorrent handshake\n");
return false;
| ||
relevance 0 | ../test/test_peer_list.cpp:968 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:968 | ||
relevance 0 | ../test/test_peer_list.cpp:969 | test update_peer_port with allow_multiple_connections_per_ip and without |
test update_peer_port with allow_multiple_connections_per_ip and without../test/test_peer_list.cpp:969 | ||
relevance 0 | ../test/test_peer_list.cpp:970 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:970 | ||
relevance 0 | ../test/test_peer_list.cpp:971 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:971 | ||
relevance 0 | ../test/test_peer_list.cpp:972 | test insert_peer failing with all error conditions |
test insert_peer failing with all error conditions../test/test_peer_list.cpp:972 | ||
relevance 0 | ../test/test_peer_list.cpp:973 | test IPv6 |
test IPv6../test/test_peer_list.cpp:973 | ||
relevance 0 | ../test/test_peer_list.cpp:974 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:974 | ||
relevance 0 | ../test/test_peer_list.cpp:975 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:975 | ||
relevance 0 | ../test/test_peer_list.cpp:976 | connect candidates recalculation when incrementing failcount |
connect candidates recalculation when incrementing failcount../test/test_peer_list.cpp:976 | ||
relevance 0 | ../test/test_tracker.cpp:55 | test scrape requests |
test scrape requests../test/test_tracker.cpp:55 | ||
relevance 0 | ../test/test_tracker.cpp:56 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:56 | ||
relevance 0 | ../test/test_tracker.cpp:57 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:57 | ||
relevance 0 | ../test/test_tracker.cpp:58 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:58 | ||
relevance 0 | ../test/test_tracker.cpp:59 | test all failure paths, including invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths, including
invalid bencoding
not a dictionary
no files entry in scrape response
no info-hash entry in scrape response
malformed peers in peer list of dictionaries
uneven number of bytes in peers and peers6 string responses../test/test_tracker.cpp:59#include "udp_tracker.hpp"
#include "settings.hpp"
#include "libtorrent/alert.hpp"
#include "libtorrent/peer_info.hpp" // for peer_list_entry
#include "libtorrent/broadcast_socket.hpp" // for supports_ipv6
#include "libtorrent/alert_types.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/tracker_manager.hpp"
#include "libtorrent/http_tracker_connection.hpp" // for parse_tracker_response
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/announce_entry.hpp"
#include "libtorrent/torrent.hpp"
#include "libtorrent/aux_/path.hpp"
#include "libtorrent/socket_io.hpp"
#include <fstream>
using namespace lt;
TORRENT_TEST(parse_hostname_peers)
{
char const response[] = "d5:peersld7:peer id20:aaaaaaaaaaaaaaaaaaaa"
"2:ip13:test_hostname4:porti1000eed"
"7:peer id20:bbbbabaababababababa2:ip12:another_host4:porti1001eeee";
error_code ec;
tracker_response resp = parse_tracker_response(response
, ec, false, sha1_hash());
TEST_EQUAL(ec, error_code());
TEST_EQUAL(resp.peers.size(), 2);
if (resp.peers.size() == 2)
{
peer_entry const& e0 = resp.peers[0];
peer_entry const& e1 = resp.peers[1];
TEST_EQUAL(e0.hostname, "test_hostname");
TEST_EQUAL(e0.port, 1000);
TEST_EQUAL(e0.pid, peer_id("aaaaaaaaaaaaaaaaaaaa"));
TEST_EQUAL(e1.hostname, "another_host");
TEST_EQUAL(e1.port, 1001);
TEST_EQUAL(e1.pid, peer_id("bbbbabaababababababa"));
}
}
TORRENT_TEST(parse_peers4)
{
char const response[] = "d5:peers12:\x01\x02\x03\x04\x30\x10"
"\x09\x08\x07\x06\x20\x10" "e";
error_code ec;
| ||
relevance 0 | ../test/test_timestamp_history.cpp:54 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_timestamp_history.cpp:54 | ||
relevance 0 | ../test/test_timestamp_history.cpp:55 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_timestamp_history.cpp:55#include "libtorrent/timestamp_history.hpp"
TORRENT_TEST(timestamp_history)
{
using namespace lt;
timestamp_history h;
TEST_EQUAL(h.add_sample(0x32, false), 0);
TEST_EQUAL(h.base(), 0x32);
TEST_EQUAL(h.add_sample(0x33, false), 0x1);
TEST_EQUAL(h.base(), 0x32);
TEST_EQUAL(h.add_sample(0x3433, false), 0x3401);
TEST_EQUAL(h.base(), 0x32);
TEST_EQUAL(h.add_sample(0x30, false), 0);
TEST_EQUAL(h.base(), 0x30);
// test that wrapping of the timestamp is properly handled
h.add_sample(0xfffffff3, false);
TEST_EQUAL(h.base(), 0xfffffff3);
}
| ||
relevance 0 | ../test/test_flags.cpp:140 | change to a different test setup. currently always paused. test_set_after_add(torrent_flags::paused); test_unset_after_add(torrent_flags::paused); |
change to a different test setup. currently always paused.
test_set_after_add(torrent_flags::paused);
test_unset_after_add(torrent_flags::paused);../test/test_flags.cpp:140{
// share-mode
test_add_and_get_flags(torrent_flags::share_mode);
test_set_after_add(torrent_flags::share_mode);
test_unset_after_add(torrent_flags::share_mode);
}
#endif
TORRENT_TEST(flag_apply_ip_filter)
{
// apply-ip-filter
test_add_and_get_flags(torrent_flags::apply_ip_filter);
test_set_after_add(torrent_flags::apply_ip_filter);
test_unset_after_add(torrent_flags::apply_ip_filter);
}
TORRENT_TEST(flag_paused)
{
// paused
test_add_and_get_flags(torrent_flags::paused);
}
TORRENT_TEST(flag_auto_managed)
{
// auto-managed
test_add_and_get_flags(torrent_flags::auto_managed);
test_set_after_add(torrent_flags::auto_managed);
test_unset_after_add(torrent_flags::auto_managed);
}
#ifndef TORRENT_DISABLE_SUPERSEEDING
TORRENT_TEST(flag_super_seeding)
{
// super-seeding
test_add_and_get_flags(torrent_flags::super_seeding);
test_set_after_add(torrent_flags::super_seeding);
test_unset_after_add(torrent_flags::super_seeding);
}
#endif
TORRENT_TEST(flag_sequential_download)
{
// sequential-download
test_add_and_get_flags(torrent_flags::sequential_download);
test_set_after_add(torrent_flags::sequential_download);
test_unset_after_add(torrent_flags::sequential_download);
}
TORRENT_TEST(flag_stop_when_ready)
{
// stop-when-ready
| ||
relevance 0 | ../test/test_flags.cpp:176 | change to a different test setup. currently always paused. test_set_after_add(torrent_flags::stop_when_ready); |
change to a different test setup. currently always paused.
test_set_after_add(torrent_flags::stop_when_ready);../test/test_flags.cpp:176 // super-seeding
test_add_and_get_flags(torrent_flags::super_seeding);
test_set_after_add(torrent_flags::super_seeding);
test_unset_after_add(torrent_flags::super_seeding);
}
#endif
TORRENT_TEST(flag_sequential_download)
{
// sequential-download
test_add_and_get_flags(torrent_flags::sequential_download);
test_set_after_add(torrent_flags::sequential_download);
test_unset_after_add(torrent_flags::sequential_download);
}
TORRENT_TEST(flag_stop_when_ready)
{
// stop-when-ready
test_add_and_get_flags(torrent_flags::stop_when_ready);
// setting stop-when-ready when already stopped has no effect.
test_unset_after_add(torrent_flags::stop_when_ready);
}
TORRENT_TEST(flag_disable_dht)
{
test_add_and_get_flags(torrent_flags::disable_dht);
test_set_after_add(torrent_flags::disable_dht);
test_unset_after_add(torrent_flags::disable_dht);
}
TORRENT_TEST(flag_disable_lsd)
{
test_add_and_get_flags(torrent_flags::disable_lsd);
test_set_after_add(torrent_flags::disable_lsd);
test_unset_after_add(torrent_flags::disable_lsd);
}
TORRENT_TEST(flag_disable_pex)
{
test_add_and_get_flags(torrent_flags::disable_pex);
test_set_after_add(torrent_flags::disable_pex);
test_unset_after_add(torrent_flags::disable_pex);
}
| ||
relevance 0 | ../test/test_upnp.cpp:151 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:151 , address const& ip, int port
, portmap_protocol const protocol, error_code const& err
, portmap_transport) override
{
callback_info info = {mapping, port, err};
callbacks.push_back(info);
std::cout << "mapping: " << static_cast<int>(mapping)
<< ", port: " << port << ", IP: " << ip
<< ", proto: " << static_cast<int>(protocol)
<< ", error: \"" << err.message() << "\"\n";
}
#ifndef TORRENT_DISABLE_LOGGING
bool should_log_portmap(portmap_transport) const override
{
return true;
}
void log_portmap(portmap_transport, char const* msg) const override
{
std::cout << "UPnP: " << msg << std::endl;
}
#endif
};
ip_interface pick_upnp_interface()
{
lt::io_service ios;
error_code ec;
std::vector<ip_route> const routes = enum_routes(ios, ec);
if (ec)
{
std::cerr << "failed to enumerate routes: " << ec.message() << '\n';
TEST_CHECK(false);
return {};
}
std::vector<ip_interface> const ifs = enum_net_interfaces(ios, ec);
if (ec)
{
std::cerr << "failed to enumerate network interfaces: " << ec.message() << '\n';
TEST_CHECK(false);
return {};
}
int idx = 0;
auto const iface = std::find_if(ifs.begin(), ifs.end(), [&](ip_interface const& face)
{
std::cerr << " - " << idx << ' ' << face.interface_address.to_string() << ' ' << face.name << '\n';
++idx;
if (!face.interface_address.is_v4()) return false;
if (is_loopback(face.interface_address)) return false;
auto const route = std::find_if(routes.begin(), routes.end(), [&](ip_route const& r)
{ return r.destination.is_unspecified() && string_view(face.name) == r.name; });
| ||
relevance 0 | ../test/test_file_storage.cpp:778 | test file attributes |
test file attributes../test/test_file_storage.cpp:778 | ||
relevance 0 | ../test/test_file_storage.cpp:779 | test symlinks |
test symlinks../test/test_file_storage.cpp:779 | ||
relevance 0 | ../test/test_file_storage.cpp:780 | test reorder_file (make sure internal_file_entry::swap() is used) |
test reorder_file (make sure internal_file_entry::swap() is used)../test/test_file_storage.cpp:780 | ||
relevance 0 | ../test/test_storage.cpp:990 | this should take a span of iovec_ts |
this should take a span of iovec_ts../test/test_storage.cpp:990}
#if TORRENT_ABI_VERSION == 1
TORRENT_TEST(rename_file_fastresume_deprecated)
{
test_rename_file_fastresume(true);
}
#endif
namespace {
// allocate a triangular set of test buffers: buffer i holds
// num_bufs * (i + 1) bytes. counterpart of free_iov()
void alloc_iov(iovec_t* iov, int num_bufs)
{
	for (int i = 0; i < num_bufs; ++i)
	{
		int const bytes = num_bufs * (i + 1);
		iov[i] = { new char[static_cast<std::size_t>(bytes)], bytes };
	}
}
// fill all buffers with the repeating byte sequence 0x00..0xff, with the
// counter running continuously from one buffer into the next
void fill_pattern(iovec_t* iov, int num_bufs)
{
	int counter = 0;
	for (int i = 0; i < num_bufs; ++i)
	{
		for (char& b : iov[i])
			b = char(counter++ & 0xff);
	}
}
// verify that buf holds the running byte pattern produced by
// fill_pattern(), starting at (counter & 0xff). Returns false on the
// first mismatching byte
bool check_pattern(std::vector<char> const& buf, int counter)
{
	for (char const c : buf)
	{
		if (static_cast<unsigned char>(c) != (counter & 0xff)) return false;
		++counter;
	}
	return true;
}
| ||
relevance 0 | ../test/test_storage.cpp:1015 | this should take a span |
this should take a span../test/test_storage.cpp:1015 {
for (char& v : iov[i])
{
v = char(counter & 0xff);
++counter;
}
}
}
// verify that buf holds the running byte pattern produced by
// fill_pattern(), starting at the value (counter & 0xff). Returns false
// at the first mismatching byte
bool check_pattern(std::vector<char> const& buf, int counter)
{
unsigned char const* p = reinterpret_cast<unsigned char const*>(buf.data());
for (int k = 0; k < int(buf.size()); ++k)
{
if (p[k] != (counter & 0xff)) return false;
++counter;
}
return true;
}
// release the buffers allocated by alloc_iov() and reset each entry to an
// empty span (delete[] on nullptr is a no-op, so a second call is harmless)
void free_iov(iovec_t* iov, int num_bufs)
{
	for (iovec_t* cur = iov; cur != iov + num_bufs; ++cur)
	{
		delete[] cur->data();
		*cur = { nullptr, 0 };
	}
}
} // anonymous namespace
TORRENT_TEST(iovec_copy_bufs)
{
iovec_t iov1[10];
iovec_t iov2[10];
alloc_iov(iov1, 10);
fill_pattern(iov1, 10);
TEST_CHECK(bufs_size({iov1, 10}) >= 106);
// copy exactly 106 bytes from iov1 to iov2
int num_bufs = aux::copy_bufs(iov1, 106, iov2);
// verify that the first 100 bytes is pattern 1
// and that the remaining bytes are pattern 2
int counter = 0;
for (int i = 0; i < num_bufs; ++i)
{
for (char v : iov2[i])
| ||
relevance 0 | ../test/test_torrent_info.cpp:173 | test remap_files |
test remap_files../test/test_torrent_info.cpp:173 | ||
relevance 0 | ../test/test_torrent_info.cpp:174 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_info.cpp:174 | ||
relevance 0 | ../test/test_torrent_info.cpp:175 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_info.cpp:175 | ||
relevance 0 | ../test/test_torrent_info.cpp:176 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_info.cpp:176 | ||
relevance 0 | ../test/test_torrent_info.cpp:177 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_info.cpp:177 | ||
relevance 0 | ../test/test_torrent_info.cpp:178 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_info.cpp:178 | ||
relevance 0 | ../test/test_torrent_info.cpp:179 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_info.cpp:179 | ||
relevance 0 | ../test/test_torrent_info.cpp:180 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we
shuffle them (how do you test shuffling?, load it multiple times and make
sure it's in different order at least once)../test/test_torrent_info.cpp:180 | ||
relevance 0 | ../test/test_torrent_info.cpp:183 | torrents with a zero-length name |
torrents with a zero-length name../test/test_torrent_info.cpp:183 | ||
relevance 0 | ../test/test_torrent_info.cpp:184 | torrents with a merkle tree and add_merkle_nodes |
torrents with a merkle tree and add_merkle_nodes../test/test_torrent_info.cpp:184 | ||
relevance 0 | ../test/test_torrent_info.cpp:185 | torrent with a non-dictionary info-section |
torrent with a non-dictionary info-section../test/test_torrent_info.cpp:185 | ||
relevance 0 | ../test/test_torrent_info.cpp:186 | torrents with DHT nodes |
torrents with DHT nodes../test/test_torrent_info.cpp:186 | ||
relevance 0 | ../test/test_torrent_info.cpp:187 | torrent with url-list as a single string |
torrent with url-list as a single string../test/test_torrent_info.cpp:187 | ||
relevance 0 | ../test/test_torrent_info.cpp:188 | torrent with http seed as a single string |
torrent with http seed as a single string../test/test_torrent_info.cpp:188 | ||
relevance 0 | ../test/test_torrent_info.cpp:189 | torrent with a comment |
torrent with a comment../test/test_torrent_info.cpp:189 | ||
relevance 0 | ../test/test_torrent_info.cpp:190 | torrent with an SSL cert |
torrent with an SSL cert../test/test_torrent_info.cpp:190 | ||
relevance 0 | ../test/test_torrent_info.cpp:191 | torrent with attributes (executable and hidden) |
torrent with attributes (executable and hidden)../test/test_torrent_info.cpp:191 | ||
relevance 0 | ../test/test_torrent_info.cpp:192 | torrent_info::add_tracker |
torrent_info::add_tracker../test/test_torrent_info.cpp:192 | ||
relevance 0 | ../test/test_torrent_info.cpp:193 | torrent_info constructor that takes an invalid bencoded buffer |
torrent_info constructor that takes an invalid bencoded buffer../test/test_torrent_info.cpp:193 | ||
relevance 0 | ../test/test_torrent_info.cpp:194 | verify_encoding with a string that triggers character replacement |
verify_encoding with a string that triggers character replacement../test/test_torrent_info.cpp:194 { "invalid_name.torrent", errors::torrent_missing_name },
{ "invalid_info.torrent", errors::torrent_missing_info },
{ "string.torrent", errors::torrent_is_no_dict },
{ "negative_size.torrent", errors::torrent_invalid_length },
{ "negative_file_size.torrent", errors::torrent_invalid_length },
{ "invalid_path_list.torrent", errors::torrent_invalid_name},
{ "missing_path_list.torrent", errors::torrent_missing_name },
{ "invalid_pieces.torrent", errors::torrent_missing_pieces },
{ "unaligned_pieces.torrent", errors::torrent_invalid_hashes },
{ "invalid_root_hash.torrent", errors::torrent_invalid_hashes },
{ "invalid_root_hash2.torrent", errors::torrent_missing_pieces },
{ "invalid_merkle.torrent", errors::no_files_in_torrent},
{ "invalid_file_size.torrent", errors::torrent_invalid_length },
{ "invalid_symlink.torrent", errors::torrent_invalid_name },
{ "many_pieces.torrent", errors::too_many_pieces_in_torrent },
{ "no_files.torrent", errors::no_files_in_torrent},
};
} // anonymous namespace
// verify that web seeds are loaded from both the "url-list" and
// "httpseeds" keys, and that 3 + 3 entries (each list containing one
// duplicate URL) are expected to yield 4 web seeds in total
TORRENT_TEST(url_list_and_httpseeds)
{
	entry info;
	info["pieces"] = "aaaaaaaaaaaaaaaaaaaa";
	info["name.utf-8"] = "test1";
	info["name"] = "test__";
	info["piece length"] = 16 * 1024;
	info["length"] = 3245;

	// the same URL twice, plus one distinct URL
	entry::list_type urls;
	urls.push_back(entry("http://foo.com/bar1"));
	urls.push_back(entry("http://foo.com/bar1"));
	urls.push_back(entry("http://foo.com/bar2"));
	entry const url_entry(urls);

	entry torrent;
	torrent["url-list"] = url_entry;
	torrent["httpseeds"] = url_entry;
	torrent["info"] = info;

	std::vector<char> buf;
	bencode(std::back_inserter(buf), torrent);
	torrent_info ti(buf, from_span);
	TEST_EQUAL(ti.web_seeds().size(), 4);
}
TORRENT_TEST(add_url_seed)
{
torrent_info ti(sha1_hash(" "));
TEST_EQUAL(ti.web_seeds().size(), 0);
ti.add_url_seed("http://test.com");
| ||
relevance 0 | ../test/test_block_cache.cpp:484 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:484 | ||
relevance 0 | ../test/test_block_cache.cpp:485 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:485 | ||
relevance 0 | ../test/test_block_cache.cpp:486 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:486 | ||
relevance 0 | ../test/test_block_cache.cpp:487 | test free_piece |
test free_piece../test/test_block_cache.cpp:487 | ||
relevance 0 | ../test/test_block_cache.cpp:488 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:488 | ||
relevance 0 | ../test/test_block_cache.cpp:489 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:489 // return the reference to the buffer we just read
rj.argument = remove_flags_t{};
tailqueue<disk_io_job> jobs;
bc.clear(jobs);
}
} // anonymous namespace
// driver running each block_cache sub-test in sequence; each helper sets
// up and tears down its own cache state
TORRENT_TEST(block_cache)
{
test_write();
test_flush();
test_insert();
test_evict();
test_arc_promote();
test_arc_unghost();
test_iovec();
test_unaligned_read();
}
// exercise evicting a cached piece: after mark_for_eviction the piece
// count must drop back to zero.
// (bc, rj, pm, alloc and ret are provided by the TEST_SETUP macro)
TORRENT_TEST(delete_piece)
{
TEST_SETUP;
TEST_CHECK(bc.num_pieces() == 0);
INSERT(0, 0);
TEST_CHECK(bc.num_pieces() == 1);
// build a read job for a range of the piece we just inserted
rj.action = job_action_t::read;
rj.d.io.offset = 0x2000;
rj.d.io.buffer_size = 0x4000;
rj.piece = piece_index_t(0);
rj.storage = pm;
rj.argument = remove_flags_t{};
// the requested range is not in cache, so the read is expected to fail
ret = bc.try_read(&rj, alloc);
TEST_EQUAL(ret, -1);
cached_piece_entry* pe_ = bc.find_piece(pm.get(), piece_index_t(0));
bc.mark_for_eviction(pe_, block_cache::disallow_ghost);
TEST_CHECK(bc.num_pieces() == 0);
}
| ||
relevance 0 | ../test/test_fast_extension.cpp:1076 | test sending invalid requests (out of bound piece index, offsets and sizes) |
test sending invalid requests (out of bound piece index, offsets and
sizes)../test/test_fast_extension.cpp:1076 | ||
relevance 0 | ../test/test_bloom_filter.cpp:134 | test size() |
test size()../test/test_bloom_filter.cpp:134 | ||
relevance 0 | ../test/test_bloom_filter.cpp:135 | test clear() |
test clear()../test/test_bloom_filter.cpp:135 sha1_hash k("\x01\x00\x02\x00 ");
TEST_CHECK(!filter.find(k));
filter.set(k);
TEST_CHECK(filter.find(k));
std::uint8_t compare[4] = { 0x16, 0xff, 0x55, 0xaa};
bits_out = filter.to_string();
TEST_EQUAL(memcmp(compare, bits_out.c_str(), 4), 0);
}
} // anonymous namespace
// driver running each bloom_filter sub-test in turn
TORRENT_TEST(bloom_filter)
{
test_set_and_get();
test_set_bits();
test_count_zeroes();
test_to_from_string();
}
| ||
relevance 0 | ../test/test_dht.cpp:465 | check to make sure the "best" items are stored |
check to make sure the "best" items are stored../test/test_dht.cpp:465 , msg_args().target(items[j].target));
key_desc_t const desc[] =
{
{ "r", bdecode_node::dict_t, 0, key_desc_t::parse_children },
{ "v", bdecode_node::dict_t, 0, 0},
{ "id", bdecode_node::string_t, 20, key_desc_t::last_child},
{ "y", bdecode_node::string_t, 1, 0},
};
bdecode_node parsed[4];
char error_string[200];
int ret = verify_message(response, desc, parsed, error_string);
if (ret)
{
items_num.insert(items_num.begin(), j);
}
}
TEST_EQUAL(items_num.size(), 4);
}
// fold step: add the distance exponent between e's node ID and ref to the
// running sum s (suitable as an accumulate/fold operation over node lists)
int sum_distance_exp(int s, node_entry const& e, node_id const& ref)
{
	int const exp = distance_exp(e.id, ref);
	return s + exp;
}
std::vector<tcp::endpoint> g_got_peers;
// test callback: append every reported peer endpoint to the global
// g_got_peers list, preserving order
void get_peers_cb(std::vector<tcp::endpoint> const& peers)
{
	for (tcp::endpoint const& ep : peers)
		g_got_peers.push_back(ep);
}
std::vector<dht::item> g_got_items;
dht::item g_put_item;
int g_put_count;
// test callback for mutable-item lookups: record non-empty items, but only
// when the `a` flag is set (results with a == false are ignored)
void get_mutable_item_cb(dht::item const& i, bool a)
{
	if (!a || i.empty()) return;
	g_got_items.push_back(i);
}
void put_mutable_item_data_cb(dht::item& i)
{
if (!i.empty())
g_got_items.push_back(i);
| ||
relevance 0 | ../test/test_dht.cpp:3257 | this won't work because the second node isn't pinged so it won't be added to the routing table |
this won't work because the second node isn't pinged so it won't
be added to the routing table../test/test_dht.cpp:3257 bool ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_EQUAL(parsed[3].int_value(), 1);
// should have one node now, which is 4.4.4.4:1234
TEST_EQUAL(std::get<0>(node.size()), 1);
// and no replacement nodes
TEST_EQUAL(std::get<1>(node.size()), 0);
// now, disable read_only, try again.
g_sent_packets.clear();
sett.read_only = false;
send_dht_request(node, "get", source, &response);
// sender should be added to replacement bucket
TEST_EQUAL(std::get<1>(node.size()), 1);
g_sent_packets.clear();
#if 0
target = generate_next();
node.get_item(target, get_immutable_item_cb);
// since we have 2 nodes, we should have two packets.
TEST_EQUAL(g_sent_packets.size(), 2);
// both of them shouldn't have a 'ro' key.
node_from_entry(g_sent_packets.front().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
node_from_entry(g_sent_packets.back().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
#endif
}
#ifndef TORRENT_DISABLE_LOGGING
// these tests rely on logging being enabled
TORRENT_TEST(invalid_error_msg)
{
| ||
relevance 0 | ../test/test_dht.cpp:4047 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:4047 TEST_CHECK(all_in_same_bucket(b, to_hash("0800000000000000000000000000000000000000"), 4) == false);
}
{
dht::bucket_t b = {
n(nullptr, "0800000000000000000000000000000000000000"),
n(nullptr, "0800000000000000000000000000000000000000"),
};
TEST_CHECK(all_in_same_bucket(b, to_hash("0800000000000000000000000000000000000000"), 4) == true);
}
{
dht::bucket_t b = {
n(nullptr, "0007000000000000000000000000000000000000"),
n(nullptr, "0004000000000000000000000000000000000000"),
};
TEST_CHECK(all_in_same_bucket(b, to_hash("0005000000000000000000000000000000000000"), 13) == true);
}
}
#else
// placeholder so the suite still contains a "dht" test when the real DHT
// tests are excluded by the preprocessor (the #else branch above)
TORRENT_TEST(dht)
{
// dummy dht test
TEST_CHECK(true);
}
#endif
| ||
relevance 0 | ../test/test_resolve_links.cpp:86 | test files with different piece size (negative test) |
test files with different piece size (negative test)../test/test_resolve_links.cpp:86 { "test2", "test1_pad_files", 0},
{ "test3", "test1_pad_files", 0},
{ "test2", "test1_single", 0},
// these are all padded. The first small file will accidentally also
// match, even though it's not tail padded, the following file is identical
{ "test2_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test3_pad_files", 2},
{ "test2_pad_files", "test3_pad_files", 2},
// one might expect this to work, but since the tail of the single file
// torrent is not padded, the last piece hash won't match
{ "test1_pad_files", "test1_single", 0},
// if it's padded on the other hand, it will work
{ "test1_pad_files", "test1_single_padded", 1},
};
| ||
relevance 0 | ../test/test_resolve_links.cpp:89 | it would be nice to test resolving of more than just 2 files as well. like 3 single file torrents merged into one, resolving all 3 files. |
it would be nice to test resolving of more than just 2 files as well.
like 3 single file torrents merged into one, resolving all 3 files.../test/test_resolve_links.cpp:89 { "test2", "test1_single", 0},
// these are all padded. The first small file will accidentally also
// match, even though it's not tail padded, the following file is identical
{ "test2_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test3_pad_files", 2},
{ "test2_pad_files", "test3_pad_files", 2},
// one might expect this to work, but since the tail of the single file
// torrent is not padded, the last piece hash won't match
{ "test1_pad_files", "test1_single", 0},
// if it's padded on the other hand, it will work
{ "test1_pad_files", "test1_single_padded", 1},
};
TORRENT_TEST(resolve_links)
{
std::string path = combine_path(parent_path(current_working_directory())
, "mutable_test_torrents");
for (int i = 0; i < int(sizeof(test_torrents)/sizeof(test_torrents[0])); ++i)
{
test_torrent_t const& e = test_torrents[i];
std::string p = combine_path(path, e.filename1) + ".torrent";
std::printf("loading %s\n", p.c_str());
std::shared_ptr<torrent_info> ti1 = std::make_shared<torrent_info>(p);
p = combine_path(path, e.filename2) + ".torrent";
std::printf("loading %s\n", p.c_str());
std::shared_ptr<torrent_info> ti2 = std::make_shared<torrent_info>(p);
std::printf("resolving\n");
resolve_links l(ti1);
l.match(ti2, ".");
aux::vector<resolve_links::link_t, file_index_t> const& links = l.get_links();
auto const num_matches = std::size_t(std::count_if(links.begin(), links.end()
, std::bind(&resolve_links::link_t::ti, _1)));
// some debug output in case the test fails
if (num_matches > e.expected_matches)
{
file_storage const& fs = ti1->files();
| ||
relevance 0 | ../test/test_transfer.cpp:218 | these settings_pack tests belong in their own test |
these settings_pack tests belong in their own test../test/test_transfer.cpp:218 // to the time it will take to complete the test
pack.set_int(settings_pack::min_reconnect_time, 0);
pack.set_int(settings_pack::stop_tracker_timeout, 1);
pack.set_bool(settings_pack::announce_to_all_trackers, true);
pack.set_bool(settings_pack::announce_to_all_tiers, true);
// make sure we announce to both http and udp trackers
pack.set_bool(settings_pack::prefer_udp_trackers, false);
pack.set_bool(settings_pack::enable_outgoing_utp, false);
pack.set_bool(settings_pack::enable_incoming_utp, false);
pack.set_bool(settings_pack::enable_lsd, false);
pack.set_bool(settings_pack::enable_natpmp, false);
pack.set_bool(settings_pack::enable_upnp, false);
pack.set_bool(settings_pack::enable_dht, false);
pack.set_int(settings_pack::out_enc_policy, settings_pack::pe_disabled);
pack.set_int(settings_pack::in_enc_policy, settings_pack::pe_disabled);
pack.set_bool(settings_pack::allow_multiple_connections_per_ip, false);
pack.set_int(settings_pack::unchoke_slots_limit, 0);
ses1.apply_settings(pack);
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == 0);
pack.set_int(settings_pack::unchoke_slots_limit, -1);
ses1.apply_settings(pack);
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == -1);
pack.set_int(settings_pack::unchoke_slots_limit, 8);
ses1.apply_settings(pack);
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == 8);
ses2.apply_settings(pack);
torrent_handle tor1;
torrent_handle tor2;
create_directory("tmp1_transfer", ec);
std::ofstream file("tmp1_transfer/temporary");
std::shared_ptr<torrent_info> t = ::create_torrent(&file, "temporary", 32 * 1024, 13, false);
file.close();
TEST_CHECK(exists(combine_path("tmp1_transfer", "temporary")));
add_torrent_params addp(&test_storage_constructor);
addp.flags &= ~torrent_flags::paused;
addp.flags &= ~torrent_flags::auto_managed;
add_torrent_params params;
params.storage_mode = storage_mode;
params.flags &= ~torrent_flags::paused;
| ||
relevance 0 | ../test/test_transfer.cpp:313 | factor out the disk-full test into its own unit test |
factor out the disk-full test into its own unit test../test/test_transfer.cpp:313 {
flags &= ~move_storage;
tor1.move_storage("tmp1_transfer_moved");
tor2.move_storage("tmp2_transfer_moved");
std::cout << "moving storage" << std::endl;
}
if ((flags & delete_files) && st2.progress > 0.1f)
{
ses1.remove_torrent(tor1, session::delete_files);
std::cout << "deleting files" << std::endl;
std::this_thread::sleep_for(lt::seconds(1));
break;
}
// wait 10 loops before we restart the torrent. This lets
// us catch all events that failed (and would put the torrent
// back into upload mode) before we restart it.
if (flags & disk_full
&& !(tor2.flags() & torrent_flags::upload_mode)
&& ++upload_mode_timer > 10)
{
flags &= ~disk_full;
static_cast<test_storage*>(tor2.get_storage_impl())->set_limit(16 * 1024 * 1024);
// if we reset the upload mode too soon, there may be more disk
// jobs failing right after, putting us back in upload mode. So,
// give the disk some time to fail all disk jobs before resetting
// upload mode to false
std::this_thread::sleep_for(lt::milliseconds(500));
// then we need to drain the alert queue, so the peer_disconnects
// counter doesn't get incremented by old alerts
print_alerts(ses1, "ses1", true, true, &on_alert);
print_alerts(ses2, "ses2", true, true, &on_alert);
lt::error_code err = tor2.status().errc;
std::printf("error: \"%s\"\n", err.message().c_str());
TEST_CHECK(!err);
tor2.unset_flags(torrent_flags::upload_mode);
// at this point we probably disconnected the seed
// so we need to reconnect as well
std::printf("%s: reconnecting peer\n", time_now_string());
error_code ec2;
tor2.connect_peer(tcp::endpoint(address::from_string("127.0.0.1", ec2)
, ses1.listen_port()));
TEST_CHECK(tor2.status().is_finished == false);
| ||
relevance 0 | ../src/ut_metadata.cpp:285 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
we really need to increment the refcounter on the torrent
while this buffer is still in the peer's send buffer../src/ut_metadata.cpp:285 int const offset = piece * 16 * 1024;
metadata = m_tp.metadata().data() + offset;
metadata_piece_size = std::min(
m_tp.get_metadata_size() - offset, 16 * 1024);
TORRENT_ASSERT(metadata_piece_size > 0);
TORRENT_ASSERT(offset >= 0);
TORRENT_ASSERT(offset + metadata_piece_size <= m_tp.get_metadata_size());
}
char msg[200];
char* header = msg;
char* p = &msg[6];
int const len = bencode(p, e);
int const total_size = 2 + len + metadata_piece_size;
namespace io = detail;
io::write_uint32(total_size, header);
io::write_uint8(bt_peer_connection::msg_extended, header);
io::write_uint8(m_message_index, header);
m_pc.send_buffer({msg, len + 6});
if (metadata_piece_size)
{
m_pc.append_const_send_buffer(
span<char>(const_cast<char*>(metadata), metadata_piece_size), metadata_piece_size);
}
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_extended);
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_metadata);
}
bool on_extended(int const length
, int const extended_msg, span<char const> body) override
{
if (extended_msg != 2) return false;
if (m_message_index == 0) return false;
if (length > 17 * 1024)
{
#ifndef TORRENT_DISABLE_LOGGING
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
, "packet too big %d", length);
#endif
m_pc.disconnect(errors::invalid_metadata_message, operation_t::bittorrent, peer_connection_interface::peer_error);
return true;
}
if (!m_pc.packet_finished()) return true;
error_code ec;
bdecode_node msg = bdecode(body, ec);
if (msg.type() != bdecode_node::dict_t)
| ||
relevance 0 | ../src/ut_metadata.cpp:338 | make this an enum class |
make this an enum class../src/ut_metadata.cpp:338 {
#ifndef TORRENT_DISABLE_LOGGING
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
, "not a dictionary");
#endif
m_pc.disconnect(errors::invalid_metadata_message, operation_t::bittorrent, peer_connection_interface::peer_error);
return true;
}
bdecode_node const& type_ent = msg.dict_find_int("msg_type");
bdecode_node const& piece_ent = msg.dict_find_int("piece");
if (!type_ent || !piece_ent)
{
#ifndef TORRENT_DISABLE_LOGGING
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
, "missing or invalid keys");
#endif
m_pc.disconnect(errors::invalid_metadata_message, operation_t::bittorrent, peer_connection_interface::peer_error);
return true;
}
auto const type = static_cast<int>(type_ent.int_value());
auto const piece = static_cast<int>(piece_ent.int_value());
#ifndef TORRENT_DISABLE_LOGGING
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
, "type: %d piece: %d", type, piece);
#endif
switch (type)
{
case metadata_req:
{
if (!m_torrent.valid_metadata()
|| piece < 0 || piece >= (m_tp.get_metadata_size() + 16 * 1024 - 1) / (16 * 1024))
{
#ifndef TORRENT_DISABLE_LOGGING
if (m_pc.should_log(peer_log_alert::info))
{
m_pc.peer_log(peer_log_alert::info, "UT_METADATA"
, "have: %d invalid piece %d metadata size: %d"
, int(m_torrent.valid_metadata()), piece
, m_torrent.valid_metadata()
? m_tp.get_metadata_size() : 0);
}
#endif
write_metadata_packet(metadata_dont_have, piece);
return true;
}
if (m_pc.send_buffer_size() < send_buffer_limit)
write_metadata_packet(metadata_piece, piece);
else if (m_incoming_requests.size() < max_incoming_requests)
| ||
relevance 0 | ../src/disk_buffer_pool.cpp:207 | perhaps we should sort the buffers here? |
perhaps we should sort the buffers here?../src/disk_buffer_pool.cpp:207 {
// uh oh. We failed to allocate the buffer!
// we need to roll back and free all the buffers
// we've already allocated
for (auto j : iov)
{
if (j.data() == nullptr) break;
char* buf = j.data();
TORRENT_ASSERT(is_disk_buffer(buf, l));
remove_buffer_in_use(buf);
free_buffer_impl(buf, l);
}
return -1;
}
}
return 0;
}
// return a batch of disk buffers to the pool. The pool mutex is taken once
// for the whole batch, and the buffer watermark is re-checked at the end
void disk_buffer_pool::free_iovec(span<iovec_t const> iov)
{
	std::unique_lock<std::mutex> lock(m_pool_mutex);
	for (auto const& b : iov)
	{
		char* const buf = b.data();
		TORRENT_ASSERT(is_disk_buffer(buf, lock));
		remove_buffer_in_use(buf);
		free_buffer_impl(buf, lock);
	}
	check_buffer_level(lock);
}
char* disk_buffer_pool::allocate_buffer_impl(std::unique_lock<std::mutex>& l
, char const*)
{
TORRENT_ASSERT(m_settings_set);
TORRENT_ASSERT(m_magic == 0x1337);
TORRENT_ASSERT(l.owns_lock());
TORRENT_UNUSED(l);
char* ret = static_cast<char*>(std::malloc(default_block_size));
if (ret == nullptr)
{
m_exceeded_max_size = true;
m_trigger_cache_trim();
return nullptr;
}
++m_in_use;
#if TORRENT_USE_INVARIANT_CHECKS
| ||
relevance 0 | ../src/socks5_stream.cpp:93 | we could bind the socket here, since we know what the target endpoint is of the proxy |
we could bind the socket here, since we know what the
target endpoint is of the proxy../src/socks5_stream.cpp:93
// return the singleton error category for SOCKS errors. The function-local
// static gives lazy, thread-safe initialization (C++11 magic statics)
boost::system::error_category& socks_category()
{
static socks_error_category cat;
return cat;
}
// completion handler for resolving the proxy host name. Opens the socket
// (if not already open) and starts the async connect to the first resolved
// endpoint. `h` is the user's handler; errors are routed to it through
// handle_error()
void socks5_stream::name_lookup(error_code const& e, tcp::resolver::iterator i
, handler_type h)
{
COMPLETE_ASYNC("socks5_stream::name_lookup");
if (handle_error(e, h)) return;
error_code ec;
if (!m_sock.is_open())
{
// open with the same protocol family as the resolved endpoint
m_sock.open(i->endpoint().protocol(), ec);
if (handle_error(ec, h)) return;
}
ADD_OUTSTANDING_ASYNC("socks5_stream::connected");
m_sock.async_connect(i->endpoint(), std::bind(
&socks5_stream::connected, this, _1, std::move(h)));
}
void socks5_stream::connected(error_code const& e, handler_type h)
{
COMPLETE_ASYNC("socks5_stream::connected");
if (handle_error(e, h)) return;
using namespace libtorrent::detail;
if (m_version == 5)
{
// send SOCKS5 authentication methods
m_buffer.resize(m_user.empty()?3:4);
char* p = &m_buffer[0];
write_uint8(5, p); // SOCKS VERSION 5
if (m_user.empty())
{
write_uint8(1, p); // 1 authentication method (no auth)
write_uint8(0, p); // no authentication
}
else
{
write_uint8(2, p); // 2 authentication methods
write_uint8(0, p); // no authentication
write_uint8(2, p); // username/password
}
ADD_OUTSTANDING_ASYNC("socks5_stream::handshake1");
async_write(m_sock, boost::asio::buffer(m_buffer)
, std::bind(&socks5_stream::handshake1, this, _1, std::move(h)));
| ||
relevance 0 | ../src/session_handle.cpp:415 | in C++14, use unique_ptr and move it into the lambda |
in C++14, use unique_ptr and move it into the lambda../src/session_handle.cpp:415 return sync_call_ret<torrent_handle>(&session_impl::add_torrent, std::move(params), ecr);
}
// copying overload: forwards to the move overload with a copy of params
torrent_handle session_handle::add_torrent(add_torrent_params const& params, error_code& ec)
{
return add_torrent(add_torrent_params(params), ec);
}
// copying overload: forwards to the move overload with a copy of params
void session_handle::async_add_torrent(add_torrent_params const& params)
{
async_add_torrent(add_torrent_params(params));
}
// queue an add-torrent operation on the session's main thread without
// blocking the caller. Ownership of the params is transferred to the
// posted call; the scope guard deletes them unless disarm() is reached
void session_handle::async_add_torrent(add_torrent_params&& params)
{
TORRENT_ASSERT_PRECOND(!params.save_path.empty());
// we cannot capture a unique_ptr into a lambda in c++11, so we use a raw
// pointer for now. async_call uses a lambda expression to post the call
// to the main thread
auto* p = new add_torrent_params(std::move(params));
auto guard = aux::scope_end([p]{ delete p; });
// make the save path absolute before handing it to the session
p->save_path = complete(p->save_path);
#if TORRENT_ABI_VERSION == 1
handle_backwards_compatible_resume_data(*p);
#endif
async_call(&session_impl::async_add_torrent, p);
// the call was posted; the session side now owns p
guard.disarm();
}
#ifndef BOOST_NO_EXCEPTIONS
#if TORRENT_ABI_VERSION == 1
// if the torrent already exists, this will throw duplicate_torrent
torrent_handle session_handle::add_torrent(
torrent_info const& ti
, std::string const& save_path
, entry const& resume_data
, storage_mode_t storage_mode
, bool paused
, storage_constructor_type sc)
{
add_torrent_params p(std::move(sc));
p.ti = std::make_shared<torrent_info>(ti);
p.save_path = save_path;
if (resume_data.type() != entry::undefined_t)
{
bencode(std::back_inserter(p.resume_data), resume_data);
}
p.storage_mode = storage_mode;
| ||
relevance 0 | ../src/peer_connection.cpp:1098 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1098
int rate = 0;
// if we haven't received any data recently, the current download rate
// is not representative
if (aux::time_now() - m_last_piece > seconds(30) && m_download_rate_peak > 0)
{
rate = m_download_rate_peak;
}
else if (aux::time_now() - m_last_unchoked < seconds(5)
&& m_statistics.total_payload_upload() < 2 * 0x4000)
{
// if we're have only been unchoked for a short period of time,
// we don't know what rate we can get from this peer. Instead of assuming
// the lowest possible rate, assume the average.
int peers_with_requests = int(stats_counters()[counters::num_peers_down_requests]);
// avoid division by 0
if (peers_with_requests == 0) peers_with_requests = 1;
rate = t->statistics().transfer_rate(stat::download_payload) / peers_with_requests;
}
else
{
// current download rate in bytes per seconds
rate = m_statistics.transfer_rate(stat::download_payload);
}
// avoid division by zero
if (rate < 50) rate = 50;
// average of current rate and peak
// rate = (rate + m_download_rate_peak) / 2;
return milliseconds((m_outstanding_bytes + extra_bytes
+ m_queued_time_critical * t->block_size() * 1000) / rate);
}
// add raw byte counts to this peer's transfer statistics
// (asserts it runs on the expected thread)
void peer_connection::add_stat(std::int64_t const downloaded, std::int64_t const uploaded)
{
TORRENT_ASSERT(is_single_thread());
m_statistics.add_stat(downloaded, uploaded);
}
void peer_connection::received_bytes(int const bytes_payload, int const bytes_protocol)
{
TORRENT_ASSERT(is_single_thread());
m_statistics.received_bytes(bytes_payload, bytes_protocol);
if (m_ignore_stats) return;
std::shared_ptr<torrent> t = m_torrent.lock();
if (!t) return;
| ||
relevance 0 | ../src/peer_connection.cpp:3458 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3458
// if the peer has the piece and we want
// to download it, request it
if (index < m_have_piece.end_index()
&& m_have_piece[index]
&& !t->has_piece_passed(index)
&& t->valid_metadata()
&& t->has_picker()
&& t->picker().piece_priority(index) > dont_download)
{
t->peer_is_interesting(*this);
}
}
// return this connection's allowed-fast piece list. The associated torrent
// must still be alive (asserted below)
std::vector<piece_index_t> const& peer_connection::allowed_fast()
{
TORRENT_ASSERT(is_single_thread());
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
return m_allowed_fast;
}
// returns true if this peer is in a state where time-critical piece
// requests may be sent to it
bool peer_connection::can_request_time_critical() const
{
TORRENT_ASSERT(is_single_thread());
// only unchoked peers we're interested in can take requests
if (has_peer_choked() || !is_interesting()) return false;
// don't queue up more than twice the desired queue depth
if (int(m_download_queue.size()) + int(m_request_queue.size())
> m_desired_queue_size * 2) return false;
if (on_parole()) return false;
if (m_disconnecting) return false;
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
// no downloads happen in upload mode
if (t->upload_mode()) return false;
// ignore snubbed peers, since they're not likely to return pieces in a
// timely manner anyway
if (m_snubbed) return false;
return true;
}
bool peer_connection::make_time_critical(piece_block const& block)
{
TORRENT_ASSERT(is_single_thread());
auto const rit = std::find_if(m_request_queue.begin()
, m_request_queue.end(), aux::has_block(block));
if (rit == m_request_queue.end()) return false;
#if TORRENT_USE_ASSERTS
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
TORRENT_ASSERT(t->has_picker());
| ||
relevance 0 | ../src/torrent_info.cpp:85 | remove this limit and the overloads that imply it, in favour of using load_torrent_limits |
remove this limit and the overloads that imply it, in favour of
using load_torrent_limits../src/torrent_info.cpp:85
#include <unordered_map>
#include <unordered_set>
#include <cstdint>
#include <iterator>
#include <algorithm>
#include <set>
#include <ctime>
#include <array>
namespace libtorrent {
TORRENT_EXPORT from_span_t from_span;
namespace {
// this is an arbitrary limit to avoid malicious torrents causing
// unreasonably large allocations for the merkle hash tree
// the size of the tree would be max_pieces * sizeof(int) * 2
// which is about 8 MB with this limit
constexpr int default_piece_limit = 0x100000;
bool valid_path_character(std::int32_t const c)
{
#ifdef TORRENT_WINDOWS
static const char invalid_chars[] = "?<>\"|\b*:";
#else
static const char invalid_chars[] = "";
#endif
if (c < 32) return false;
if (c > 127) return true;
return std::strchr(invalid_chars, static_cast<char>(c)) == nullptr;
}
bool filter_path_character(std::int32_t const c)
{
// these unicode characters change the writing direction of the
// string and can be used for attacks:
// https://security.stackexchange.com/questions/158802/how-can-this-executable-have-an-avi-extension
static const std::array<std::int32_t, 7> bad_cp = {{0x202a, 0x202b, 0x202c, 0x202d, 0x202e, 0x200e, 0x200f}};
if (std::find(bad_cp.begin(), bad_cp.end(), c) != bad_cp.end()) return true;
static const char invalid_chars[] = "/\\";
if (c > 127) return false;
return std::strchr(invalid_chars, static_cast<char>(c)) != nullptr;
}
} // anonymous namespace
// fixes invalid UTF-8 sequences
bool verify_encoding(std::string& target)
| ||
relevance 0 | ../src/torrent_info.cpp:767 | this should be considered a failure, and the .torrent file rejected |
this should be considered a failure, and the .torrent file
rejected../src/torrent_info.cpp:767 std::string ext = extension(filename);
int cnt = 0;
for (;;)
{
++cnt;
char new_ext[50];
std::snprintf(new_ext, sizeof(new_ext), ".%d%s", cnt, ext.c_str());
filename = base + new_ext;
boost::crc_optimal<32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF, true, true> crc;
process_string_lowercase(crc, filename);
std::uint32_t const new_hash = crc.checksum();
if (files.find(new_hash) == files.end())
{
files.insert({new_hash, {i, 0}});
break;
}
++num_collisions;
if (num_collisions > 100)
{
}
}
copy_on_write();
m_files.rename_file(i, filename);
}
}
void torrent_info::remap_files(file_storage const& f)
{
INVARIANT_CHECK;
TORRENT_ASSERT(is_loaded());
// the new specified file storage must have the exact
// same size as the current file storage
TORRENT_ASSERT(m_files.total_size() == f.total_size());
if (m_files.total_size() != f.total_size()) return;
copy_on_write();
m_files = f;
m_files.set_num_pieces(m_orig_files->num_pieces());
m_files.set_piece_length(m_orig_files->piece_length());
}
#if TORRENT_ABI_VERSION == 1
torrent_info::torrent_info(lazy_entry const& torrent_file, error_code& ec)
{
std::pair<char const*, int> buf = torrent_file.data_section();
bdecode_node e;
if (bdecode(buf.first, buf.first + buf.second, e, ec) != 0)
return;
| ||
relevance 0 | ../src/magnet_uri.cpp:271 | what's the right number here? |
what's the right number here?../src/magnet_uri.cpp:271 return;
}
p.info_hash = info_hash;
has_ih = true;
}
else if (name == "so"_sv) // select-only (files)
{
// accept only digits, '-' and ','
if (std::any_of(value.begin(), value.end(), [](char c)
{ return !is_digit(c) && c != '-' && c != ','; }))
continue;
do
{
string_view token;
std::tie(token, value) = split_string(value, ',');
if (token.empty()) continue;
int idx1, idx2;
constexpr int max_index = 10000; // can't risk out of memory
auto const divider = token.find_first_of('-');
if (divider != std::string::npos) // it's a range
{
if (divider == 0) // no start index
continue;
if (divider == token.size() - 1) // no end index
continue;
idx1 = std::atoi(token.substr(0, divider).to_string().c_str());
if (idx1 < 0 || idx1 > max_index) // invalid index
continue;
idx2 = std::atoi(token.substr(divider + 1).to_string().c_str());
if (idx2 < 0 || idx2 > max_index) // invalid index
continue;
if (idx1 > idx2) // wrong range limits
continue;
}
else // it's an index
{
idx1 = std::atoi(token.to_string().c_str());
if (idx1 < 0 || idx1 > max_index) // invalid index
continue;
idx2 = idx1;
}
if (int(p.file_priorities.size()) <= idx2)
p.file_priorities.resize(static_cast<std::size_t>(idx2) + 1, dont_download);
| ||
relevance 0 | ../src/part_file.cpp:246 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal../src/part_file.cpp:246 && ec == boost::system::errc::no_such_file_or_directory)
{
// this means the directory the file is in doesn't exist.
// so create it
ec.clear();
create_directories(m_path, ec);
if (ec) return;
f = std::make_shared<file>(fn, mode, ec);
}
if (!ec) m_file = std::move(f);
}
void part_file::free_piece(piece_index_t const piece)
{
std::lock_guard<std::mutex> l(m_mutex);
auto const i = m_piece_map.find(piece);
if (i == m_piece_map.end()) return;
m_free_slots.push_back(i->second);
m_piece_map.erase(i);
m_dirty_metadata = true;
}
void part_file::move_partfile(std::string const& path, error_code& ec)
{
std::lock_guard<std::mutex> l(m_mutex);
flush_metadata_impl(ec);
if (ec) return;
// we're only supposed to move part files from a fence job. i.e. no other
// disk jobs are supposed to be in-flight at this point
TORRENT_ASSERT(!m_file || m_file.use_count() == 1);
m_file.reset();
if (!m_piece_map.empty())
{
std::string old_path = combine_path(m_path, m_name);
std::string new_path = combine_path(path, m_name);
rename(old_path, new_path, ec);
if (ec == boost::system::errc::no_such_file_or_directory)
ec.clear();
if (ec)
{
copy_file(old_path, new_path, ec);
if (ec) return;
| ||
relevance 0 | ../src/part_file.cpp:358 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
and flushing it, update the slot entries as we go../src/part_file.cpp:358 TORRENT_ASSERT(j->second == slot);
m_free_slots.push_back(j->second);
m_piece_map.erase(j);
m_dirty_metadata = true;
}
}
}
file_offset += block_to_copy;
piece_offset = 0;
size -= block_to_copy;
}
}
void part_file::flush_metadata(error_code& ec)
{
std::lock_guard<std::mutex> l(m_mutex);
flush_metadata_impl(ec);
}
void part_file::flush_metadata_impl(error_code& ec)
{
// do we need to flush the metadata?
if (m_dirty_metadata == false) return;
if (m_piece_map.empty())
{
m_file.reset();
// if we don't have any pieces left in the
// part file, remove it
std::string const p = combine_path(m_path, m_name);
remove(p, ec);
if (ec == boost::system::errc::no_such_file_or_directory)
ec.clear();
return;
}
open_file(open_mode::read_write | open_mode::attribute_hidden, ec);
if (ec) return;
std::vector<char> header(static_cast<std::size_t>(m_header_size));
using namespace libtorrent::detail;
char* ptr = header.data();
write_uint32(m_max_pieces, ptr);
write_uint32(m_piece_size, ptr);
for (piece_index_t piece(0); piece < piece_index_t(m_max_pieces); ++piece)
| ||
relevance 0 | ../src/packet_buffer.cpp:155 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:155 {
INVARIANT_CHECK;
TORRENT_ASSERT_VAL(size <= 0xffff, size);
std::uint32_t new_size = m_capacity == 0 ? 16 : m_capacity;
while (new_size < size)
new_size <<= 1;
aux::unique_ptr<packet_ptr[], index_type> new_storage(new packet_ptr[new_size]);
for (index_type i = m_first; i < (m_first + m_capacity); ++i)
new_storage[i & (new_size - 1)] = std::move(m_storage[i & (m_capacity - 1)]);
m_storage = std::move(new_storage);
m_capacity = new_size;
}
packet_ptr packet_buffer::remove(index_type idx)
{
INVARIANT_CHECK;
if (idx >= m_first + m_capacity)
return packet_ptr();
if (compare_less_wrap(idx, m_first, 0xffff))
return packet_ptr();
std::size_t const mask = m_capacity - 1;
packet_ptr old_value = std::move(m_storage[idx & mask]);
m_storage[idx & mask].reset();
if (old_value)
{
--m_size;
if (m_size == 0) m_last = m_first;
}
if (idx == m_first && m_size != 0)
{
++m_first;
for (index_type i = 0; i < m_capacity; ++i, ++m_first)
if (m_storage[m_first & mask]) break;
m_first &= 0xffff;
}
if (((idx + 1) & 0xffff) == m_last && m_size != 0)
{
--m_last;
for (index_type i = 0; i < m_capacity; ++i, --m_last)
if (m_storage[m_last & mask]) break;
++m_last;
m_last &= 0xffff;
| ||
relevance 0 | ../src/session_impl.cpp:1328 | it would be nice to reserve() these vectors up front |
it would be nice to reserve() these vectors up front../src/session_impl.cpp:1328 bandwidth_channel* ch = &p->channel[peer_connection::download_channel];
if (use_quota_overhead(ch, amount_down))
ret |= 1 << peer_connection::download_channel;
ch = &p->channel[peer_connection::upload_channel];
if (use_quota_overhead(ch, amount_up))
ret |= 1 << peer_connection::upload_channel;
}
return ret;
}
// session_impl is responsible for deleting 'pack'
void session_impl::apply_settings_pack(std::shared_ptr<settings_pack> pack)
{
INVARIANT_CHECK;
apply_settings_pack_impl(*pack);
}
settings_pack session_impl::get_settings() const
{
settings_pack ret;
for (int i = settings_pack::string_type_base;
i < settings_pack::max_string_setting_internal; ++i)
{
ret.set_str(i, m_settings.get_str(i));
}
for (int i = settings_pack::int_type_base;
i < settings_pack::max_int_setting_internal; ++i)
{
ret.set_int(i, m_settings.get_int(i));
}
for (int i = settings_pack::bool_type_base;
i < settings_pack::max_bool_setting_internal; ++i)
{
ret.set_bool(i, m_settings.get_bool(i));
}
return ret;
}
void session_impl::apply_settings_pack_impl(settings_pack const& pack)
{
bool const reopen_listen_port =
#if TORRENT_ABI_VERSION == 1
(pack.has_val(settings_pack::ssl_listen)
&& pack.get_int(settings_pack::ssl_listen)
!= m_settings.get_int(settings_pack::ssl_listen))
||
#endif
(pack.has_val(settings_pack::listen_interfaces)
&& pack.get_str(settings_pack::listen_interfaces)
!= m_settings.get_str(settings_pack::listen_interfaces))
|| (pack.has_val(settings_pack::proxy_type)
| ||
relevance 0 | ../src/session_impl.cpp:1776 | could this function be merged with expand_unspecified_addresses? right now both listen_endpoint_t and listen_interface_t are almost identical, maybe the latter could be removed too |
could this function be merged with expand_unspecified_addresses?
right now both listen_endpoint_t and listen_interface_t are almost
identical, maybe the latter could be removed too../src/session_impl.cpp:1776 session_log("FATAL SESSION ERROR (%s : %d) [%s]"
, ec.category().name(), ec.value(), ec.message().c_str());
#endif
this->abort();
}
void session_impl::on_ip_change(error_code const& ec)
{
#ifndef TORRENT_DISABLE_LOGGING
if (!ec)
session_log("received ip change from internal ip_notifier");
else
session_log("received error on_ip_change: %d, %s", ec.value(), ec.message().c_str());
#endif
if (ec || m_abort || !m_ip_notifier) return;
m_ip_notifier->async_wait([this] (error_code const& e)
{ this->wrap(&session_impl::on_ip_change, e); });
reopen_network_sockets({});
}
void interface_to_endpoints(listen_interface_t const& iface
, listen_socket_flags_t flags
, span<ip_interface const> const ifs
, std::vector<listen_endpoint_t>& eps)
{
flags |= iface.local ? listen_socket_t::local_network : listen_socket_flags_t{};
transport const ssl = iface.ssl ? transport::ssl : transport::plaintext;
// First, check to see if it's an IP address
error_code err;
address const adr = make_address(iface.device.c_str(), err);
if (!err)
{
eps.emplace_back(adr, iface.port, std::string{}, ssl, flags);
}
else
{
flags |= listen_socket_t::was_expanded;
// this is the case where device names a network device. We need to
// enumerate all IPs associated with this device
for (auto const& ipface : ifs)
{
// we're looking for a specific interface, and its address
// (which must be of the same family as the address we're
// connecting to)
if (iface.device != ipface.name) continue;
// record whether the device has a gateway associated with it
// (which indicates it can be used to reach the internet)
// if the IP address tell us it's loopback or link-local, don't
| ||
relevance 0 | ../src/session_impl.cpp:2081 | it would probably be better to do this by having a listen-socket "version" number that gets bumped. And instead of setting a bool to disable a tracker, we set the version number that it was disabled at. This change would affect the ABI in 1.2, so should be done in 2.0 or later |
it would probably be better to do this by having a
listen-socket "version" number that gets bumped. And instead of
setting a bool to disable a tracker, we set the version number that
it was disabled at. This change would affect the ABI in 1.2, so
should be done in 2.0 or later../src/session_impl.cpp:2081 {
for (auto const& s : m_listen_sockets)
remap_ports(remap_natpmp_and_upnp, *s);
}
else
{
// new sockets need to map ports even if the caller did not request
// re-mapping
for (auto const& s : new_sockets)
remap_ports(remap_natpmp_and_upnp, *s);
}
update_lsd();
#if TORRENT_USE_I2P
open_new_incoming_i2p_connection();
#endif
// trackers that were not reachable, may have become reachable now.
// so clear the "disabled" flags to let them be tried one more time
for (auto& t : m_torrents)
t.second->enable_all_trackers();
}
void session_impl::reopen_network_sockets(reopen_network_flags_t const options)
{
reopen_listen_sockets(bool(options & session_handle::reopen_map_ports));
}
namespace {
template <typename MapProtocol, typename ProtoType, typename EndpointType>
void map_port(MapProtocol& m, ProtoType protocol, EndpointType const& ep
, port_mapping_t& map_handle)
{
if (map_handle != port_mapping_t{-1}) m.delete_mapping(map_handle);
map_handle = port_mapping_t{-1};
address const addr = ep.address();
// with IPv4 the interface might be behind NAT so we can't skip them
// based on the scope of the local address
if (addr.is_v6() && is_local(addr))
return;
// only update this mapping if we actually have a socket listening
if (ep != EndpointType())
map_handle = m.add_mapping(protocol, ep.port(), ep);
}
}
void session_impl::remap_ports(remap_port_mask_t const mask
, listen_socket_t& s)
| ||
relevance 0 | ../src/session_impl.cpp:3316 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:3316 // --------------------------------------------------------------
if (!m_paused) m_auto_manage_time_scaler--;
if (m_auto_manage_time_scaler < 0)
{
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
recalculate_auto_managed_torrents();
}
// --------------------------------------------------------------
// check for incoming connections that might have timed out
// --------------------------------------------------------------
for (auto i = m_connections.begin(); i != m_connections.end();)
{
peer_connection* p = (*i).get();
++i;
// ignore connections that already have a torrent, since they
// are ticked through the torrents' second_tick
if (!p->associated_torrent().expired()) continue;
int timeout = m_settings.get_int(settings_pack::handshake_timeout);
#if TORRENT_USE_I2P
timeout *= is_i2p(*p->get_socket()) ? 4 : 1;
#endif
if (m_last_tick - p->connected_time () > seconds(timeout))
p->disconnect(errors::timed_out, operation_t::bittorrent);
}
// --------------------------------------------------------------
// second_tick every torrent (that wants it)
// --------------------------------------------------------------
#if TORRENT_DEBUG_STREAMING > 0
std::printf("\033[2J\033[0;0H");
#endif
aux::vector<torrent*>& want_tick = m_torrent_lists[torrent_want_tick];
for (int i = 0; i < int(want_tick.size()); ++i)
{
torrent& t = *want_tick[i];
TORRENT_ASSERT(t.want_tick());
TORRENT_ASSERT(!t.is_aborted());
t.second_tick(tick_interval_ms);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
// removed from the list) we need to back up the counter
// to not miss the torrent after it
if (!t.want_tick()) --i;
}
| ||
relevance 0 | ../src/session_impl.cpp:3349 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3349#if TORRENT_DEBUG_STREAMING > 0
std::printf("\033[2J\033[0;0H");
#endif
aux::vector<torrent*>& want_tick = m_torrent_lists[torrent_want_tick];
for (int i = 0; i < int(want_tick.size()); ++i)
{
torrent& t = *want_tick[i];
TORRENT_ASSERT(t.want_tick());
TORRENT_ASSERT(!t.is_aborted());
t.second_tick(tick_interval_ms);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
// removed from the list) we need to back up the counter
// to not miss the torrent after it
if (!t.want_tick()) --i;
}
if (m_settings.get_bool(settings_pack::rate_limit_ip_overhead))
{
int const up_limit = upload_rate_limit(m_global_class);
int const down_limit = download_rate_limit(m_global_class);
if (down_limit > 0
&& m_stat.download_ip_overhead() >= down_limit
&& m_alerts.should_post<performance_alert>())
{
m_alerts.emplace_alert<performance_alert>(torrent_handle()
, performance_alert::download_limit_too_low);
}
if (up_limit > 0
&& m_stat.upload_ip_overhead() >= up_limit
&& m_alerts.should_post<performance_alert>())
{
m_alerts.emplace_alert<performance_alert>(torrent_handle()
, performance_alert::upload_limit_too_low);
}
}
#if TORRENT_ABI_VERSION == 1
m_peak_up_rate = std::max(m_stat.upload_rate(), m_peak_up_rate);
#endif
m_stat.second_tick(tick_interval_ms);
// --------------------------------------------------------------
// scrape paused torrents that are auto managed
// (unless the session is paused)
| ||
relevance 0 | ../src/session_impl.cpp:4055 | use a lower limit than m_settings.connections_limit to allocate up to 10% or so of connection slots for incoming connections cap this at max - 1, since we may add one below |
use a lower limit than m_settings.connections_limit
to allocate up to 10% or so of connection slots for incoming
connections
cap this at max - 1, since we may add one below../src/session_impl.cpp:4055 // boost, which are done immediately on a tracker response. These
// connections needs to be deducted from the regular connection attempt
// quota for this tick
if (m_boost_connections > 0)
{
if (m_boost_connections > max_connections)
{
m_boost_connections -= max_connections;
max_connections = 0;
}
else
{
max_connections -= m_boost_connections;
m_boost_connections = 0;
}
}
// zero connections speeds are allowed, we just won't make any connections
if (max_connections <= 0) return;
int const limit = std::min(m_settings.get_int(settings_pack::connections_limit)
- num_connections(), std::numeric_limits<int>::max() - 1);
// this logic is here to smooth out the number of new connection
// attempts over time, to prevent connecting a large number of
// sockets, wait 10 seconds, and then try again
if (m_settings.get_bool(settings_pack::smooth_connects) && max_connections > (limit+1) / 2)
max_connections = (limit + 1) / 2;
aux::vector<torrent*>& want_peers_download = m_torrent_lists[torrent_want_peers_download];
aux::vector<torrent*>& want_peers_finished = m_torrent_lists[torrent_want_peers_finished];
// if no torrent want any peers, just return
if (want_peers_download.empty() && want_peers_finished.empty()) return;
// if we don't have any connection attempt quota, return
if (max_connections <= 0) return;
int steps_since_last_connect = 0;
int const num_torrents = int(want_peers_finished.size() + want_peers_download.size());
for (;;)
{
if (m_next_downloading_connect_torrent >= int(want_peers_download.size()))
m_next_downloading_connect_torrent = 0;
if (m_next_finished_connect_torrent >= int(want_peers_finished.size()))
m_next_finished_connect_torrent = 0;
torrent* t = nullptr;
// there are prioritized torrents. Pick one of those
while (!m_prio_torrents.empty())
| ||
relevance 0 | ../src/session_impl.cpp:4200 | post a message to have this happen immediately instead of waiting for the next tick |
post a message to have this happen
immediately instead of waiting for the next tick../src/session_impl.cpp:4200 continue;
}
if (!p->is_peer_interested()
|| p->is_disconnecting()
|| p->is_connecting())
{
// this peer is not unchokable. So, if it's unchoked
// already, make sure to choke it.
if (p->is_choked())
{
p->reset_choke_counters();
continue;
}
if (pi && pi->optimistically_unchoked)
{
m_stats_counters.inc_stats_counter(counters::num_peers_up_unchoked_optimistic, -1);
pi->optimistically_unchoked = false;
// force a new optimistic unchoke
m_optimistic_unchoke_time_scaler = 0;
}
t->choke_peer(*p);
p->reset_choke_counters();
continue;
}
peers.push_back(p.get());
}
#if TORRENT_ABI_VERSION == 1
// the unchoker wants an estimate of our upload rate capacity
// (used by bittyrant)
int max_upload_rate = upload_rate_limit(m_global_class);
if (m_settings.get_int(settings_pack::choking_algorithm)
== settings_pack::bittyrant_choker
&& max_upload_rate == 0)
{
// we don't know at what rate we can upload. If we have a
// measurement of the peak, use that + 10kB/s, otherwise
// assume 20 kB/s
max_upload_rate = std::max(20000, m_peak_up_rate + 10000);
if (m_alerts.should_post<performance_alert>())
m_alerts.emplace_alert<performance_alert>(torrent_handle()
, performance_alert::bittyrant_with_no_uplimit);
}
#else
int const max_upload_rate = 0;
#endif
int const allowed_upload_slots = unchoke_sort(peers, max_upload_rate
, unchoke_interval, m_settings);
| ||
relevance 0 | ../src/session_impl.cpp:4584 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back. Perhaps the status_update_alert could even have a fixed array of n entries rather than a vector, to further improve memory locality. |
it might be a nice feature here to limit the number of torrents
to send in a single update. By just posting the first n torrents, they
would nicely be round-robined because the torrent lists are always
pushed back. Perhaps the status_update_alert could even have a fixed
array of n entries rather than a vector, to further improve memory
locality.../src/session_impl.cpp:4584 t->status(&st, flags);
}
}
void session_impl::post_torrent_updates(status_flags_t const flags)
{
INVARIANT_CHECK;
TORRENT_ASSERT(is_single_thread());
std::vector<torrent*>& state_updates
= m_torrent_lists[aux::session_impl::torrent_state_updates];
#if TORRENT_USE_ASSERTS
m_posting_torrent_updates = true;
#endif
std::vector<torrent_status> status;
status.reserve(state_updates.size());
for (auto& t : state_updates)
{
TORRENT_ASSERT(t->m_links[aux::session_impl::torrent_state_updates].in_list());
status.emplace_back();
// querying accurate download counters may require
// the torrent to be loaded. Loading a torrent, and evicting another
// one will lead to calling state_updated(), which screws with
// this list while we're working on it, and break things
t->status(&status.back(), flags);
t->clear_in_state_update();
}
state_updates.clear();
#if TORRENT_USE_ASSERTS
m_posting_torrent_updates = false;
#endif
m_alerts.emplace_alert<state_update_alert>(std::move(status));
}
void session_impl::post_session_stats()
{
if (!m_posted_stats_header)
{
m_posted_stats_header = true;
m_alerts.emplace_alert<session_stats_header_alert>();
}
m_disk_thread.update_stats_counters(m_stats_counters);
#ifndef TORRENT_DISABLE_DHT
if (m_dht)
| ||
relevance 0 | ../src/session_impl.cpp:5000 | factor out this logic into a separate function for unit testing |
factor out this logic into a separate function for unit
testing../src/session_impl.cpp:5000 if (m_settings.get_int(settings_pack::outgoing_port) > 0)
{
#ifdef TORRENT_WINDOWS
s.set_option(exclusive_address_use(true), ec);
#else
s.set_option(tcp::acceptor::reuse_address(true), ec);
#endif
// ignore errors because the underlying socket may not
// be opened yet. This happens when we're routing through
// a proxy. In that case, we don't yet know the address of
// the proxy server, and more importantly, we don't know
// the address family of its address. This means we can't
// open the socket yet. The socks abstraction layer defers
// opening it.
ec.clear();
bind_ep.port(std::uint16_t(next_port()));
}
if (is_utp(s))
{
utp_socket_impl* impl = nullptr;
transport ssl = transport::plaintext;
#ifdef TORRENT_USE_OPENSSL
if (s.get<ssl_stream<utp_stream>>() != nullptr)
{
impl = s.get<ssl_stream<utp_stream>>()->next_layer().get_impl();
ssl = transport::ssl;
}
else
#endif
impl = s.get<utp_stream>()->get_impl();
std::vector<std::shared_ptr<listen_socket_t>> with_gateways;
std::shared_ptr<listen_socket_t> match;
for (auto& ls : m_listen_sockets)
{
if (is_v4(ls->local_endpoint) != remote_address.is_v4()) continue;
if (ls->ssl != ssl) continue;
if (!(ls->flags & listen_socket_t::local_network))
with_gateways.push_back(ls);
if (match_addr_mask(ls->local_endpoint.address(), remote_address, ls->netmask))
{
// is this better than the previous match?
match = ls;
}
}
if (!match && !with_gateways.empty())
match = with_gateways[random(std::uint32_t(with_gateways.size() - 1))];
| ||
relevance 0 | ../src/session_impl.cpp:5814 | refactor, move the storage to dht_tracker |
refactor, move the storage to dht_tracker../src/session_impl.cpp:5814#ifndef TORRENT_DISABLE_LOGGING
session_log("not starting DHT, outstanding router lookups: %d"
, m_outstanding_router_lookups);
#endif
return;
}
if (m_abort)
{
#ifndef TORRENT_DISABLE_LOGGING
session_log("not starting DHT, aborting");
#endif
return;
}
#ifndef TORRENT_DISABLE_LOGGING
session_log("starting DHT, running: %s, router lookups: %d"
, m_dht ? "true" : "false", m_outstanding_router_lookups);
#endif
m_dht_storage = m_dht_storage_constructor(m_dht_settings);
m_dht = std::make_shared<dht::dht_tracker>(
static_cast<dht::dht_observer*>(this)
, m_io_service
, [=](aux::listen_socket_handle const& sock
, udp::endpoint const& ep
, span<char const> p
, error_code& ec
, udp_send_flags_t const flags)
{ send_udp_packet_listen(sock, ep, p, ec, flags); }
, m_dht_settings
, m_stats_counters
, *m_dht_storage
, std::move(m_dht_state));
for (auto& s : m_listen_sockets)
{
if (s->ssl != transport::ssl
&& !(s->flags & listen_socket_t::local_network))
{
m_dht->new_socket(s);
}
}
for (auto const& n : m_dht_router_nodes)
{
m_dht->add_router_node(n);
}
for (auto const& n : m_dht_nodes)
{
| ||
relevance 0 | ../src/session_impl.cpp:6144 | asserts that no outstanding async operations are still in flight |
asserts that no outstanding async operations are still in flight../src/session_impl.cpp:6144 void session_impl::add_obfuscated_hash(sha1_hash const& obfuscated
, std::weak_ptr<torrent> const& t)
{
m_obfuscated_torrents.insert(std::make_pair(obfuscated, t.lock()));
}
#endif // TORRENT_DISABLE_ENCRYPTION
bool session_impl::is_listening() const
{
return !m_listen_sockets.empty();
}
session_impl::~session_impl()
{
// since we're destructing the session, no more alerts will make it out to
// the user. So stop posting them now
m_alerts.set_alert_mask({});
// this is not allowed to be the network thread!
// TORRENT_ASSERT(is_not_thread());
// this can happen if we end the io_service run loop with an exception
m_connections.clear();
for (auto& t : m_torrents)
{
t.second->panic();
t.second->abort();
}
m_torrents.clear();
#if !defined TORRENT_DISABLE_ENCRYPTION
m_obfuscated_torrents.clear();
#endif
#if TORRENT_ABI_VERSION == 1
m_uuids.clear();
#endif
#if defined TORRENT_ASIO_DEBUGGING
FILE* f = fopen("wakeups.log", "w+");
if (f != nullptr)
{
time_point m = min_time();
if (!_wakeups.empty()) m = _wakeups[0].timestamp;
time_point prev = m;
std::uint64_t prev_csw = 0;
if (!_wakeups.empty()) prev_csw = _wakeups[0].context_switches;
std::fprintf(f, "abs. time\trel. time\tctx switch\tidle-wakeup\toperation\n");
for (wakeup_t const& w : _wakeups)
{
bool const idle_wakeup = w.context_switches > prev_csw;
std::fprintf(f, "%" PRId64 "\t%" PRId64 "\t%" PRId64 "\t%c\t%s\n"
, total_microseconds(w.timestamp - m)
| ||
relevance 0 | ../src/ip_notifier.cpp:37 | simulator support |
simulator support../src/ip_notifier.cpp:37 from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/aux_/ip_notifier.hpp"
#include "libtorrent/assert.hpp"
#if defined TORRENT_BUILD_SIMULATOR
#elif TORRENT_USE_NETLINK
#include "libtorrent/netlink.hpp"
#include "libtorrent/socket.hpp"
#include <array>
#elif TORRENT_USE_SYSTEMCONFIGURATION
#include <SystemConfiguration/SystemConfiguration.h>
#elif defined TORRENT_WINDOWS
#include "libtorrent/aux_/throw.hpp"
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <iphlpapi.h>
#include <mutex>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#endif
namespace libtorrent { namespace aux {
namespace {
#if defined TORRENT_BUILD_SIMULATOR
struct ip_change_notifier_impl final : ip_change_notifier
{
explicit ip_change_notifier_impl(io_service& ios)
: m_ios(ios) {}
void async_wait(std::function<void(error_code const&)> cb) override
{
m_ios.post([cb]()
{ cb(make_error_code(boost::system::errc::not_supported)); });
}
void cancel() override {}
| ||
relevance 0 | ../src/cpuid.cpp:128 | enable when aarch64 is really tested |
enable when aarch64 is really tested../src/cpuid.cpp:128 bool supports_mmx()
{
#if TORRENT_HAS_SSE
std::uint32_t cpui[4] = {0};
cpuid(cpui, 1);
return (cpui[2] & (1 << 23)) != 0;
#else
return false;
#endif
}
bool supports_arm_neon()
{
#if TORRENT_HAS_ARM_NEON && TORRENT_HAS_AUXV
#if defined __arm__
//return (getauxval(AT_HWCAP) & HWCAP_NEON);
return (helper_getauxval(16) & (1 << 12));
#elif defined __aarch64__
//return (getauxval(AT_HWCAP) & HWCAP_ASIMD);
//return (getauxval(16) & (1 << 1));
return false;
#endif
#else
return false;
#endif
}
bool supports_arm_crc32c()
{
#if TORRENT_HAS_ARM_CRC32 && TORRENT_HAS_AUXV
#if defined TORRENT_FORCE_ARM_CRC32
return true;
#elif defined __arm__
//return (getauxval(AT_HWCAP2) & HWCAP2_CRC32);
return (helper_getauxval(26) & (1 << 4));
#elif defined __aarch64__
//return (getauxval(AT_HWCAP) & HWCAP_CRC32);
return (helper_getauxval(16) & (1 << 7));
#endif
#else
return false;
#endif
}
} // anonymous namespace
bool const sse42_support = supports_sse42();
bool const mmx_support = supports_mmx();
bool const arm_neon_support = supports_arm_neon();
bool const arm_crc32c_support = supports_arm_crc32c();
} }
| ||
relevance 0 | ../src/add_torrent_params.cpp:76 | pre C++17, GCC and MSVC do not make std::string nothrow move assignable, which means no type containing a string will be nothrow move assignable by default either static_assert(std::is_nothrow_move_assignable::value , "should be nothrow move assignable"); |
pre C++17, GCC and MSVC do not make std::string nothrow move
assignable, which means no type containing a string will be nothrow move
assignable by default either
static_assert(std::is_nothrow_move_assignable::value
, "should be nothrow move assignable");../src/add_torrent_params.cpp:76 DECL_FLAG(sequential_download);
DECL_FLAG(pinned);
DECL_FLAG(stop_when_ready);
DECL_FLAG(override_trackers);
DECL_FLAG(override_web_seeds);
DECL_FLAG(need_save_resume);
DECL_FLAG(override_resume_data);
DECL_FLAG(merge_resume_trackers);
DECL_FLAG(use_resume_save_path);
DECL_FLAG(merge_resume_http_seeds);
DECL_FLAG(default_flags);
#undef DECL_FLAG
#endif // TORRENT_ABI_VERSION
static_assert(std::is_nothrow_move_constructible<add_torrent_params>::value
, "should be nothrow move constructible");
static_assert(std::is_nothrow_move_constructible<std::string>::value
, "should be nothrow move constructible");
| ||
relevance 0 | ../src/add_torrent_params.cpp:82 | it would be nice if this were nothrow default constructible static_assert(std::is_nothrow_default_constructible<add_torrent_params>::value , "should be nothrow default constructible"); |
it would be nice if this were nothrow default constructible
static_assert(std::is_nothrow_default_constructible<add_torrent_params>::value
, "should be nothrow default constructible");../src/add_torrent_params.cpp:82 DECL_FLAG(pinned);
DECL_FLAG(stop_when_ready);
DECL_FLAG(override_trackers);
DECL_FLAG(override_web_seeds);
DECL_FLAG(need_save_resume);
DECL_FLAG(override_resume_data);
DECL_FLAG(merge_resume_trackers);
DECL_FLAG(use_resume_save_path);
DECL_FLAG(merge_resume_http_seeds);
DECL_FLAG(default_flags);
#undef DECL_FLAG
#endif // TORRENT_ABI_VERSION
static_assert(std::is_nothrow_move_constructible<add_torrent_params>::value
, "should be nothrow move constructible");
static_assert(std::is_nothrow_move_constructible<std::string>::value
, "should be nothrow move constructible");
}
| ||
relevance 0 | ../src/torrent.cpp:98 | factor out cache_status to its own header |
factor out cache_status to its own header../src/torrent.cpp:98#include "libtorrent/kademlia/dht_tracker.hpp"
#include "libtorrent/peer_info.hpp"
#include "libtorrent/http_connection.hpp"
#include "libtorrent/random.hpp"
#include "libtorrent/peer_class.hpp" // for peer_class
#include "libtorrent/socket_io.hpp" // for read_*_endpoint
#include "libtorrent/ip_filter.hpp"
#include "libtorrent/request_blocks.hpp"
#include "libtorrent/performance_counters.hpp" // for counters
#include "libtorrent/resolver_interface.hpp"
#include "libtorrent/aux_/alloca.hpp"
#include "libtorrent/resolve_links.hpp"
#include "libtorrent/aux_/file_progress.hpp"
#include "libtorrent/aux_/has_block.hpp"
#include "libtorrent/alert_manager.hpp"
#include "libtorrent/disk_interface.hpp"
#include "libtorrent/broadcast_socket.hpp" // for is_ip_address
#include "libtorrent/download_priority.hpp"
#include "libtorrent/hex.hpp" // to_hex
#include "libtorrent/aux_/range.hpp"
#include "libtorrent/disk_io_thread.hpp" // for cache_status
#include "libtorrent/aux_/numeric_cast.hpp"
#include "libtorrent/aux_/path.hpp"
#include "libtorrent/aux_/generate_peer_id.hpp"
#ifndef TORRENT_DISABLE_LOGGING
#include "libtorrent/aux_/session_impl.hpp" // for tracker_logger
#endif
#include "libtorrent/aux_/torrent_impl.hpp"
using namespace std::placeholders;
namespace libtorrent {
namespace {
bool is_downloading_state(int const st)
{
switch (st)
{
case torrent_status::checking_files:
case torrent_status::allocating:
case torrent_status::checking_resume_data:
return false;
case torrent_status::downloading_metadata:
case torrent_status::downloading:
case torrent_status::finished:
case torrent_status::seeding:
return true;
default:
// unexpected state
| ||
relevance 0 | ../src/torrent.cpp:359 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily; we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
restore the tree, we need to wipe all the
bits in the have array, but not necessarily;
we might want to do a full check to see if we have
all the pieces. This is low priority since almost
no one uses merkle torrents../src/torrent.cpp:359
std::sort(m_trackers.begin(), m_trackers.end()
, [] (announce_entry const& lhs, announce_entry const& rhs)
{ return lhs.tier < rhs.tier; });
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
// --- MERKLE TREE ---
if (m_torrent_file->is_valid()
&& m_torrent_file->is_merkle_torrent())
{
if (p.merkle_tree.size() == m_torrent_file->merkle_tree().size())
{
std::vector<sha1_hash> tree(p.merkle_tree);
m_torrent_file->set_merkle_tree(tree);
}
else
{
TORRENT_ASSERT_FAIL();
}
}
if (m_torrent_file->is_valid())
{
// setting file- or piece priorities for seed mode makes no sense. If a
// torrent ends up in seed mode by accident, it can be very confusing,
// so assume the seed mode flag is not intended and don't enable it in
// that case. Also, if the resume data says we're missing a piece, we
// can't be in seed-mode.
m_seed_mode = (p.flags & torrent_flags::seed_mode)
&& std::find(p.file_priorities.begin(), p.file_priorities.end(), dont_download) == p.file_priorities.end()
&& std::find(p.piece_priorities.begin(), p.piece_priorities.end(), dont_download) == p.piece_priorities.end()
&& std::find(p.have_pieces.begin(), p.have_pieces.end(), false) == p.have_pieces.end();
m_connections_initialized = true;
}
else
{
if (!p.name.empty()) m_name.reset(new std::string(p.name));
}
#if TORRENT_ABI_VERSION == 1
// deprecated in 1.2
if (!m_url.empty() && m_uuid.empty()) m_uuid = m_url;
#endif
TORRENT_ASSERT(is_single_thread());
m_file_priority.assign(p.file_priorities.begin(), p.file_priorities.end());
| ||
relevance 0 | ../src/torrent.cpp:488 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
the metadata we just downloaded into it.../src/torrent.cpp:488 alerts().emplace_alert<torrent_update_alert>(get_handle(), info_hash(), tf->info_hash());
m_torrent_file = tf;
m_info_hash = tf->info_hash();
// now, we might already have this torrent in the session.
std::shared_ptr<torrent> t = m_ses.find_torrent(m_torrent_file->info_hash()).lock();
if (t)
{
if (!m_uuid.empty() && t->uuid().empty())
t->set_uuid(m_uuid);
if (!m_url.empty() && t->url().empty())
t->set_url(m_url);
// insert this torrent in the uuid index
if (!m_uuid.empty() || !m_url.empty())
{
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, t);
}
set_error(errors::duplicate_torrent, torrent_status::error_file_url);
abort();
return;
}
m_ses.insert_torrent(m_torrent_file->info_hash(), me, m_uuid);
// if the user added any trackers while downloading the
// .torrent file, merge them into the new tracker list
std::vector<announce_entry> new_trackers = m_torrent_file->trackers();
for (auto const& tr : m_trackers)
{
// if we already have this tracker, ignore it
if (std::any_of(new_trackers.begin(), new_trackers.end()
, [&tr] (announce_entry const& ae) { return ae.url == tr.url; }))
continue;
// insert the tracker ordered by tier
new_trackers.insert(std::find_if(new_trackers.begin(), new_trackers.end()
, [&tr] (announce_entry const& ae) { return ae.tier >= tr.tier; }), tr);
}
m_trackers.swap(new_trackers);
// add the web seeds from the .torrent file
std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
std::vector<web_seed_t> ws(web_seeds.begin(), web_seeds.end());
aux::random_shuffle(ws);
for (auto& w : ws) m_web_seeds.push_back(std::move(w));
#if !defined TORRENT_DISABLE_ENCRYPTION
| ||
relevance 0 | ../src/torrent.cpp:1551 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
this function just tells us which depth we're at right now? If so, the comment
makes sense.
any certificate that isn't the leaf (i.e. the one presented by the peer)
should be accepted automatically, given preverified is true. The leaf certificate
need to be verified to make sure its DN matches the info-hash../src/torrent.cpp:1551 // if files are checked for this torrent, call the extension
// to let it initialize itself
if (m_connections_initialized)
tp->on_files_checked();
}
#endif
#ifdef TORRENT_USE_OPENSSL
#ifdef TORRENT_MACOS_DEPRECATED_LIBCRYPTO
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif
bool torrent::verify_peer_cert(bool const preverified, boost::asio::ssl::verify_context& ctx)
{
// if the cert wasn't signed by the correct CA, fail the verification
if (!preverified) return false;
// we're only interested in checking the certificate at the end of the chain.
int depth = X509_STORE_CTX_get_error_depth(ctx.native_handle());
if (depth > 0) return true;
X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle());
// Go through the alternate names in the certificate looking for matching DNS entries
auto* gens = static_cast<GENERAL_NAMES*>(
X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr));
#ifndef TORRENT_DISABLE_LOGGING
std::string names;
bool match = false;
#endif
for (int i = 0; i < aux::openssl_num_general_names(gens); ++i)
{
GENERAL_NAME* gen = aux::openssl_general_name_value(gens, i);
if (gen->type != GEN_DNS) continue;
ASN1_IA5STRING* domain = gen->d.dNSName;
if (domain->type != V_ASN1_IA5STRING || !domain->data || !domain->length) continue;
auto const* torrent_name = reinterpret_cast<char const*>(domain->data);
std::size_t const name_length = aux::numeric_cast<std::size_t>(domain->length);
#ifndef TORRENT_DISABLE_LOGGING
if (i > 1) names += " | n: ";
names.append(torrent_name, name_length);
#endif
if (std::strncmp(torrent_name, "*", name_length) == 0
|| std::strncmp(torrent_name, m_torrent_file->name().c_str(), name_length) == 0)
{
#ifndef TORRENT_DISABLE_LOGGING
match = true;
| ||
relevance 0 | ../src/torrent.cpp:1983 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
complete and just look at those../src/torrent.cpp:1983 we_have(i);
}
}
}
set_state(torrent_status::checking_resume_data);
aux::vector<std::string, file_index_t> links;
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
if (!m_torrent_file->similar_torrents().empty()
|| !m_torrent_file->collections().empty())
{
resolve_links res(m_torrent_file);
for (auto const& ih : m_torrent_file->similar_torrents())
{
std::shared_ptr<torrent> t = m_ses.find_torrent(ih).lock();
if (!t) continue;
// Only attempt to reuse files from torrents that are seeding.
if (!t->is_seed()) continue;
res.match(t->get_torrent_copy(), t->save_path());
}
for (auto const& c : m_torrent_file->collections())
{
std::vector<std::shared_ptr<torrent>> ts = m_ses.find_collection(c);
for (auto const& t : ts)
{
// Only attempt to reuse files from torrents that are seeding.
| ||
relevance 0 | ../src/torrent.cpp:1996 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
complete and just look at those../src/torrent.cpp:1996 {
resolve_links res(m_torrent_file);
for (auto const& ih : m_torrent_file->similar_torrents())
{
std::shared_ptr<torrent> t = m_ses.find_torrent(ih).lock();
if (!t) continue;
// Only attempt to reuse files from torrents that are seeding.
if (!t->is_seed()) continue;
res.match(t->get_torrent_copy(), t->save_path());
}
for (auto const& c : m_torrent_file->collections())
{
std::vector<std::shared_ptr<torrent>> ts = m_ses.find_collection(c);
for (auto const& t : ts)
{
// Only attempt to reuse files from torrents that are seeding.
if (!t->is_seed()) continue;
res.match(t->get_torrent_copy(), t->save_path());
}
}
std::vector<resolve_links::link_t> const& l = res.get_links();
if (!l.empty())
{
for (auto const& i : l)
{
if (!i.ti) continue;
links.push_back(combine_path(i.save_path
, i.ti->files().file_path(i.file_idx)));
}
}
}
#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
#if TORRENT_USE_ASSERTS
TORRENT_ASSERT(m_outstanding_check_files == false);
m_outstanding_check_files = true;
#endif
m_ses.disk_thread().async_check_files(
m_storage, m_add_torrent_params ? m_add_torrent_params.get() : nullptr
, links, std::bind(&torrent::on_resume_data_checked
, shared_from_this(), _1, _2));
// async_check_files will gut links
#ifndef TORRENT_DISABLE_LOGGING
debug_log("init, async_check_files");
#endif
| ||
relevance 0 | ../src/torrent.cpp:2646 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
this pattern is repeated in a few places. Factor this into
a function and generalize the concept of a torrent having a
dedicated listen port../src/torrent.cpp:2646 // if the files haven't been checked yet, we're
// not ready for peers. Except, if we don't have metadata,
// we need peers to download from
if (!m_files_checked && valid_metadata()) return;
if (!m_announce_to_lsd) return;
// private torrents are never announced on LSD
if (m_torrent_file->is_valid() && m_torrent_file->priv()) return;
// i2p torrents are also never announced on LSD
// unless we allow mixed swarms
if (m_torrent_file->is_valid()
&& (torrent_file().is_i2p() && !settings().get_bool(settings_pack::allow_i2p_mixed)))
return;
if (is_paused()) return;
if (!m_ses.has_lsd()) return;
#ifdef TORRENT_USE_OPENSSL
int port = is_ssl_torrent() ? m_ses.ssl_listen_port() : m_ses.listen_port();
#else
int port = m_ses.listen_port();
#endif
// announce with the local discovery service
m_ses.announce_lsd(m_torrent_file->info_hash(), port);
}
#ifndef TORRENT_DISABLE_DHT
void torrent::dht_announce()
{
TORRENT_ASSERT(is_single_thread());
if (!m_ses.dht())
{
#ifndef TORRENT_DISABLE_LOGGING
debug_log("DHT: no dht initialized");
#endif
return;
}
if (!should_announce_dht())
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
if (!m_ses.announce_dht())
debug_log("DHT: no listen sockets");
if (m_torrent_file->is_valid() && !m_files_checked)
| ||
relevance 0 | ../src/torrent.cpp:3649 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3649 }
catch (...) { handle_exception(); }
#endif
void torrent::on_peer_name_lookup(error_code const& e
, std::vector<address> const& host_list, int const port) try
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
COMPLETE_ASYNC("torrent::on_peer_name_lookup");
#ifndef TORRENT_DISABLE_LOGGING
if (e && should_log())
debug_log("peer name lookup error: %s", e.message().c_str());
#endif
if (e || m_abort || host_list.empty() || m_ses.is_aborted()) return;
tcp::endpoint host(host_list.front(), std::uint16_t(port));
if (m_ip_filter && m_ip_filter->access(host.address()) & ip_filter::blocked)
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
error_code ec;
debug_log("blocked ip from tracker: %s", host.address().to_string(ec).c_str());
}
#endif
if (m_ses.alerts().should_post<peer_blocked_alert>())
m_ses.alerts().emplace_alert<peer_blocked_alert>(get_handle()
, host, peer_blocked_alert::ip_filter);
return;
}
if (add_peer(host, peer_info::tracker))
{
state_updated();
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
error_code ec;
debug_log("name-lookup add_peer() [ %s ] connect-candidates: %d"
, host.address().to_string(ec).c_str()
, m_peer_list ? m_peer_list->num_connect_candidates() : -1);
}
#endif
}
| ||
relevance 0 | ../src/torrent.cpp:8520 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
add a flag to ignore stats, and only care about resume data for
content. For unchanged files, don't trigger a load of the metadata
just to save an empty resume data file../src/torrent.cpp:8520 if (m_complete != 0xffffff) seeds = m_complete;
else seeds = m_peer_list ? m_peer_list->num_seeds() : 0;
if (m_incomplete != 0xffffff) downloaders = m_incomplete;
else downloaders = m_peer_list ? m_peer_list->num_peers() - m_peer_list->num_seeds() : 0;
if (seeds == 0)
{
ret |= no_seeds;
ret |= downloaders & prio_mask;
}
else
{
ret |= ((1 + downloaders) * scale / seeds) & prio_mask;
}
return ret;
}
// this is an async operation triggered by the client
void torrent::save_resume_data(resume_data_flags_t const flags)
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
if (!valid_metadata())
{
alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
, errors::no_metadata);
return;
}
if ((flags & torrent_handle::only_if_modified) && !m_need_save_resume_data)
{
alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
, errors::resume_data_not_modified);
return;
}
m_need_save_resume_data = false;
m_save_resume_flags = flags;
state_updated();
if ((flags & torrent_handle::flush_disk_cache) && m_storage)
m_ses.disk_thread().async_release_files(m_storage);
state_updated();
add_torrent_params atp;
write_resume_data(atp);
alerts().emplace_alert<save_resume_data_alert>(std::move(atp), get_handle());
| ||
relevance 0 | ../src/torrent.cpp:10061 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
directly into the right place../src/torrent.cpp:10061 std::printf("timed out [average-piece-time: %d ms ]\n"
, m_average_piece_time);
#endif
}
// pick all blocks for this piece. the peers list is kept up to date
// and sorted. when we issue a request to a peer, its download queue
// time will increase and it may need to be bumped in the peers list,
// since it's ordered by download queue time
pick_time_critical_block(peers, ignore_peers
, peers_with_requests
, pi, &i, m_picker.get()
, blocks_in_piece, timed_out);
// put back the peers we ignored into the peer list for the next piece
if (!ignore_peers.empty())
{
peers.insert(peers.begin(), ignore_peers.begin(), ignore_peers.end());
ignore_peers.clear();
std::sort(peers.begin(), peers.end()
, [] (peer_connection const* lhs, peer_connection const* rhs)
{ return lhs->download_queue_time(16*1024) < rhs->download_queue_time(16*1024); });
}
// if this peer's download time exceeds 2 seconds, we're done.
// We don't want to build unreasonably long request queues
if (!peers.empty() && peers[0]->download_queue_time() > milliseconds(2000))
break;
}
// commit all the time critical requests
for (auto p : peers_with_requests)
{
p->send_block_requests();
}
}
#endif // TORRENT_DISABLE_STREAMING
std::set<std::string> torrent::web_seeds(web_seed_entry::type_t const type) const
{
TORRENT_ASSERT(is_single_thread());
std::set<std::string> ret;
for (auto const& s : m_web_seeds)
{
if (s.peer_info.banned) continue;
if (s.removed) continue;
if (s.type != type) continue;
ret.insert(s.url);
}
return ret;
| ||
relevance 0 | ../src/choker.cpp:272 | use an incremental partial_sort() here |
use an incremental partial_sort() here../src/choker.cpp:272 for (auto const p : peers)
{
if (p->is_choked() || !p->is_interesting()) continue;
if (!p->has_peer_choked())
{
// we're unchoked, we may want to lower our estimated
// reciprocation rate
p->decrease_est_reciprocation_rate();
}
else
{
// we've unchoked this peer, and it hasn't reciprocated
// we may want to increase our estimated reciprocation rate
p->increase_est_reciprocation_rate();
}
}
// if we're using the bittyrant choker, sort peers by their return
// on investment. i.e. download rate / upload rate
std::sort(peers.begin(), peers.end()
, std::bind(&bittyrant_unchoke_compare, _1, _2));
int upload_capacity_left = max_upload_rate;
// now, figure out how many peers should be unchoked. We deduct the
// estimated reciprocation rate from our upload_capacity estimate
// until there none left
int upload_slots = 0;
for (auto const p : peers)
{
TORRENT_ASSERT(p != nullptr);
if (p->est_reciprocation_rate() > upload_capacity_left) break;
++upload_slots;
upload_capacity_left -= p->est_reciprocation_rate();
}
return upload_slots;
}
#else
TORRENT_UNUSED(max_upload_rate);
#endif
int upload_slots = sett.get_int(settings_pack::unchoke_slots_limit);
if (upload_slots < 0)
upload_slots = std::numeric_limits<int>::max();
// ==== rate-based ====
| ||
relevance 0 | ../src/choker.cpp:335 | make configurable |
make configurable../src/choker.cpp:335 {
// first reset the number of unchoke slots, because we'll calculate
// it purely based on the current state of our peers.
upload_slots = 0;
int rate_threshold = sett.get_int(settings_pack::rate_choker_initial_threshold);
std::sort(peers.begin(), peers.end()
, std::bind(&upload_rate_compare, _1, _2));
for (auto const* p : peers)
{
int const rate = int(p->uploaded_in_last_round()
* 1000 / total_milliseconds(unchoke_interval));
// always have at least 1 unchoke slot
if (rate < rate_threshold) break;
++upload_slots;
rate_threshold += 2048;
}
++upload_slots;
}
// sorts the peers that are eligible for unchoke by download rate and
// secondary by total upload. The reason for this is, if all torrents are
// being seeded, the download rate will be 0, and the peers we have sent
// the least to should be unchoked
// we use partial sort here, because we only care about the top
// upload_slots peers.
int const slots = std::min(upload_slots, int(peers.size()));
if (sett.get_int(settings_pack::seed_choking_algorithm)
== settings_pack::round_robin)
{
int const pieces = sett.get_int(settings_pack::seeding_piece_quota);
std::nth_element(peers.begin(), peers.begin()
+ slots, peers.end()
, std::bind(&unchoke_compare_rr, _1, _2, pieces));
}
else if (sett.get_int(settings_pack::seed_choking_algorithm)
== settings_pack::fastest_upload)
{
std::nth_element(peers.begin(), peers.begin()
+ slots, peers.end()
, std::bind(&unchoke_compare_fastest_upload, _1, _2));
}
| ||
relevance 0 | ../src/web_connection_base.cpp:68 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:68 : peer_connection(pack)
, m_first_request(true)
, m_ssl(false)
, m_external_auth(web.auth)
, m_extra_headers(web.extra_headers)
, m_parser(http_parser::dont_parse_chunks)
, m_body_start(0)
{
TORRENT_ASSERT(&web.peer_info == pack.peerinfo);
// when going through a proxy, we don't necessarily have an endpoint here,
// since the proxy might be resolving the hostname, not us
TORRENT_ASSERT(web.endpoints.empty() || web.endpoints.front() == pack.endp);
INVARIANT_CHECK;
TORRENT_ASSERT(is_outgoing());
TORRENT_ASSERT(!m_torrent.lock()->is_upload_only());
// we only want left-over bandwidth
std::string protocol;
error_code ec;
std::tie(protocol, m_basic_auth, m_host, m_port, m_path)
= parse_url_components(web.url, ec);
TORRENT_ASSERT(!ec);
if (m_port == -1 && protocol == "http")
m_port = 80;
#ifdef TORRENT_USE_OPENSSL
if (protocol == "https")
{
m_ssl = true;
if (m_port == -1) m_port = 443;
}
#endif
if (!m_basic_auth.empty())
m_basic_auth = base64encode(m_basic_auth);
m_server_string = "URL seed @ ";
m_server_string += m_host;
}
int web_connection_base::timeout() const
{
// since this is a web seed, change the timeout
// according to the settings.
return m_settings.get_int(settings_pack::urlseed_timeout);
}
| ||
relevance 0 | ../src/broadcast_socket.cpp:111 | this function is pointless |
this function is pointless../src/broadcast_socket.cpp:111 // routing of this prefix by default.
address_v6 const a6 = a.to_v6();
return a6.is_loopback()
|| a6.is_link_local()
|| a6.is_site_local()
|| a6.is_multicast_link_local()
|| a6.is_multicast_site_local()
// fc00::/7, unique local address
|| (a6.to_bytes()[0] & 0xfe) == 0xfc;
}
address_v4 a4 = a.to_v4();
unsigned long ip = a4.to_ulong();
return ((ip & 0xff000000) == 0x0a000000 // 10.x.x.x
|| (ip & 0xfff00000) == 0xac100000 // 172.16.x.x
|| (ip & 0xffff0000) == 0xc0a80000 // 192.168.x.x
|| (ip & 0xffff0000) == 0xa9fe0000 // 169.254.x.x
|| (ip & 0xff000000) == 0x7f000000); // 127.x.x.x
}
bool is_loopback(address const& addr)
{
return addr.is_loopback();
}
bool is_any(address const& addr)
{
if (addr.is_v4())
return addr.to_v4() == address_v4::any();
else if (addr.to_v6().is_v4_mapped())
return (addr.to_v6().to_v4() == address_v4::any());
else
return addr.to_v6() == address_v6::any();
}
bool is_teredo(address const& addr)
{
if (!addr.is_v6()) return false;
static const std::uint8_t teredo_prefix[] = {0x20, 0x01, 0, 0};
address_v6::bytes_type b = addr.to_v6().to_bytes();
return std::memcmp(b.data(), teredo_prefix, 4) == 0;
}
bool supports_ipv6()
{
#if defined TORRENT_BUILD_SIMULATOR
return true;
#elif defined TORRENT_WINDOWS
TORRENT_TRY {
error_code ec;
make_address("::1", ec);
| ||
relevance 0 | ../src/utp_stream.cpp:1727 | this loop is not very efficient. It could be fixed by having a separate list of sequence numbers that need resending |
this loop is not very efficient. It could be fixed by having
a separate list of sequence numbers that need resending../src/utp_stream.cpp:1727}
// sends a packet, pulls data from the write buffer (if there's any)
// if ack is true, we need to send a packet regardless of if there's
// any data. Returns true if we could send more data (i.e. call
// send_pkt() again)
// returns true if there is more space for payload in our
// congestion window, false if there is no more space.
bool utp_socket_impl::send_pkt(int const flags)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
#endif
bool const force = (flags & pkt_ack) || (flags & pkt_fin);
// TORRENT_ASSERT(m_state != UTP_STATE_FIN_SENT || (flags & pkt_ack));
// first see if we need to resend any packets
for (int i = (m_acked_seq_nr + 1) & ACK_MASK; i != m_seq_nr; i = (i + 1) & ACK_MASK)
{
packet* p = m_outbuf.at(aux::numeric_cast<packet_buffer::index_type>(i));
if (!p) continue;
if (!p->need_resend) continue;
if (!resend_packet(p))
{
// we couldn't resend the packet. It probably doesn't
// fit in our cwnd. If force is set, we need to continue
// to send our packet anyway, if we don't have force set,
// we might as well return
if (!force) return false;
// resend_packet might have failed
if (m_state == UTP_STATE_ERROR_WAIT || m_state == UTP_STATE_DELETE) return false;
break;
}
// don't fast-resend this packet
if (m_fast_resend_seq_nr == i)
m_fast_resend_seq_nr = (m_fast_resend_seq_nr + 1) & ACK_MASK;
}
// MTU DISCOVERY
// under these conditions, the next packet we send should be an MTU probe.
// MTU probes get to use the mid-point packet size, whereas other packets
// use a conservative packet size of the largest known to work. The reason
// for the cwnd condition is to make sure the probe is surrounded by non-
// probes, to be able to distinguish a loss of the probe vs. just loss in
// general.
bool const mtu_probe = (m_mtu_seq == 0
| ||
relevance 0 | ../src/udp_tracker_connection.cpp:629 | why is this a linked list? |
why is this a linked list?../src/udp_tracker_connection.cpp:629 ipv6_peer_entry e{};
std::memcpy(e.ip.data(), buf.data(), 16);
buf = buf.subspan(16);
e.port = aux::read_uint16(buf);
resp.peers6.push_back(e);
}
}
else
{
resp.peers4.reserve(static_cast<std::size_t>(num_peers));
for (int i = 0; i < num_peers; ++i)
{
ipv4_peer_entry e{};
std::memcpy(e.ip.data(), buf.data(), 4);
buf = buf.subspan(4);
e.port = aux::read_uint16(buf);
resp.peers4.push_back(e);
}
}
std::list<address> ip_list;
std::transform(m_endpoints.begin(), m_endpoints.end(), std::back_inserter(ip_list)
, [](tcp::endpoint const& ep) { return ep.address(); } );
cb->tracker_response(tracker_req(), m_target.address(), ip_list, resp);
close();
return true;
}
bool udp_tracker_connection::on_scrape_response(span<char const> buf)
{
restart_read_timeout();
auto const action = static_cast<action_t>(aux::read_int32(buf));
std::uint32_t const transaction = aux::read_uint32(buf);
if (transaction != m_transaction_id)
{
fail(error_code(errors::invalid_tracker_transaction_id));
return false;
}
if (action == action_t::error)
{
fail(error_code(errors::tracker_failure)
, std::string(buf.data(), static_cast<std::size_t>(buf.size())).c_str());
return true;
}
if (action != action_t::scrape)
{
| ||
relevance 0 | ../src/torrent_peer.cpp:174 | how do we deal with our external address changing? |
how do we deal with our external address changing?../src/torrent_peer.cpp:174 // prefer opening non-encrypted
// connections. If it fails, we'll
// retry with encryption
, pe_support(false)
#endif
, is_v6_addr(false)
#if TORRENT_USE_I2P
, is_i2p_addr(false)
#endif
, on_parole(false)
, banned(false)
, supports_utp(true) // assume peers support utp
, confirmed_supports_utp(false)
, supports_holepunch(false)
, web_seed(false)
{}
std::uint32_t torrent_peer::rank(external_ip const& external, int external_port) const
{
TORRENT_ASSERT(in_use);
if (peer_rank == 0)
peer_rank = peer_priority(
tcp::endpoint(external.external_address(this->address()), std::uint16_t(external_port))
, tcp::endpoint(this->address(), this->port));
return peer_rank;
}
#ifndef TORRENT_DISABLE_LOGGING
std::string torrent_peer::to_string() const
{
TORRENT_ASSERT(in_use);
#if TORRENT_USE_I2P
if (is_i2p_addr) return dest().to_string();
#endif // TORRENT_USE_I2P
error_code ec;
return address().to_string(ec);
}
#endif
std::int64_t torrent_peer::total_download() const
{
TORRENT_ASSERT(in_use);
if (connection != nullptr)
{
TORRENT_ASSERT(prev_amount_download == 0);
return connection->statistics().total_payload_download();
}
else
{
return std::int64_t(prev_amount_download) << 10;
}
| ||
relevance 0 | ../src/piece_picker.cpp:107 | find a better place for this |
find a better place for this../src/piece_picker.cpp:107 if (limit == 0)
{
std::cerr << " ...";
break;
}
if (*i == -1) break;
while (j != p.m_priority_boundaries.end() && *j <= index)
{
std::cerr << "| ";
++j;
}
std::cerr << *i << "(" << p.m_piece_map[*i].index << ") ";
--limit;
}
std::cerr << std::endl;
}
}
#endif // TORRENT_PICKER_LOG
namespace libtorrent {
const piece_block piece_block::invalid(
std::numeric_limits<piece_index_t>::max()
, std::numeric_limits<int>::max());
constexpr prio_index_t piece_picker::piece_pos::we_have_index;
constexpr picker_options_t piece_picker::rarest_first;
constexpr picker_options_t piece_picker::reverse;
constexpr picker_options_t piece_picker::on_parole;
constexpr picker_options_t piece_picker::prioritize_partials;
constexpr picker_options_t piece_picker::sequential;
constexpr picker_options_t piece_picker::time_critical_mode;
constexpr picker_options_t piece_picker::align_expanded_pieces;
constexpr picker_options_t piece_picker::piece_extent_affinity;
constexpr download_queue_t piece_picker::piece_pos::piece_downloading;
constexpr download_queue_t piece_picker::piece_pos::piece_full;
constexpr download_queue_t piece_picker::piece_pos::piece_finished;
constexpr download_queue_t piece_picker::piece_pos::piece_zero_prio;
constexpr download_queue_t piece_picker::piece_pos::num_download_categories;
constexpr download_queue_t piece_picker::piece_pos::piece_open;
constexpr download_queue_t piece_picker::piece_pos::piece_downloading_reverse;
constexpr download_queue_t piece_picker::piece_pos::piece_full_reverse;
// the max number of blocks to create an affinity for
constexpr int max_piece_affinity_extent = 4 * 1024 * 1024 / default_block_size;
piece_picker::piece_picker(int const blocks_per_piece
, int const blocks_in_last_piece, int const total_num_pieces)
: m_priority_boundaries(1, m_pieces.end_index())
{
| ||
relevance 0 | ../src/piece_picker.cpp:2022 | this could probably be optimized by incrementally calling partial_sort to sort one more element in the list. Because chances are that we'll just need a single piece, and once we've picked from it we're done. Sorting the rest of the list in that case is a waste of time. |
this could probably be optimized by incrementally
calling partial_sort to sort one more element in the list. Because
chances are that we'll just need a single piece, and once we've
picked from it we're done. Sorting the rest of the list in that
case is a waste of time.../src/piece_picker.cpp:2022 pc.inc_stats_counter(counters::piece_picker_partial_loops);
// in time critical mode, only pick high priority pieces
if ((options & time_critical_mode)
&& piece_priority(dp.index) != top_priority)
continue;
if (!is_piece_free(dp.index, pieces)) continue;
TORRENT_ASSERT(m_piece_map[dp.index].download_queue()
== piece_pos::piece_downloading);
ordered_partials[num_ordered_partials++] = &dp;
}
// now, sort the list.
if (options & rarest_first)
{
ret |= picker_log_alert::rarest_first_partials;
std::sort(ordered_partials.begin(), ordered_partials.begin() + num_ordered_partials
, std::bind(&piece_picker::partial_compare_rarest_first, this
, _1, _2));
}
for (int i = 0; i < num_ordered_partials; ++i)
{
ret |= picker_log_alert::prioritize_partials;
num_blocks = add_blocks_downloading(*ordered_partials[i], pieces
, interesting_blocks, backup_blocks, backup_blocks2
, num_blocks, prefer_contiguous_blocks, peer, options);
if (num_blocks <= 0) return ret;
if (int(backup_blocks.size()) >= num_blocks
&& int(backup_blocks2.size()) >= num_blocks)
break;
}
num_blocks = append_blocks(interesting_blocks, backup_blocks
, num_blocks);
if (num_blocks <= 0) return ret;
num_blocks = append_blocks(interesting_blocks, backup_blocks2
, num_blocks);
if (num_blocks <= 0) return ret;
}
if (!suggested_pieces.empty())
{
for (piece_index_t i : suggested_pieces)
{
| ||
relevance 0 | ../src/piece_picker.cpp:2172 | Is it a good idea that this affinity takes precedence over piece priority? |
Is it a good idea that this affinity takes precedence over
piece priority?../src/piece_picker.cpp:2172 prio_index_t const end = priority_end(i);
for (prio_index_t p = prev(end); p >= start; --p)
{
pc.inc_stats_counter(counters::piece_picker_reverse_rare_loops);
if (!is_piece_free(m_pieces[p], pieces)) continue;
ret |= picker_log_alert::reverse_rarest_first;
num_blocks = add_blocks(m_pieces[p], pieces
, interesting_blocks, backup_blocks
, backup_blocks2, num_blocks
, prefer_contiguous_blocks, peer, suggested_pieces
, options);
if (num_blocks <= 0) return ret;
}
}
}
else
{
if (options & piece_extent_affinity)
{
int to_erase = -1;
int idx = -1;
for (piece_extent_t const e : m_recent_extents)
{
++idx;
bool have_all = true;
for (piece_index_t const p : extent_for(e))
{
if (!m_piece_map[p].have()) have_all = false;
if (!is_piece_free(p, pieces)) continue;
ret |= picker_log_alert::extent_affinity;
num_blocks = add_blocks(p, pieces
, interesting_blocks, backup_blocks
, backup_blocks2, num_blocks
, prefer_contiguous_blocks, peer, suggested_pieces
, options);
if (num_blocks <= 0)
{
// if we have all pieces belonging to this extent, remove it
if (to_erase != -1) m_recent_extents.erase(m_recent_extents.begin() + to_erase);
return ret;
}
}
// if we have all pieces belonging to this extent, remove it
if (have_all) to_erase = idx;
}
if (to_erase != -1) m_recent_extents.erase(m_recent_extents.begin() + to_erase);
| ||
relevance 0 | ../src/piece_picker.cpp:2561 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
the !downloading condition doesn't make much sense../src/piece_picker.cpp:2561 TORRENT_ASSERT(index < m_piece_map.end_index());
if (next(index) == m_piece_map.end_index())
return m_blocks_in_last_piece;
else
return m_blocks_per_piece;
}
// returns true if this piece may be picked from this peer: the peer must
// have the piece (per the bitmask), and we must neither already have it
// nor have filtered it out by priority
bool piece_picker::is_piece_free(piece_index_t const piece
	, typed_bitfield<piece_index_t> const& bitmask) const
{
	if (!bitmask[piece]) return false;
	auto const& pos = m_piece_map[piece];
	return !pos.have() && !pos.filtered();
}
// returns true if the piece is eligible to start downloading from this
// peer: the peer has it, we don't have it, it is not already being
// downloaded, and it is not filtered out
bool piece_picker::can_pick(piece_index_t const piece
	, typed_bitfield<piece_index_t> const& bitmask) const
{
	if (!bitmask[piece]) return false;
	auto const& pos = m_piece_map[piece];
	return !pos.have()
		&& !pos.downloading()
		&& !pos.filtered();
}
#if TORRENT_USE_INVARIANT_CHECKS
// invariant check: every outstanding block's peer pointer must either be
// null or point at a live (in_use) torrent_peer
void piece_picker::check_peers()
{
	for (auto const& info : m_block_info)
	{
		if (info.peer == nullptr) continue;
		TORRENT_ASSERT(static_cast<torrent_peer*>(info.peer)->in_use);
	}
}
#endif
// clear out all references to the given peer from the outstanding block
// requests, without touching the blocks' download state itself
void piece_picker::clear_peer(torrent_peer* peer)
{
	for (auto& info : m_block_info)
	{
		if (info.peer != peer) continue;
		info.peer = nullptr;
	}
}
// the first bool is true if this is the only peer that has requested and downloaded
// blocks from this piece.
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
| ||
relevance 0 | ../src/piece_picker.cpp:3117 | should 5 be configurable? |
should 5 be configurable?../src/piece_picker.cpp:3117 bool have_all = true;
for (auto const piece : extent_for(this_extent))
{
if (piece == p) continue;
if (!m_piece_map[piece].have()) have_all = false;
// if at least one piece in this extent has a different priority than
// the one we just started downloading, don't create an affinity for
// adjacent pieces. This probably means the pieces belong to different
// files, or that some other mechanism determining the priority should
// take precedence.
if (piece_priority(piece) != this_prio) return;
}
// if we already have all the *other* pieces in this extent, there's no
// need to inflate their priorities
if (have_all) return;
if (m_recent_extents.size() < 5)
m_recent_extents.push_back(this_extent);
// limit the number of extent affinities active at any given time to limit
// the cost of checking them. Also, don't replace them, commit to
// finishing them before starting another extent. This is analogous to
// limiting the number of partial pieces.
}
// options may be 0 or piece_picker::reverse
// returns false if the block could not be marked as downloading
bool piece_picker::mark_as_downloading(piece_block const block
, torrent_peer* peer, picker_options_t const options)
{
#ifdef TORRENT_PICKER_LOG
std::cerr << "[" << this << "] " << "mark_as_downloading( {"
<< block.piece_index << ", " << block.block_index << "} )" << std::endl;
#endif
TORRENT_ASSERT(peer == nullptr || peer->in_use);
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
TORRENT_ASSERT(block.piece_index < m_piece_map.end_index());
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
TORRENT_ASSERT(!m_piece_map[block.piece_index].have());
piece_pos& p = m_piece_map[block.piece_index];
if (p.download_queue() == piece_pos::piece_open)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
| ||
relevance 0 | ../src/torrent_handle.cpp:528 | support moving files into this call |
support moving files into this call../src/torrent_handle.cpp:528 auto retp = &prio;
sync_call(&torrent::piece_priorities, retp);
std::vector<int> ret;
ret.reserve(prio.size());
for (auto p : prio)
ret.push_back(int(static_cast<std::uint8_t>(p)));
return ret;
}
#endif
// asynchronously set the download priority of a single file. Returns
// immediately; the change is applied by the libtorrent network thread.
void torrent_handle::file_priority(file_index_t index, download_priority_t priority) const
{
async_call(&torrent::set_file_priority, index, priority);
}
// synchronously query the download priority of a single file. Blocks the
// calling thread until the network thread services the call. dont_download
// is the fallback value handed to sync_call_ret — presumably returned when
// the call cannot be performed (e.g. invalid handle); confirm in sync_call_ret.
download_priority_t torrent_handle::file_priority(file_index_t index) const
{
return sync_call_ret<download_priority_t>(dont_download, &torrent::file_priority, index);
}
// asynchronously set the priorities of all files at once. The cast
// reinterprets the plain std::vector as the strongly-typed aux::vector
// indexed by file_index_t, which is the type torrent::prioritize_files takes.
void torrent_handle::prioritize_files(std::vector<download_priority_t> const& files) const
{
async_call(&torrent::prioritize_files
, static_cast<aux::vector<download_priority_t, file_index_t> const&>(files));
}
// synchronously retrieve the per-file download priorities. Blocks until the
// network thread has filled in the vector.
std::vector<download_priority_t> torrent_handle::get_file_priorities() const
{
aux::vector<download_priority_t, file_index_t> ret;
auto retp = &ret;
sync_call(&torrent::file_priorities, retp);
// the explicit std::move is deliberate: returning an aux::vector as its
// std::vector base is a converting return, and implicit move on converting
// returns is only guaranteed as of C++20 (P1825). Without it this would copy.
return std::move(ret);
}
#if TORRENT_ABI_VERSION == 1
// ============ start deprecation ===============
// deprecated overload taking plain ints. Each value is narrowed to the
// underlying std::uint8_t and wrapped in download_priority_t before being
// forwarded asynchronously to the torrent object.
void torrent_handle::prioritize_files(std::vector<int> const& files) const
{
	aux::vector<download_priority_t, file_index_t> file_prio;
	file_prio.reserve(files.size());
	for (int const prio : files)
	{
		file_prio.push_back(download_priority_t(static_cast<std::uint8_t>(prio)));
	}
	async_call(&torrent::prioritize_files, file_prio);
}
std::vector<int> torrent_handle::file_priorities() const
{
aux::vector<download_priority_t, file_index_t> prio;
| ||
relevance 0 | ../src/storage_utils.cpp:285 | ideally, if we end up copying files because of a move across volumes, the source should not be deleted until they've all been copied. That would let us rollback with higher confidence. |
ideally, if we end up copying files because of a move across
volumes, the source should not be deleted until they've all been
copied. That would let us rollback with higher confidence.../src/storage_utils.cpp:285 // later
aux::vector<bool, file_index_t> copied_files(std::size_t(f.num_files()), false);
// track how far we got in case of an error
file_index_t file_index{};
error_code e;
for (auto const i : f.file_range())
{
// files moved out to absolute paths are not moved
if (f.file_absolute_path(i)) continue;
std::string const old_path = combine_path(save_path, f.file_path(i));
std::string const new_path = combine_path(new_save_path, f.file_path(i));
if (flags == move_flags_t::dont_replace && exists(new_path))
{
if (ret == status_t::no_error) ret = status_t::need_full_check;
continue;
}
move_file(old_path, new_path, e);
// if the source file doesn't exist. That's not a problem
// we just ignore that file
if (e == boost::system::errc::no_such_file_or_directory)
e.clear();
else if (e
&& e != boost::system::errc::invalid_argument
&& e != boost::system::errc::permission_denied)
{
// moving the file failed
// on OSX, the error when trying to rename a file across different
// volumes is EXDEV, which will make it fall back to copying.
e.clear();
copy_file(old_path, new_path, e);
if (!e) copied_files[i] = true;
}
if (e)
{
ec.ec = e;
ec.file(i);
ec.operation = operation_t::file_rename;
file_index = i;
break;
}
}
if (!e && pf)
{
pf->move_partfile(new_save_path, e);
| ||
relevance 0 | ../src/storage_utils.cpp:475 | this should probably be moved to default_storage::initialize |
this should probably be moved to default_storage::initialize../src/storage_utils.cpp:475 if (error && error != boost::system::errc::no_such_file_or_directory)
{
ec.file(file_index_t(-1));
ec.ec = error;
ec.operation = operation_t::file_remove;
}
}
}
bool verify_resume_data(add_torrent_params const& rd
, aux::vector<std::string, file_index_t> const& links
, file_storage const& fs
, aux::vector<download_priority_t, file_index_t> const& file_priority
, stat_cache& stat
, std::string const& save_path
, storage_error& ec)
{
#ifdef TORRENT_DISABLE_MUTABLE_TORRENTS
TORRENT_UNUSED(links);
#else
if (!links.empty())
{
TORRENT_ASSERT(int(links.size()) == fs.num_files());
// if this is a mutable torrent, and we need to pick up some files
// from other torrents, do that now. Note that there is an inherent
// race condition here. We checked if the files existed on a different
// thread a while ago. These files may no longer exist or may have been
// moved. If so, we just fail. The user is responsible to not touch
// other torrents until a new mutable torrent has been completely
// added.
for (auto const idx : fs.file_range())
{
std::string const& s = links[idx];
if (s.empty()) continue;
error_code err;
std::string file_path = fs.file_path(idx, save_path);
hard_link(s, file_path, err);
// if the file already exists, that's not an error
| ||
relevance 0 | ../src/disk_io_thread.cpp:492 | in this case, the piece should probably not be flushed yet. are there any more cases where it should? |
in this case, the piece should probably not be flushed yet. are there
any more cases where it should?../src/disk_io_thread.cpp:492 {
DLOG("[%d hash-done] ", static_cast<int>(i));
continue;
}
#if DEBUG_DISK_THREAD
if (pe->num_dirty < pe->blocks_in_piece)
{
DLOG("[%d dirty:%d] ", static_cast<int>(i), int(pe->num_dirty));
}
else if (pe->hashing_done == 0 && hash_cursor < pe->blocks_in_piece)
{
DLOG("[%d cursor:%d] ", static_cast<int>(i), hash_cursor);
}
else
{
DLOG("[%d xx] ", static_cast<int>(i));
}
#endif
range_full = false;
break;
}
if (!range_full)
{
DLOG("not flushing\n");
return 0;
}
DLOG("\n");
// now, build a iovec for all pieces that we want to flush, so that they
// can be flushed in a single atomic operation. This is especially important
// when there are more than 1 disk thread, to make sure they don't
// interleave in undesired places.
// in order to remember where each piece boundary ended up in the iovec,
// we keep the indices in the iovec_offset array
cont_pieces = static_cast<int>(range_end) - static_cast<int>(range_start);
int const blocks_to_flush = int(p->blocks_in_piece * cont_pieces);
TORRENT_ALLOCA(iov, iovec_t, blocks_to_flush);
TORRENT_ALLOCA(flushing, int, blocks_to_flush);
// this is the offset into iov and flushing for each piece
TORRENT_ALLOCA(iovec_offset, int, cont_pieces + 1);
int iov_len = 0;
// this is the block index each piece starts at
int block_start = 0;
// keep track of the pieces that have had their refcount incremented
// so we know to decrement them later
TORRENT_ALLOCA(refcount_pieces, int, cont_pieces);
| ||
relevance 0 | ../src/disk_io_thread.cpp:943 | it would be nice to optimize this by having the cache pieces also ordered by |
it would be nice to optimize this by having the cache
pieces also ordered by../src/disk_io_thread.cpp:943 // we really should not have any pieces left. This is only called
// from disk_io_thread::do_delete, which is a fence job and should
// not have any other jobs active, i.e. there should not be any references
// keeping pieces or blocks alive
if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
{
auto const& storage_pieces = storage->cached_pieces();
for (auto const& p : storage_pieces)
{
cached_piece_entry* pe = m_disk_cache.find_piece(storage, p.piece);
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
}
}
#endif
}
else
{
auto range = m_disk_cache.all_pieces();
while (range.first != range.second)
{
if ((flags & (flush_read_cache | flush_delete_cache)) == 0)
{
// if we're not flushing the read cache, and not deleting the
// cache, skip pieces with no dirty blocks, i.e. read cache
// pieces
while (range.first->num_dirty == 0)
{
++range.first;
if (range.first == range.second) return;
}
}
cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first);
flush_piece(pe, flags, completed_jobs, l);
range = m_disk_cache.all_pieces();
}
}
}
// this is called if we're exceeding (or about to exceed) the cache
// size limit. This means we should not restrict ourselves to contiguous
// blocks of write cache line size, but try to flush all old blocks
// this is why we pass in 1 as cont_block to the flushing functions
void disk_io_thread::try_flush_write_blocks(int num, jobqueue_t& completed_jobs
, std::unique_lock<std::mutex>& l)
{
DLOG("try_flush_write_blocks: %d\n", num);
auto const range = m_disk_cache.write_lru_pieces();
aux::vector<std::pair<std::shared_ptr<storage_interface>, piece_index_t>> pieces;
pieces.reserve(m_disk_cache.num_write_lru_pieces());
| ||
relevance 0 | ../src/disk_io_thread.cpp:985 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
instead of doing a lookup each time through the loop, save
cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:985 // blocks of write cache line size, but try to flush all old blocks
// this is why we pass in 1 as cont_block to the flushing functions
void disk_io_thread::try_flush_write_blocks(int num, jobqueue_t& completed_jobs
, std::unique_lock<std::mutex>& l)
{
DLOG("try_flush_write_blocks: %d\n", num);
auto const range = m_disk_cache.write_lru_pieces();
aux::vector<std::pair<std::shared_ptr<storage_interface>, piece_index_t>> pieces;
pieces.reserve(m_disk_cache.num_write_lru_pieces());
for (auto p = range; p.get() && num > 0; p.next())
{
cached_piece_entry* e = p.get();
if (e->num_dirty == 0) continue;
pieces.emplace_back(e->storage, e->piece);
}
for (auto const& p : pieces)
{
cached_piece_entry* pe = m_disk_cache.find_piece(p.first.get(), p.second);
if (pe == nullptr) continue;
// another thread may flush this piece while we're looping and
// evict it into a read piece and then also evict it to ghost
if (pe->cache_state != cached_piece_entry::write_lru) continue;
#if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(piece_log_t::try_flush_write_blocks, -1));
#endif
++pe->piece_refcount;
kick_hasher(pe, l);
num -= try_flush_hashed(pe, 1, completed_jobs, l);
--pe->piece_refcount;
m_disk_cache.maybe_free_piece(pe);
}
// when the write cache is under high pressure, it is likely
// counter productive to actually do this, since a piece may
// not have had its flush_hashed job run on it
// so only do it if no other thread is currently flushing
if (num == 0 || m_stats_counters[counters::num_writing_threads] > 0) return;
// if we still need to flush blocks, start over and flush
// everything in LRU order (degrade to lru cache eviction)
for (auto const& p : pieces)
{
cached_piece_entry* pe = m_disk_cache.find_piece(p.first.get(), p.second);
if (pe == nullptr) continue;
| ||
relevance 0 | ../src/disk_io_thread.cpp:1185 | in the future, propagate exceptions back to the handlers |
in the future, propagate exceptions back to the handlers../src/disk_io_thread.cpp:1185
std::shared_ptr<storage_interface> storage = j->storage;
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
if (j->storage)
{
std::unique_lock<std::mutex> l(m_cache_mutex);
auto const& pieces = j->storage->cached_pieces();
for (auto const& p : pieces)
TORRENT_ASSERT(p.storage == j->storage);
}
#endif
if (storage && storage->m_settings == nullptr)
storage->m_settings = &m_settings;
TORRENT_ASSERT(static_cast<int>(j->action) < int(job_functions.size()));
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
// call disk function
status_t ret = status_t::no_error;
try
{
int const idx = static_cast<int>(j->action);
ret = (this->*(job_functions[static_cast<std::size_t>(idx)]))(j, completed_jobs);
}
catch (boost::system::system_error const& err)
{
ret = status_t::fatal_disk_error;
j->error.ec = err.code();
j->error.operation = operation_t::exception;
}
catch (std::bad_alloc const&)
{
ret = status_t::fatal_disk_error;
j->error.ec = errors::no_memory;
j->error.operation = operation_t::exception;
}
catch (std::exception const&)
{
ret = status_t::fatal_disk_error;
j->error.ec = boost::asio::error::fault;
j->error.operation = operation_t::exception;
}
// note that -2 errors are OK
TORRENT_ASSERT(ret != status_t::fatal_disk_error
|| (j->error.ec && j->error.operation != operation_t::unknown));
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
| ||
relevance 0 | ../src/disk_io_thread.cpp:1243 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
queue for retry jobs, that's only ever run when a job completes, in
any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1243 check_cache_level(l, completed_jobs);
TORRENT_ASSERT(l.owns_lock());
--m_cache_check_state;
}
}
else
{
m_cache_check_state = cache_check_reinvoke;
}
l.unlock();
if (ret == retry_job)
{
job_queue& q = queue_for_job(j);
std::unique_lock<std::mutex> l2(m_job_mutex);
// to avoid busy looping here, give up
// our quanta in case there aren't any other
// jobs to run in between
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
bool const need_sleep = q.m_queued_jobs.empty();
q.m_queued_jobs.push_back(j);
l2.unlock();
if (need_sleep) std::this_thread::yield();
return;
}
if (ret == defer_handler) return;
j->ret = ret;
completed_jobs.push_back(j);
}
status_t disk_io_thread::do_uncached_read(disk_io_job* j)
{
j->argument = disk_buffer_holder(*this, m_disk_cache.allocate_buffer("send buffer"), 0x4000);
auto& buffer = boost::get<disk_buffer_holder>(j->argument);
if (buffer.get() == nullptr)
{
j->error.ec = error::no_memory;
j->error.operation = operation_t::alloc_cache_piece;
return status_t::fatal_disk_error;
}
time_point const start_time = clock_type::now();
open_mode_t const file_flags = file_flags_for_job(j
| ||
relevance 0 | ../src/disk_io_thread.cpp:2003 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2003 disk_io_job* j = allocate_job(job_action_t::file_priority);
j->storage = m_torrents[storage]->shared_from_this();
j->argument = std::move(prios);
j->callback = std::move(handler);
add_fence_job(j);
}
// post a fence job that clears all cached state for the given piece. The
// handler is invoked once the job has completed on a disk thread.
void disk_io_thread::async_clear_piece(storage_index_t const storage
, piece_index_t const index, std::function<void(piece_index_t)> handler)
{
disk_io_job* j = allocate_job(job_action_t::clear_piece);
j->storage = m_torrents[storage]->shared_from_this();
j->piece = index;
j->callback = std::move(handler);
// regular jobs are not guaranteed to be executed in-order
// since clear piece must guarantee that all write jobs that
// have been issued finish before the clear piece job completes
add_fence_job(j);
}
// evict the given piece from the block cache and fail any jobs still
// attached to it. Also discards any partially computed hash state for
// the piece.
void disk_io_thread::clear_piece(storage_index_t const storage
, piece_index_t const index)
{
storage_interface* st = m_torrents[storage].get();
std::unique_lock<std::mutex> l(m_cache_mutex);
cached_piece_entry* pe = m_disk_cache.find_piece(st, index);
// the piece isn't in the cache; nothing to clear
if (pe == nullptr) return;
TORRENT_PIECE_ASSERT(pe->hashing == false, pe);
// throw away incremental hash state; the piece will be re-hashed
pe->hashing_done = 0;
pe->hash.reset();
// evict_piece returns true if the piece was in fact
// evicted. A piece may fail to be evicted if there
// are still outstanding operations on it, which should
// never be the case when this function is used
// in fact, no jobs should really be hung on this piece
// at this point
jobqueue_t jobs;
bool const ok = m_disk_cache.evict_piece(pe, jobs, block_cache::allow_ghost);
TORRENT_PIECE_ASSERT(ok, pe);
TORRENT_UNUSED(ok);
// any jobs that were still attached to the piece are aborted
fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);
}
void disk_io_thread::kick_hasher(cached_piece_entry* pe, std::unique_lock<std::mutex>& l)
{
| ||
relevance 0 | ../src/disk_io_thread.cpp:2265 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2265 if (pe == nullptr)
{
std::uint16_t const cache_state = std::uint16_t((j->flags & disk_interface::volatile_read)
? cached_piece_entry::volatile_read_lru
: cached_piece_entry::read_lru1);
pe = m_disk_cache.allocate_piece(j, cache_state);
}
if (pe == nullptr)
{
j->error.ec = error::no_memory;
j->error.operation = operation_t::alloc_cache_piece;
return status_t::fatal_disk_error;
}
if (pe->hashing)
{
TORRENT_PIECE_ASSERT(pe->hash, pe);
// another thread is hashing this piece right now
// try again in a little bit
DLOG("do_hash: retry\n");
return retry_job;
}
pe->hashing = 1;
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1
|| pe->cache_state == cached_piece_entry::read_lru2, pe);
piece_refcount_holder refcount_holder(pe);
if (!pe->hash)
{
pe->hashing_done = 0;
pe->hash.reset(new partial_hash);
}
partial_hash* ph = pe->hash.get();
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
// we don't care about anything to the left of ph->offset
// since those blocks have already been hashed.
// we just care about [first_block, first_block + blocks_left]
int const first_block = ph->offset / default_block_size;
int const blocks_left = blocks_in_piece - first_block;
// ph->offset
// | first_block
// | |
// v v
// +---+---+---+---+---+---+
// | | | | | | |
| ||
relevance 0 | ../src/settings_pack.cpp:276 | deprecate this |
deprecate this../src/settings_pack.cpp:276 SET(outgoing_port, 0, nullptr),
SET(num_outgoing_ports, 0, nullptr),
SET(peer_tos, 0x20, &session_impl::update_peer_tos),
SET(active_downloads, 3, &session_impl::trigger_auto_manage),
SET(active_seeds, 5, &session_impl::trigger_auto_manage),
SET(active_checking, 1, &session_impl::trigger_auto_manage),
SET(active_dht_limit, 88, nullptr),
SET(active_tracker_limit, 1600, nullptr),
SET(active_lsd_limit, 60, nullptr),
SET(active_limit, 500, &session_impl::trigger_auto_manage),
DEPRECATED_SET(active_loaded_limit, 0, &session_impl::trigger_auto_manage),
SET(auto_manage_interval, 30, nullptr),
SET(seed_time_limit, 24 * 60 * 60, nullptr),
SET(auto_scrape_interval, 1800, nullptr),
SET(auto_scrape_min_interval, 300, nullptr),
SET(max_peerlist_size, 3000, nullptr),
SET(max_paused_peerlist_size, 1000, nullptr),
SET(min_announce_interval, 5 * 60, nullptr),
SET(auto_manage_startup, 60, nullptr),
SET(seeding_piece_quota, 20, nullptr),
SET(max_rejects, 50, nullptr),
SET(recv_socket_buffer_size, 0, &session_impl::update_socket_buffer_size),
SET(send_socket_buffer_size, 0, &session_impl::update_socket_buffer_size),
SET(max_peer_recv_buffer_size, 2 * 1024 * 1024, nullptr),
DEPRECATED_SET(file_checks_delay_per_block, 0, nullptr),
SET(read_cache_line_size, 32, nullptr),
SET(write_cache_line_size, 16, nullptr),
SET(optimistic_disk_retry, 10 * 60, nullptr),
SET(max_suggest_pieces, 16, nullptr),
SET(local_service_announce_interval, 5 * 60, nullptr),
SET(dht_announce_interval, 15 * 60, &session_impl::update_dht_announce_interval),
SET(udp_tracker_token_expiry, 60, nullptr),
DEPRECATED_SET(default_cache_min_age, 1, nullptr),
SET(num_optimistic_unchoke_slots, 0, nullptr),
SET(default_est_reciprocation_rate, 16000, nullptr),
SET(increase_est_reciprocation_rate, 20, nullptr),
SET(decrease_est_reciprocation_rate, 3, nullptr),
SET(max_pex_peers, 50, nullptr),
SET(tick_interval, 500, nullptr),
SET(share_mode_target, 3, nullptr),
SET(upload_rate_limit, 0, &session_impl::update_upload_rate),
SET(download_rate_limit, 0, &session_impl::update_download_rate),
DEPRECATED_SET(local_upload_rate_limit, 0, &session_impl::update_local_upload_rate),
DEPRECATED_SET(local_download_rate_limit, 0, &session_impl::update_local_download_rate),
SET(dht_upload_rate_limit, 8000, &session_impl::update_dht_upload_rate_limit),
SET(unchoke_slots_limit, 8, &session_impl::update_unchoke_limit),
DEPRECATED_SET(half_open_limit, 0, nullptr),
SET(connections_limit, 200, &session_impl::update_connections_limit),
SET(connections_slack, 10, nullptr),
SET(utp_target_delay, 100, nullptr),
SET(utp_gain_factor, 3000, nullptr),
| ||
relevance 0 | ../src/settings_pack.cpp:517 | it would be nice to reserve() these vectors up front |
it would be nice to reserve() these vectors up front../src/settings_pack.cpp:517 s.set_str(settings_pack::string_type_base | i, str_settings[i].default_value);
TORRENT_ASSERT(s.get_str(settings_pack::string_type_base + i) == str_settings[i].default_value);
}
for (int i = 0; i < settings_pack::num_int_settings; ++i)
{
s.set_int(settings_pack::int_type_base | i, int_settings[i].default_value);
TORRENT_ASSERT(s.get_int(settings_pack::int_type_base + i) == int_settings[i].default_value);
}
for (int i = 0; i < settings_pack::num_bool_settings; ++i)
{
s.set_bool(settings_pack::bool_type_base | i, bool_settings[i].default_value);
TORRENT_ASSERT(s.get_bool(settings_pack::bool_type_base + i) == bool_settings[i].default_value);
}
}
// build a settings_pack populated with the compiled-in default value of
// every string, int and bool setting. String settings whose default is
// nullptr are left unset.
settings_pack default_settings()
{
	settings_pack pack;
	for (int idx = 0; idx < settings_pack::num_string_settings; ++idx)
	{
		char const* const def = str_settings[idx].default_value;
		// some string settings have no default; skip those
		if (def == nullptr) continue;
		pack.set_str(settings_pack::string_type_base + idx, def);
	}
	for (int idx = 0; idx < settings_pack::num_int_settings; ++idx)
	{
		pack.set_int(settings_pack::int_type_base + idx
			, int_settings[idx].default_value);
	}
	for (int idx = 0; idx < settings_pack::num_bool_settings; ++idx)
	{
		pack.set_bool(settings_pack::bool_type_base + idx
			, bool_settings[idx].default_value);
	}
	return pack;
}
// return the compiled-in default for an integer setting. "name" must be an
// int-typed setting identifier (asserted via its type bits).
int default_int_value(int const name)
{
	TORRENT_ASSERT((name & settings_pack::type_mask) == settings_pack::int_type_base);
	int const idx = name - settings_pack::int_type_base;
	return int_settings[idx].default_value;
}
void apply_pack(settings_pack const* pack, aux::session_settings& sett
, aux::session_impl* ses)
{
using fun_t = void (aux::session_impl::*)();
std::vector<fun_t> callbacks;
sett.bulk_set([&](aux::session_settings_single_thread& s)
| ||
relevance 0 | ../src/http_parser.cpp:138 | remove to_string() if we're in C++14 |
remove to_string() if we're in C++14../src/http_parser.cpp:138 // component or hostname.
if (end <= start + 2) end = std::string::npos;
// if this fails, the referrer is just url-scheme and hostname. We can
// just append the location to it.
if (end != std::string::npos)
url.resize(end);
// however, we may still need to insert a '/' in case neither side
// has one. We know the location doesn't start with a / already.
// so, if the referrer doesn't end with one, add it.
ensure_trailing_slash(url);
url += location;
}
return url;
}
// look up an HTTP header by name. Returns a reference to its value, or to
// a static empty string when the header is not present.
std::string const& http_parser::header(string_view const key) const
{
	static std::string const empty;
	auto const it = m_header.find(key.to_string());
	return it == m_header.end() ? empty : it->second;
}
boost::optional<seconds32> http_parser::header_duration(string_view const key) const
{
| ||
relevance 0 | ../src/http_parser.cpp:146 | remove to_string() if we're in C++14 |
remove to_string() if we're in C++14../src/http_parser.cpp:146
// however, we may still need to insert a '/' in case neither side
// has one. We know the location doesn't start with a / already.
// so, if the referrer doesn't end with one, add it.
ensure_trailing_slash(url);
url += location;
}
return url;
}
// find the value of the named HTTP header. A reference to a static empty
// string is returned when the header does not exist.
std::string const& http_parser::header(string_view const key) const
{
	static std::string const empty;
	auto const found = m_header.find(key.to_string());
	if (found == m_header.end()) return empty;
	return found->second;
}
// interpret a header value as a positive number of seconds. Returns
// boost::none when the header is absent or does not parse to a positive
// integer.
boost::optional<seconds32> http_parser::header_duration(string_view const key) const
{
	auto const found = m_header.find(key.to_string());
	if (found == m_header.end()) return boost::none;
	long const value = std::atol(found->second.c_str());
	if (value <= 0) return boost::none;
	return seconds32(value);
}
// destructor defined out-of-line, defaulted
http_parser::~http_parser() = default;
// construct a parser; the behaviour flags are stored in m_flags
http_parser::http_parser(int const flags) : m_flags(flags) {}
std::tuple<int, int> http_parser::incoming(
span<char const> recv_buffer, bool& error)
{
TORRENT_ASSERT(recv_buffer.size() >= m_recv_buffer.size());
std::tuple<int, int> ret(0, 0);
std::ptrdiff_t start_pos = m_recv_buffer.size();
// early exit if there's nothing new in the receive buffer
if (start_pos == recv_buffer.size()) return ret;
m_recv_buffer = recv_buffer;
if (m_state == error_state)
{
error = true;
return ret;
}
char const* pos = recv_buffer.data() + m_recv_pos;
restart_response:
| ||
relevance 0 | ../src/pe_crypto.cpp:62 | it would be nice to get the literal working |
it would be nice to get the literal working../src/pe_crypto.cpp:62#include <boost/multiprecision/cpp_int.hpp>
// for backwards compatibility with boost < 1.60 which was before export_bits
// and import_bits were introduced
#if BOOST_VERSION < 106000
#include "libtorrent/aux_/cppint_import_export.hpp"
#endif
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#include "libtorrent/random.hpp"
#include "libtorrent/aux_/alloca.hpp"
#include "libtorrent/pe_crypto.hpp"
#include "libtorrent/hasher.hpp"
namespace libtorrent {
namespace mp = boost::multiprecision;
namespace {
key_t const dh_prime
("0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563");
}
std::array<char, 96> export_key(key_t const& k)
{
std::array<char, 96> ret;
auto* begin = reinterpret_cast<std::uint8_t*>(ret.data());
std::uint8_t* end = mp::export_bits(k, begin, 8);
| ||
relevance 0 | ../src/pe_crypto.cpp:73 | it would be nice to be able to export to a fixed width field, so we wouldn't have to shift it later |
it would be nice to be able to export to a fixed width field, so
we wouldn't have to shift it later../src/pe_crypto.cpp:73#include "libtorrent/random.hpp"
#include "libtorrent/aux_/alloca.hpp"
#include "libtorrent/pe_crypto.hpp"
#include "libtorrent/hasher.hpp"
namespace libtorrent {
namespace mp = boost::multiprecision;
namespace {
key_t const dh_prime
("0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563");
}
std::array<char, 96> export_key(key_t const& k)
{
std::array<char, 96> ret;
auto* begin = reinterpret_cast<std::uint8_t*>(ret.data());
std::uint8_t* end = mp::export_bits(k, begin, 8);
if (end < begin + 96)
{
int const len = int(end - begin);
std::memmove(begin + 96 - len, begin, aux::numeric_cast<std::size_t>(len));
std::memset(begin, 0, aux::numeric_cast<std::size_t>(96 - len));
}
return ret;
}
void rc4_init(const unsigned char* in, std::size_t len, rc4 *state);
std::size_t rc4_encrypt(unsigned char *out, std::size_t outlen, rc4 *state);
// Set the prime P and the generator, generate local public key
dh_key_exchange::dh_key_exchange()
{
aux::array<std::uint8_t, 96> random_key;
aux::random_bytes({reinterpret_cast<char*>(random_key.data())
, static_cast<std::ptrdiff_t>(random_key.size())});
// create local key (random)
mp::import_bits(m_dh_local_secret, random_key.begin(), random_key.end());
// key = (2 ^ secret) % prime
m_dh_local_key = mp::powm(key_t(2), m_dh_local_secret, dh_prime);
}
// compute shared secret given remote public key
void dh_key_exchange::compute_secret(std::uint8_t const* remote_pubkey)
{
TORRENT_ASSERT(remote_pubkey);
key_t key;
| ||
relevance 0 | ../src/udp_socket.cpp:528 | use the system resolver_interface here |
use the system resolver_interface here../src/udp_socket.cpp:528 m_proxy_settings = ps;
if (m_abort) return;
if (ps.type == settings_pack::socks5
|| ps.type == settings_pack::socks5_pw)
{
// connect to socks5 server and open up the UDP tunnel
m_socks5_connection = std::make_shared<socks5>(lt::get_io_service(m_socket)
, m_listen_socket, alerts);
m_socks5_connection->start(ps);
}
}
// ===================== SOCKS 5 =========================
void socks5::start(aux::proxy_settings const& ps)
{
m_proxy_settings = ps;
tcp::resolver::query q(ps.hostname, to_string(ps.port).data());
ADD_OUTSTANDING_ASYNC("socks5::on_name_lookup");
m_resolver.async_resolve(q, std::bind(
&socks5::on_name_lookup, self(), _1, _2));
}
void socks5::on_name_lookup(error_code const& e, tcp::resolver::iterator i)
{
COMPLETE_ASYNC("socks5::on_name_lookup");
if (m_abort) return;
if (e == boost::asio::error::operation_aborted) return;
if (e)
{
if (m_alerts.should_post<socks5_alert>())
m_alerts.emplace_alert<socks5_alert>(m_listen_socket.get_local_endpoint()
, operation_t::hostname_lookup, e);
++m_failures;
retry_connection();
return;
}
// only set up a SOCKS5 tunnel for sockets with the same address family
// as the proxy
// this is a hack to mitigate excessive SOCKS5 tunnels, until this can get
// fixed properly.
for (;;)
{
if (i == tcp::resolver::iterator{})
| ||
relevance 0 | ../src/udp_socket.cpp:644 | perhaps an attempt should be made to bind m_socks5_sock to the device of m_listen_socket |
perhaps an attempt should be made to bind m_socks5_sock to the
device of m_listen_socket../src/udp_socket.cpp:644 if (ec)
{
if (m_alerts.should_post<socks5_alert>())
m_alerts.emplace_alert<socks5_alert>(m_proxy_addr, operation_t::sock_option, ec);
ec.clear();
}
#endif
#endif
tcp::endpoint const bind_ep(m_listen_socket.get_local_endpoint().address(), 0);
m_socks5_sock.bind(bind_ep, ec);
if (ec)
{
if (m_alerts.should_post<socks5_alert>())
m_alerts.emplace_alert<socks5_alert>(m_proxy_addr, operation_t::sock_bind, ec);
++m_failures;
retry_connection();
return;
}
ADD_OUTSTANDING_ASYNC("socks5::on_connected");
m_socks5_sock.async_connect(m_proxy_addr
, std::bind(&socks5::on_connected, self(), _1));
ADD_OUTSTANDING_ASYNC("socks5::on_connect_timeout");
m_timer.expires_from_now(seconds(10));
m_timer.async_wait(std::bind(&socks5::on_connect_timeout
, self(), _1));
}
void socks5::on_connect_timeout(error_code const& e)
{
COMPLETE_ASYNC("socks5::on_connect_timeout");
if (e == boost::asio::error::operation_aborted) return;
if (m_abort) return;
if (m_alerts.should_post<socks5_alert>())
m_alerts.emplace_alert<socks5_alert>(m_proxy_addr, operation_t::connect, errors::timed_out);
error_code ignore;
m_socks5_sock.close(ignore);
++m_failures;
retry_connection();
}
void socks5::on_connected(error_code const& e)
{
| ||
relevance 0 | ../src/file_storage.cpp:996 | padfiles should be removed |
padfiles should be removed../src/file_storage.cpp:996 void file_storage::swap(file_storage& ti) noexcept
{
using std::swap;
swap(ti.m_files, m_files);
swap(ti.m_file_hashes, m_file_hashes);
swap(ti.m_symlinks, m_symlinks);
swap(ti.m_mtime, m_mtime);
swap(ti.m_paths, m_paths);
swap(ti.m_name, m_name);
swap(ti.m_total_size, m_total_size);
swap(ti.m_num_pieces, m_num_pieces);
swap(ti.m_piece_length, m_piece_length);
}
void file_storage::optimize(int const pad_file_limit, int alignment
, bool const tail_padding)
{
if (alignment == -1)
alignment = m_piece_length;
std::int64_t off = 0;
int padding_file = 0;
for (auto i = m_files.begin(); i != m_files.end(); ++i)
{
if ((off % alignment) == 0)
{
// this file position is aligned, pick the largest
// available file to put here. If we encounter a file whose size is
// divisible by `alignment`, we pick that immediately, since that
// will not affect whether we're at an aligned position and will
// improve packing of files
auto best_match = i;
for (auto k = i; k != m_files.end(); ++k)
{
// a file whose size fits the alignment always takes priority,
// since it will let us keep placing aligned files
if ((k->size % aux::numeric_cast<std::uint64_t>(alignment)) == 0)
{
best_match = k;
break;
}
// otherwise, pick the largest file, to have as many bytes be
// aligned.
if (best_match->size < k->size) best_match = k;
}
if (best_match != i)
{
int const index = int(best_match - m_files.begin());
int const cur_index = int(i - m_files.begin());
| ||
relevance 0 | ../src/file_storage.cpp:1149 | in C++17 this could be string_view |
in C++17 this could be string_view../src/file_storage.cpp:1149 e.pad_file = true;
offset += size;
++pad_file_counter;
if (!m_mtime.empty()) m_mtime.resize(index + 1, 0);
if (!m_file_hashes.empty()) m_file_hashes.resize(index + 1, nullptr);
if (index != cur_index) reorder_file(index, cur_index);
}
void file_storage::sanitize_symlinks()
{
// symlinks are unusual, this function is optimized assuming there are no
// symbolic links in the torrent. If we find one symbolic link, we'll
// build the hash table of files it's allowed to refer to, but don't pay
// that price up-front.
std::unordered_map<std::string, file_index_t> file_map;
bool file_map_initialized = false;
// lazily instantiated set of all valid directories a symlink may point to
std::unordered_set<std::string> dir_map;
bool dir_map_initialized = false;
// symbolic links that points to directories
std::unordered_map<std::string, std::string> dir_links;
// we validate symlinks in (potentially) 2 passes over the files.
// remaining symlinks to validate after the first pass
std::vector<file_index_t> symlinks_to_validate;
for (auto const i : file_range())
{
if (!(file_flags(i) & file_storage::flag_symlink)) continue;
if (!file_map_initialized)
{
for (auto const j : file_range())
file_map.insert({internal_file_path(j), j});
file_map_initialized = true;
}
internal_file_entry const& fe = m_files[i];
TORRENT_ASSERT(fe.symlink_index < int(m_symlinks.size()));
// symlink targets are only allowed to point to files or directories in
// this torrent.
{
std::string target = m_symlinks[fe.symlink_index];
if (is_complete(target))
{
| ||
relevance 0 | ../src/block_cache.cpp:1009 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
to iterate over this linked list. Presumably because of the random
access of memory. It would be nice if pieces with no evictable blocks
weren't in this list../src/block_cache.cpp:1009 }
else if (m_last_cache_op == ghost_hit_lru1)
{
// when we insert new items or move things from L1 to L2
// evict blocks from L2
lru_list[1] = &m_lru[cached_piece_entry::read_lru2];
lru_list[2] = &m_lru[cached_piece_entry::read_lru1];
}
else
{
// when we get cache hits in L2 evict from L1
lru_list[1] = &m_lru[cached_piece_entry::read_lru1];
lru_list[2] = &m_lru[cached_piece_entry::read_lru2];
}
// end refers to which end of the ARC cache we're evicting
// from. The LFU or the LRU end
for (int end = 0; num > 0 && end < 3; ++end)
{
// iterate over all blocks in order of last being used (oldest first) and
for (auto i = lru_list[end]->iterate(); i.get() && num > 0;)
{
cached_piece_entry* pe = i.get();
TORRENT_PIECE_ASSERT(pe->in_use, pe);
i.next();
if (pe == ignore)
continue;
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == nullptr, pe);
#endif
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
move_to_ghost(pe);
continue;
}
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
// all blocks are pinned in this piece, skip it
if (pe->num_blocks <= pe->pinned) continue;
// go through the blocks and evict the ones that are not dirty and not
// referenced
int removed = 0;
for (int j = 0; j < pe->blocks_in_piece && num > 0; ++j)
{
cached_block_entry& b = pe->blocks[j];
| ||
relevance 0 | ../src/block_cache.cpp:1080 | this should probably only be done every n:th time |
this should probably only be done every n:th time../src/block_cache.cpp:1080 }
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == nullptr, pe);
#endif
move_to_ghost(pe);
}
}
}
// if we can't evict enough blocks from the read cache, also look at write
// cache pieces for blocks that have already been written to disk and can be
// evicted. In the first pass, we only evict blocks that have been hashed; in
// the second pass we flush anything. This is potentially a very expensive
// operation, since we're likely to have to iterate over every single block in
// the cache, and we might not get to evict anything.
if (num > 0 && m_read_cache_size > m_pinned_blocks)
{
for (int pass = 0; pass < 2 && num > 0; ++pass)
{
for (auto i = m_lru[cached_piece_entry::write_lru].iterate(); i.get() && num > 0;)
{
cached_piece_entry* pe = i.get();
TORRENT_PIECE_ASSERT(pe->in_use, pe);
i.next();
if (pe == ignore)
continue;
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == nullptr, pe);
#endif
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
erase_piece(pe);
continue;
}
// all blocks in this piece are dirty
if (pe->num_dirty == pe->num_blocks)
continue;
int end = pe->blocks_in_piece;
| ||
relevance 0 | ../src/block_cache.cpp:1714 | create a holder for refcounts that automatically decrement |
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1714 j->argument = disk_buffer_holder(allocator
, allocate_buffer("send buffer"), 0x4000);
if (!boost::get<disk_buffer_holder>(j->argument)) return -2;
while (size > 0)
{
TORRENT_PIECE_ASSERT(pe->blocks[block].buf, pe);
int const to_copy = std::min(default_block_size - block_offset, size);
std::memcpy(boost::get<disk_buffer_holder>(j->argument).get()
+ buffer_offset
, pe->blocks[block].buf + block_offset
, aux::numeric_cast<std::size_t>(to_copy));
pe->blocks[block].cache_hit = 1;
size -= to_copy;
block_offset = 0;
buffer_offset += to_copy;
++block;
}
// we incremented the refcount for both of these blocks.
// now decrement it.
dec_block_refcount(pe, start_block, ref_reading);
if (blocks_to_read == 2) dec_block_refcount(pe, start_block + 1, ref_reading);
maybe_free_piece(pe);
return j->d.io.buffer_size;
}
void block_cache::reclaim_block(storage_interface* st, aux::block_cache_reference const& ref)
{
TORRENT_ASSERT(st != nullptr);
int const blocks_per_piece = (st->files().piece_length() + default_block_size - 1) / default_block_size;
piece_index_t const piece(ref.cookie / blocks_per_piece);
int const block(ref.cookie % blocks_per_piece);
cached_piece_entry* pe = find_piece(st, piece);
TORRENT_ASSERT(pe);
if (pe == nullptr) return;
TORRENT_PIECE_ASSERT(pe->in_use, pe);
TORRENT_PIECE_ASSERT(pe->blocks[block].buf, pe);
dec_block_refcount(pe, block, block_cache::ref_reading);
TORRENT_PIECE_ASSERT(m_send_buffer_blocks > 0, pe);
--m_send_buffer_blocks;
maybe_free_piece(pe);
}
bool block_cache::maybe_free_piece(cached_piece_entry* pe)
{
if (!pe->ok_to_evict()
| ||
relevance 0 | ../src/kademlia/item.cpp:146 | implement ctor for entry from bdecode_node? |
implement ctor for entry from bdecode_node?../src/kademlia/item.cpp:146 return ed25519_sign({str, len}, pk, sk);
}
item::item(public_key const& pk, span<char const> salt)
: m_salt(salt.data(), static_cast<std::size_t>(salt.size()))
, m_pk(pk)
, m_seq(0)
, m_mutable(true)
{}
item::item(entry v)
: m_value(std::move(v))
, m_seq(0)
, m_mutable(false)
{}
item::item(bdecode_node const& v)
: m_seq(0)
, m_mutable(false)
{
m_value = v;
}
item::item(entry v, span<char const> salt
, sequence_number const seq, public_key const& pk, secret_key const& sk)
{
assign(std::move(v), salt, seq, pk, sk);
}
void item::assign(entry v)
{
m_mutable = false;
m_value = std::move(v);
}
void item::assign(entry v, span<char const> salt
, sequence_number const seq, public_key const& pk, secret_key const& sk)
{
std::array<char, 1000> buffer;
int const bsize = bencode(buffer.begin(), v);
TORRENT_ASSERT(bsize <= 1000);
m_sig = sign_mutable_item(span<char const>(buffer).first(bsize)
, salt, seq, pk, sk);
m_salt.assign(salt.data(), static_cast<std::size_t>(salt.size()));
m_pk = pk;
m_seq = seq;
m_mutable = true;
m_value = std::move(v);
}
void item::assign(bdecode_node const& v)
| ||
relevance 0 | ../src/kademlia/node.cpp:1170 | keep the returned value to pass as a limit to write_nodes_entries when implemented |
keep the returned value to pass as a limit
to write_nodes_entries when implemented../src/kademlia/node.cpp:1170 }
}
else if (query == "sample_infohashes")
{
static key_desc_t const msg_desc[] = {
{"target", bdecode_node::string_t, 20, 0},
{"want", bdecode_node::list_t, 0, key_desc_t::optional},
};
bdecode_node msg_keys[2];
if (!verify_message(arg_ent, msg_desc, msg_keys, error_string))
{
m_counters.inc_stats_counter(counters::dht_invalid_sample_infohashes);
incoming_error(e, error_string);
return;
}
m_counters.inc_stats_counter(counters::dht_sample_infohashes_in);
sha1_hash const target(msg_keys[0].string_ptr());
m_storage.get_infohashes_sample(reply);
write_nodes_entries(target, msg_keys[1], reply);
}
else
{
// if we don't recognize the message but there's a
// 'target' or 'info_hash' in the arguments, treat it
// as find_node to be future compatible
bdecode_node target_ent = arg_ent.dict_find_string("target");
if (!target_ent || target_ent.string_length() != 20)
{
target_ent = arg_ent.dict_find_string("info_hash");
if (!target_ent || target_ent.string_length() != 20)
{
incoming_error(e, "unknown message");
return;
}
}
sha1_hash const target(target_ent.string_ptr());
// always return nodes as well as peers
write_nodes_entries(target, arg_ent.dict_find_list("want"), reply);
}
}
| ||
relevance 0 | ../src/kademlia/node.cpp:1198 | limit number of entries in the result |
limit number of entries in the result../src/kademlia/node.cpp:1198 // if we don't recognize the message but there's a
// 'target' or 'info_hash' in the arguments, treat it
// as find_node to be future compatible
bdecode_node target_ent = arg_ent.dict_find_string("target");
if (!target_ent || target_ent.string_length() != 20)
{
target_ent = arg_ent.dict_find_string("info_hash");
if (!target_ent || target_ent.string_length() != 20)
{
incoming_error(e, "unknown message");
return;
}
}
sha1_hash const target(target_ent.string_ptr());
// always return nodes as well as peers
write_nodes_entries(target, arg_ent.dict_find_list("want"), reply);
}
}
void node::write_nodes_entries(sha1_hash const& info_hash
, bdecode_node const& want, entry& r)
{
// if no wants entry was specified, include a nodes
// entry based on the protocol the request came in with
if (want.type() != bdecode_node::list_t)
{
std::vector<node_entry> n;
m_table.find_node(info_hash, n, 0);
r[protocol_nodes_key()] = write_nodes_entry(n);
return;
}
// if there is a wants entry then we may need to reach into
// another node's routing table to get nodes of the requested type
// we use a map maintained by the owning dht_tracker to find the
// node associated with each string in the want list, which may
// include this node
for (int i = 0; i < want.list_size(); ++i)
{
bdecode_node wanted = want.list_at(i);
if (wanted.type() != bdecode_node::string_t)
continue;
node* wanted_node = m_get_foreign_node(info_hash, wanted.string_value().to_string());
if (!wanted_node) continue;
std::vector<node_entry> n;
wanted_node->m_table.find_node(info_hash, n, 0);
r[wanted_node->protocol_nodes_key()] = write_nodes_entry(n);
}
}
| ||
relevance 0 | ../src/kademlia/routing_table.cpp:275 | This is temporary. For now, only report the largest routing table (of potentially multiple ones, for multi-homed systems) in next major version, break the ABI and support reporting all of them in the dht_stats_alert |
This is temporary. For now, only report the largest routing table
(of potentially multiple ones, for multi-homed systems)
in next major version, break the ABI and support reporting all of them in
the dht_stats_alert../src/kademlia/routing_table.cpp:275 , m_bucket_size(bucket_size)
{
// bucket sizes must be a power of 2
TORRENT_ASSERT_VAL(((bucket_size - 1) & bucket_size) == 0, bucket_size);
TORRENT_UNUSED(log);
m_buckets.reserve(30);
}
int routing_table::bucket_limit(int bucket) const
{
if (!m_settings.extended_routing_table) return m_bucket_size;
static const aux::array<int, 4> size_exceptions{{{16, 8, 4, 2}}};
if (bucket < size_exceptions.end_index())
return m_bucket_size * size_exceptions[bucket];
return m_bucket_size;
}
void routing_table::status(std::vector<dht_routing_bucket>& s) const
{
if (s.size() > m_buckets.size()) return;
s.clear();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
s.push_back(b);
}
}
#if TORRENT_ABI_VERSION == 1
| ||
relevance 0 | ../src/kademlia/routing_table.cpp:300 | arvidn note when it's across IPv4 and IPv6, adding (dht_global_nodes) would make sense. in the future though, where we may have one DHT node per external interface (which may be multiple of the same address family), then it becomes a bit trickier |
arvidn note
when it's across IPv4 and IPv6, adding (dht_global_nodes) would
make sense. in the future though, where we may have one DHT node
per external interface (which may be multiple of the same address
family), then it becomes a bit trickier../src/kademlia/routing_table.cpp:300 if (s.size() > m_buckets.size()) return;
s.clear();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
s.push_back(b);
}
}
#if TORRENT_ABI_VERSION == 1
void routing_table::status(session_status& s) const
{
int dht_nodes;
int dht_node_cache;
int ignore;
std::tie(dht_nodes, dht_node_cache, ignore) = size();
s.dht_nodes += dht_nodes;
s.dht_node_cache += dht_node_cache;
s.dht_global_nodes += num_global_nodes();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
#if TORRENT_ABI_VERSION == 1
b.last_active = 0;
#endif
s.dht_routing_table.push_back(b);
}
}
#endif
std::tuple<int, int, int> routing_table::size() const
{
int nodes = 0;
int replacements = 0;
int confirmed = 0;
for (auto const& i : m_buckets)
{
nodes += int(i.live_nodes.size());
confirmed += static_cast<int>(std::count_if(i.live_nodes.begin(), i.live_nodes.end()
, [](node_entry const& k) { return k.confirmed(); } ));
replacements += int(i.replacements.size());
}
return std::make_tuple(nodes, replacements, confirmed);
}
| ||
relevance 0 | ../src/kademlia/routing_table.cpp:504 | this needs to take bucket "prefix" into account. It should be unified with add_node_impl() |
this needs to take bucket "prefix" into account. It should be unified
with add_node_impl()../src/kademlia/routing_table.cpp:504{
for (auto i = m_buckets.begin() , end(m_buckets.end()); i != end; ++i)
{
for (auto j = i->replacements.begin(); j != i->replacements.end(); ++j)
{
if (j->addr() != ep.address()) continue;
if (j->port() != ep.port()) continue;
return std::make_tuple(&*j, i, &i->replacements);
}
for (auto j = i->live_nodes.begin(); j != i->live_nodes.end(); ++j)
{
if (j->addr() != ep.address()) continue;
if (j->port() != ep.port()) continue;
return std::make_tuple(&*j, i, &i->live_nodes);
}
}
return std::tuple<node_entry*, routing_table::table_t::iterator, bucket_t*>(
nullptr, m_buckets.end(), nullptr);
}
void routing_table::fill_from_replacements(table_t::iterator bucket)
{
bucket_t& b = bucket->live_nodes;
bucket_t& rb = bucket->replacements;
int const bucket_size = bucket_limit(int(std::distance(m_buckets.begin(), bucket)));
if (int(b.size()) >= bucket_size) return;
// sort by RTT first, to find the node with the lowest
// RTT that is pinged
std::sort(rb.begin(), rb.end());
while (int(b.size()) < bucket_size && !rb.empty())
{
auto j = std::find_if(rb.begin(), rb.end(), std::bind(&node_entry::pinged, _1));
if (j == rb.end()) break;
b.push_back(*j);
rb.erase(j);
}
}
void routing_table::prune_empty_bucket()
{
if (m_buckets.back().live_nodes.empty()
&& m_buckets.back().replacements.empty())
{
m_buckets.erase(m_buckets.end() - 1);
}
}
void routing_table::remove_node(node_entry* n, bucket_t* b)
| ||
relevance 0 | ../src/kademlia/put_data.cpp:88 | what if o is not an instance of put_data_observer? This needs to be redesigned for better type safety. |
what if o is not an instance of put_data_observer? This needs to be
redesigned for better type safety.../src/kademlia/put_data.cpp:88 }
}
void put_data::done()
{
m_done = true;
#ifndef TORRENT_DISABLE_LOGGING
get_node().observer()->log(dht_logger::traversal, "[%u] %s DONE, response %d, timeout %d"
, id(), name(), num_responses(), num_timeouts());
#endif
m_put_callback(m_data, num_responses());
traversal_algorithm::done();
}
bool put_data::invoke(observer_ptr o)
{
if (m_done) return false;
auto* po = static_cast<put_data_observer*>(o.get());
entry e;
e["y"] = "q";
e["q"] = "put";
entry& a = e["a"];
a["v"] = m_data.value();
a["token"] = po->m_token;
if (m_data.is_mutable())
{
a["k"] = m_data.pk().bytes;
a["seq"] = m_data.seq().value;
a["sig"] = m_data.sig().bytes;
if (!m_data.salt().empty())
{
a["salt"] = m_data.salt();
}
}
m_node.stats_counters().inc_stats_counter(counters::dht_put_out);
return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
} } // namespace libtorrent::dht
| ||
relevance 0 | ../src/kademlia/node_id.cpp:64 | it's a little bit weird to return 159 - leading zeroes. It should probably be 160 - leading zeroes, but all other code in here is tuned to this expectation now, and it doesn't really matter (other than complexity) |
it's a little bit weird to return 159 - leading zeroes. It should
probably be 160 - leading zeroes, but all other code in here is tuned to
this expectation now, and it doesn't really matter (other than complexity)../src/kademlia/node_id.cpp:64
// returns the distance between the two nodes
// using the kademlia XOR-metric
node_id distance(node_id const& n1, node_id const& n2)
{
return n1 ^ n2;
}
// returns true if: distance(n1, ref) < distance(n2, ref)
bool compare_ref(node_id const& n1, node_id const& n2, node_id const& ref)
{
node_id const lhs = n1 ^ ref;
node_id const rhs = n2 ^ ref;
return lhs < rhs;
}
// returns n in: 2^n <= distance(n1, n2) < 2^(n+1)
// useful for finding out which bucket a node belongs to
int distance_exp(node_id const& n1, node_id const& n2)
{
return std::max(159 - distance(n1, n2).count_leading_zeroes(), 0);
}
int min_distance_exp(node_id const& n1, std::vector<node_id> const& ids)
{
TORRENT_ASSERT(ids.size() > 0);
int min = 160; // see distance_exp for the why of this constant
for (auto const& node_id : ids)
{
min = std::min(min, distance_exp(n1, node_id));
}
return min;
}
node_id generate_id_impl(address const& ip_, std::uint32_t r)
{
std::uint8_t* ip = nullptr;
static std::uint8_t const v4mask[] = { 0x03, 0x0f, 0x3f, 0xff };
static std::uint8_t const v6mask[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
std::uint8_t const* mask = nullptr;
int num_octets = 0;
address_v4::bytes_type b4{};
address_v6::bytes_type b6{};
if (ip_.is_v6())
{
b6 = ip_.to_v6().to_bytes();
ip = b6.data();
| ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:303 | pick the closest node rather than the first |
pick the closest node rather than the first../src/kademlia/dht_tracker.cpp:303 n.second.dht.new_write_key();
#ifndef TORRENT_DISABLE_LOGGING
m_log->log(dht_logger::tracker, "*** new write key***");
#endif
}
void dht_tracker::update_storage_node_ids()
{
std::vector<sha1_hash> ids;
for (auto& n : m_nodes)
ids.push_back(n.second.dht.nid());
m_storage.update_node_ids(ids);
}
node* dht_tracker::get_node(node_id const& id, std::string const& family_name)
{
TORRENT_UNUSED(id);
for (auto& n : m_nodes)
{
if (n.second.dht.protocol_family_name() == family_name)
return &n.second.dht;
}
return nullptr;
}
void dht_tracker::get_peers(sha1_hash const& ih
, std::function<void(std::vector<tcp::endpoint> const&)> f)
{
for (auto& n : m_nodes)
n.second.dht.get_peers(ih, f, {}, {});
}
void dht_tracker::announce(sha1_hash const& ih, int listen_port
, announce_flags_t const flags
, std::function<void(std::vector<tcp::endpoint> const&)> f)
{
for (auto& n : m_nodes)
n.second.dht.announce(ih, listen_port, flags, f);
}
void dht_tracker::sample_infohashes(udp::endpoint const& ep, sha1_hash const& target
, std::function<void(time_duration
, int, std::vector<sha1_hash>
, std::vector<std::pair<sha1_hash, udp::endpoint>>)> f)
{
for (auto& n : m_nodes)
{
if (ep.protocol() != (n.first.get_external_address().is_v4() ? udp::v4() : udp::v6()))
continue;
| ||
relevance 0 | ../include/libtorrent/config.hpp:47 | don't include that here. Make each header that uses the export macros include it instead, and move it to aux_ |
don't include that here. Make each header that uses the export macros
include it instead, and move it to aux_../include/libtorrent/config.hpp:47CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_CONFIG_HPP_INCLUDED
#define TORRENT_CONFIG_HPP_INCLUDED
#include <cstddef>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#define _FILE_OFFSET_BITS 64
#include <boost/config.hpp>
#include <boost/version.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#include "libtorrent/aux_/export.hpp"
#ifdef __linux__
#include <linux/version.h> // for LINUX_VERSION_CODE and KERNEL_VERSION
#endif // __linux
#if defined __MINGW64__ || defined __MINGW32__
// GCC warns on format codes that are incompatible with glibc, which the windows
// format codes are. So we need to disable those for mingw targets
#pragma GCC diagnostic ignored "-Wformat"
#pragma GCC diagnostic ignored "-Wformat-extra-args"
#endif
#if defined __GNUC__
#ifdef _GLIBCXX_CONCEPT_CHECKS
#define TORRENT_COMPLETE_TYPES_REQUIRED 1
#endif
// ======= SUNPRO =========
#elif defined __SUNPRO_CC
#define TORRENT_COMPLETE_TYPES_REQUIRED 1
// ======= MSVC =========
#elif defined BOOST_MSVC
// class X needs to have dll-interface to be used by clients of class Y
#pragma warning(disable:4251)
| ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:47 | move this out of counters |
move this out of counters../include/libtorrent/performance_counters.hpp:47CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_PERFORMANCE_COUNTERS_HPP_INCLUDED
#define TORRENT_PERFORMANCE_COUNTERS_HPP_INCLUDED
#include "libtorrent/config.hpp"
#include "libtorrent/aux_/array.hpp"
#include <cstdint>
#include <atomic>
#include <mutex>
namespace libtorrent {
struct TORRENT_EXTRA_EXPORT counters
{
enum stats_counter_t
{
// the number of peers that were disconnected this
// tick due to protocol error
error_peers,
disconnected_peers,
eof_peers,
connreset_peers,
connrefused_peers,
connaborted_peers,
notconnected_peers,
perm_peers,
buffer_peers,
unreachable_peers,
broken_pipe_peers,
addrinuse_peers,
no_access_peers,
invalid_arg_peers,
aborted_peers,
piece_requests,
max_piece_requests,
invalid_piece_requests,
choked_piece_requests,
cancelled_piece_requests,
piece_rejects,
error_incoming_peers,
error_outgoing_peers,
error_rc4_peers,
error_encrypted_peers,
error_tcp_peers,
| ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:151 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
should keepalives be in here too?
how about dont-have, share-mode, upload-only../include/libtorrent/performance_counters.hpp:151 // successful incoming connections (not rejected for any reason)
incoming_connections,
// counts events where the network
// thread wakes up
on_read_counter,
on_write_counter,
on_tick_counter,
on_lsd_counter,
on_lsd_peer_counter,
on_udp_counter,
on_accept_counter,
on_disk_queue_counter,
on_disk_counter,
#if TORRENT_ABI_VERSION == 1
torrent_evicted_counter,
#endif
// bittorrent message counters
num_incoming_choke,
num_incoming_unchoke,
num_incoming_interested,
num_incoming_not_interested,
num_incoming_have,
num_incoming_bitfield,
num_incoming_request,
num_incoming_piece,
num_incoming_cancel,
num_incoming_dht_port,
num_incoming_suggest,
num_incoming_have_all,
num_incoming_have_none,
num_incoming_reject,
num_incoming_allowed_fast,
num_incoming_ext_handshake,
num_incoming_pex,
num_incoming_metadata,
num_incoming_extended,
num_outgoing_choke,
num_outgoing_unchoke,
num_outgoing_interested,
num_outgoing_not_interested,
num_outgoing_have,
num_outgoing_bitfield,
num_outgoing_request,
num_outgoing_piece,
num_outgoing_cancel,
num_outgoing_dht_port,
num_outgoing_suggest,
| ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:493 | some space could be saved here by making gauges 32 bits |
some space could be saved here by making gauges 32 bits../include/libtorrent/performance_counters.hpp:493 | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:494 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
restore these to regular integers. Instead have one copy
of the counters per thread and collect them at convenient
synchronization points../include/libtorrent/performance_counters.hpp:494#ifdef ATOMIC_LLONG_LOCK_FREE
#define TORRENT_COUNTER_NOEXCEPT noexcept
#else
#define TORRENT_COUNTER_NOEXCEPT
#endif
counters() TORRENT_COUNTER_NOEXCEPT;
counters(counters const&) TORRENT_COUNTER_NOEXCEPT;
counters& operator=(counters const&) TORRENT_COUNTER_NOEXCEPT;
// returns the new value
std::int64_t inc_stats_counter(int c, std::int64_t value = 1) TORRENT_COUNTER_NOEXCEPT;
std::int64_t operator[](int i) const TORRENT_COUNTER_NOEXCEPT;
void set_value(int c, std::int64_t value) TORRENT_COUNTER_NOEXCEPT;
void blend_stats_counter(int c, std::int64_t value, int ratio) TORRENT_COUNTER_NOEXCEPT;
private:
#ifdef ATOMIC_LLONG_LOCK_FREE
aux::array<std::atomic<std::int64_t>, num_counters> m_stats_counter;
#else
// if the atomic type is't lock-free, use a single lock instead, for
// the whole array
mutable std::mutex m_mutex;
aux::array<std::int64_t, num_counters> m_stats_counter;
#endif
};
}
#endif
| ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:186 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
the first cache line) and make the constructor
take a raw pointer. torrent objects should always
outlive their peers../include/libtorrent/peer_connection.hpp:186 , m_snubbed(false)
, m_interesting(false)
, m_choked(true)
, m_ignore_stats(false)
{}
// explicitly disallow assignment, to silence msvc warning
peer_connection_hot_members& operator=(peer_connection_hot_members const&) = delete;
protected:
// the pieces the other end have
typed_bitfield<piece_index_t> m_have_piece;
// this is the torrent this connection is
// associated with. If the connection is an
// incoming connection, this is set to zero
// until the info_hash is received. Then it's
// set to the torrent it belongs to.
std::weak_ptr<torrent> m_torrent;
public:
// a back reference to the session
// the peer belongs to.
aux::session_interface& m_ses;
// settings that apply to this peer
aux::session_settings const& m_settings;
protected:
// this is true if this connection has been added
// to the list of connections that will be closed.
bool m_disconnecting:1;
// this is true until this socket has become
// writable for the first time (i.e. the
// connection completed). While connecting
// the timeout will not be triggered. This is
// because windows XP SP2 may delay connection
// attempts, which means that the connection
// may not even have been attempted when the
// time out is reached.
bool m_connecting:1;
// this is set to true if the last time we tried to
// pick a piece to download, we could only find
// blocks that were already requested from other
// peers. In this case, we should not try to pick
| ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:985 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
torrent and session should implement this interface../include/libtorrent/peer_connection.hpp:985
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
// before the connection completes, it means we want to
// force the connection to be bound to the specified interface.
// if it ends up being bound to a different local IP, the connection
// is closed.
tcp::endpoint m_local;
// remote peer's id
peer_id m_peer_id;
protected:
template <typename Fun, typename... Args>
void wrap(Fun f, Args&&... a);
// statistics about upload and download speeds
// and total amount of uploads and downloads for
// this peer
stat m_statistics;
// the number of outstanding bytes expected
// to be received by extensions
int m_extension_outstanding_bytes = 0;
// the number of time critical requests
// queued up in the m_request_queue that
// soon will be committed to the download
// queue. This is included in download_queue_time()
// so that it can be used while adding more
// requests and take the previous requests
// into account without submitting it all
// immediately
int m_queued_time_critical = 0;
// the number of bytes we are currently reading
// from disk, that will be added to the send
// buffer as soon as they complete
int m_reading_bytes = 0;
// options used for the piece picker. These flags will
// be augmented with flags controlled by other settings
// like sequential download etc. These are here to
// let plugins control flags that should always be set
picker_options_t m_picker_options{};
// the number of invalid piece-requests
// we have got from this peer. If the request
// queue gets empty, and there have been
// invalid requests, we can assume the
| ||
relevance 0 | ../include/libtorrent/string_view.hpp:40 | replace this by the standard string_view in C++17 |
replace this by the standard string_view in C++17../include/libtorrent/string_view.hpp:40AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_STRING_VIEW_HPP_INCLUDED
#define TORRENT_STRING_VIEW_HPP_INCLUDED
#include <boost/version.hpp>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#if BOOST_VERSION < 106100
#include <boost/utility/string_ref.hpp>
#include <cstring> // for strchr
namespace libtorrent {
using string_view = boost::string_ref;
using wstring_view = boost::wstring_ref;
// internal
inline string_view::size_type find_first_of(string_view const v, char const c
, string_view::size_type pos)
{
while (pos < v.size())
{
if (v[pos] == c) return pos;
++pos;
}
return string_view::npos;
}
// internal
inline string_view::size_type find_first_of(string_view const v, char const* c
, string_view::size_type pos)
{
while (pos < v.size())
{
if (std::strchr(c, v[pos]) != nullptr) return pos;
++pos;
}
return string_view::npos;
| ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:766 | should this be allocated lazily? |
should this be allocated lazily?../include/libtorrent/piece_picker.hpp:766 // download list it may live in now
std::vector<downloading_piece>::iterator update_piece_state(
std::vector<downloading_piece>::iterator dp);
private:
#if TORRENT_USE_ASSERTS || TORRENT_USE_INVARIANT_CHECKS
index_range<download_queue_t> categories() const
{ return {{}, piece_picker::piece_pos::num_download_categories}; }
#endif
// the following vectors are mutable because they sometimes may
// be updated lazily, triggered by const functions
// this maps indices to number of peers that has this piece and
// index into the m_piece_info vectors.
// piece_pos::we_have_index means that we have the piece, so it
// doesn't exist in the piece_info buckets
// pieces with the filtered flag set doesn't have entries in
// the m_piece_info buckets either
mutable aux::vector<piece_pos, piece_index_t> m_piece_map;
// this indicates whether a block has been marked as a pad
// block or not. It's indexed by block index, i.e. piece_index
// * blocks_per_piece + block. These blocks should not be
// picked and are considered to be had
| ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:773 | this could be a much more efficient data structure |
this could be a much more efficient data structure../include/libtorrent/piece_picker.hpp:773#if TORRENT_USE_ASSERTS || TORRENT_USE_INVARIANT_CHECKS
index_range<download_queue_t> categories() const
{ return {{}, piece_picker::piece_pos::num_download_categories}; }
#endif
// the following vectors are mutable because they sometimes may
// be updated lazily, triggered by const functions
// this maps indices to number of peers that has this piece and
// index into the m_piece_info vectors.
// piece_pos::we_have_index means that we have the piece, so it
// doesn't exist in the piece_info buckets
// pieces with the filtered flag set doesn't have entries in
// the m_piece_info buckets either
mutable aux::vector<piece_pos, piece_index_t> m_piece_map;
// this indicates whether a block has been marked as a pad
// block or not. It's indexed by block index, i.e. piece_index
// * blocks_per_piece + block. These blocks should not be
// picked and are considered to be had
bitfield m_pad_blocks;
// tracks the number of blocks in a specific piece that are pad blocks
std::unordered_map<piece_index_t, int> m_pads_in_piece;
// when the adjacent_piece affinity is enabled, this contains the most
// recent "extents" of adjacent pieces that have been requested from
// this is mutable because it's updated by functions to pick pieces, which
// are const. That's an efficient place to update it, since it's being
// traversed already.
mutable std::vector<piece_extent_t> m_recent_extents;
// the number of bits set in the m_pad_blocks bitfield, i.e.
// the number of blocks marked as pads
int m_num_pad_blocks = 0;
// the number of pad blocks that we already have
int m_have_pad_blocks = 0;
// the number of pad blocks part of filtered pieces we don't have
int m_filtered_pad_blocks = 0;
// the number of pad blocks we have that are also filtered
int m_have_filtered_pad_blocks = 0;
// the number of seeds. These are not added to
// the availability counters of the pieces
int m_seeds = 0;
// the number of pieces that have passed the hash check
int m_num_passed = 0;
| ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:846 | it would be more intuitive to account "wanted" pieces instead of filtered |
it would be more intuitive to account "wanted" pieces
instead of filtered../include/libtorrent/piece_picker.hpp:846 , download_queue_t> m_downloads;
// this holds the information of the blocks in partially downloaded
// pieces. the downloading_piece::info index point into this vector for
// its storage
aux::vector<block_info> m_block_info;
// these are block ranges in m_block_info that are free. The numbers
// in here, when multiplied by m_blocks_per_piece is the index to the
// first block in the range that's free to use by a new downloading_piece.
// this is a free-list.
std::vector<std::uint16_t> m_free_block_infos;
std::uint16_t m_blocks_per_piece = 0;
std::uint16_t m_blocks_in_last_piece = 0;
// the number of filtered pieces that we don't already
// have. total_number_of_pieces - number_of_pieces_we_have
	// - num_filtered is supposed to be the number of pieces
// we still want to download
int m_num_filtered = 0;
// the number of pieces we have that also are filtered
int m_num_have_filtered = 0;
// we have all pieces in the range [0, m_cursor)
// m_cursor is the first piece we don't have
piece_index_t m_cursor{0};
// we have all pieces in the range [m_reverse_cursor, end)
// m_reverse_cursor is the first piece where we also have
// all the subsequent pieces
piece_index_t m_reverse_cursor{0};
// the number of pieces we have (i.e. passed + flushed).
// This includes pieces that we have filtered but still have
int m_num_have = 0;
// if this is set to true, it means update_pieces()
// has to be called before accessing m_pieces.
mutable bool m_dirty = false;
public:
enum { max_pieces = (std::numeric_limits<int>::max)() - 1 };
};
}
#endif // TORRENT_PIECE_PICKER_HPP_INCLUDED
| ||
relevance 0 | ../include/libtorrent/upnp.hpp:146 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:146{
bool in_error_code = false;
bool exit = false;
int error_code = -1;
};
struct ip_address_parse_state: error_code_parse_state
{
bool in_ip_address = false;
std::string ip_address;
};
TORRENT_EXTRA_EXPORT void find_control_url(int type, string_view, parse_state& state);
TORRENT_EXTRA_EXPORT void find_error_code(int type, string_view string
, error_code_parse_state& state);
TORRENT_EXTRA_EXPORT void find_ip_address(int type, string_view string
, ip_address_parse_state& state);
struct TORRENT_EXTRA_EXPORT upnp final
: std::enable_shared_from_this<upnp>
, single_threaded
{
upnp(io_service& ios
, aux::session_settings const& settings
, aux::portmap_callback& cb
, address_v4 const& listen_address
, address_v4 const& netmask
, std::string listen_device);
~upnp();
void start();
// Attempts to add a port mapping for the specified protocol. Valid protocols are
// ``upnp::tcp`` and ``upnp::udp`` for the UPnP class and ``natpmp::tcp`` and
// ``natpmp::udp`` for the NAT-PMP class.
//
// ``external_port`` is the port on the external address that will be mapped. This
// is a hint, you are not guaranteed that this port will be available, and it may
// end up being something else. In the portmap_alert_ notification, the actual
// external port is reported.
//
// ``local_port`` is the port in the local machine that the mapping should forward
// to.
//
// The return value is an index that identifies this port mapping. This is used
// to refer to mappings that fails or succeeds in the portmap_error_alert_ and
// portmap_alert_ respectively. If The mapping fails immediately, the return value
// is -1, which means failure. There will not be any error alert notification for
// mappings that fail with a -1 return value.
| ||
relevance 0 | ../include/libtorrent/block_cache.hpp:225 | make this 32 bits and to count seconds since the block cache was created |
make this 32 bits and to count seconds since the block cache was created../include/libtorrent/block_cache.hpp:225 tailqueue<disk_io_job> read_jobs;
piece_index_t get_piece() const { return piece; }
void* get_storage() const { return storage.get(); }
bool operator==(cached_piece_entry const& rhs) const
{ return piece == rhs.piece && storage.get() == rhs.storage.get(); }
// if this is set, we'll be calculating the hash
// for this piece. This member stores the interim
// state while we're calculating the hash.
std::unique_ptr<partial_hash> hash;
// the pointers to the block data. If this is a ghost
// cache entry, there won't be any data here
aux::unique_ptr<cached_block_entry[]> blocks;
// the last time a block was written to this piece
// plus the minimum amount of time the block is guaranteed
// to stay in the cache
time_point expire = min_time();
piece_index_t piece{0};
// the number of dirty blocks in this piece
std::uint64_t num_dirty:14;
// the number of blocks in the cache for this piece
std::uint64_t num_blocks:14;
// the total number of blocks in this piece (and the number
// of elements in the blocks array)
std::uint64_t blocks_in_piece:14;
// ---- 64 bit boundary ----
// while we have an outstanding async hash operation
// working on this piece, 'hashing' is set to 1
// When the operation returns, this is set to 0.
std::uint16_t hashing:1;
// if we've completed at least one hash job on this
// piece, and returned it. This is set to one
std::uint16_t hashing_done:1;
// if this is true, whenever refcount hits 0,
// this piece should be deleted from the cache
// (not just demoted)
std::uint16_t marked_for_deletion:1;
// this is set to true once we flush blocks past
| ||
relevance 0 | ../include/libtorrent/sha1_hash.hpp:58 | find a better place for these functions |
find a better place for these functions../include/libtorrent/sha1_hash.hpp:58#include <string>
#include <cstring>
#include <array>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/range/combine.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#include "libtorrent/config.hpp"
#include "libtorrent/assert.hpp"
#include "libtorrent/aux_/byteswap.hpp"
#include "libtorrent/aux_/ffs.hpp"
#include "libtorrent/span.hpp"
#if TORRENT_USE_IOSTREAM
#include <iosfwd>
#endif // TORRENT_USE_IOSTREAM
namespace libtorrent {
namespace aux {
TORRENT_EXTRA_EXPORT void bits_shift_left(span<std::uint32_t> number, int n);
TORRENT_EXTRA_EXPORT void bits_shift_right(span<std::uint32_t> number, int n);
}
// This type holds an N digest or any other kind of N bits
// sequence. It implements a number of convenience functions, such
// as bit operations, comparison operators etc.
//
// This data structure is 32 bits aligned, as is the case for
// each SHA-N specification.
template <std::ptrdiff_t N>
class digest32
{
static_assert(N % 32 == 0, "N must be a multiple of 32");
static constexpr std::ptrdiff_t number_size = N / 32;
static constexpr int bits_in_byte = 8;
public:
using difference_type = std::ptrdiff_t;
using index_type = std::ptrdiff_t;
// the size of the hash in bytes
static constexpr difference_type size() noexcept { return N / bits_in_byte; }
// constructs an all-zero digest
digest32() noexcept { clear(); }
digest32(digest32 const&) noexcept = default;
digest32& operator=(digest32 const&) noexcept = default;
| ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:188 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:188 void bind(endpoint_type const& /* endpoint */)
{
// m_sock.bind(endpoint);
}
#endif
error_code cancel(error_code& ec)
{
return m_sock.cancel(ec);
}
void bind(endpoint_type const& /* endpoint */, error_code& /* ec */)
{
// the reason why we ignore binds here is because we don't
// (necessarily) yet know what address family the proxy
// will resolve to, and binding to the wrong one would
// break our connection attempt later. The caller here
// doesn't necessarily know that we're proxying, so this
// bind address is based on the final endpoint, not the
// proxy.
}
#ifndef BOOST_NO_EXCEPTIONS
void open(protocol_type const&)
{
// m_sock.open(p);
}
#endif
void open(protocol_type const&, error_code&)
{
// we need to ignore this for the same reason as stated
// for ignoring bind()
// m_sock.open(p, ec);
}
#ifndef BOOST_NO_EXCEPTIONS
void close()
{
m_remote_endpoint = endpoint_type();
m_sock.close();
m_resolver.cancel();
}
#endif
void close(error_code& ec)
{
m_remote_endpoint = endpoint_type();
m_sock.close(ec);
m_resolver.cancel();
}
| ||
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:50 | make this interface smaller! |
make this interface smaller!../include/libtorrent/peer_connection_interface.hpp:50
*/
#ifndef TORRENT_PEER_CONNECTION_INTERFACE_HPP
#define TORRENT_PEER_CONNECTION_INTERFACE_HPP
#include "libtorrent/fwd.hpp"
#include "libtorrent/socket.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/operations.hpp" // for operation_t enum
#include "libtorrent/units.hpp"
namespace libtorrent {
struct torrent_peer;
class stat;
using disconnect_severity_t = aux::strong_typedef<std::uint8_t, struct disconnect_severity_tag>;
struct TORRENT_EXTRA_EXPORT peer_connection_interface
{
static constexpr disconnect_severity_t normal{0};
static constexpr disconnect_severity_t failure{1};
static constexpr disconnect_severity_t peer_error{2};
virtual tcp::endpoint const& remote() const = 0;
virtual tcp::endpoint local_endpoint() const = 0;
virtual void disconnect(error_code const& ec
, operation_t op, disconnect_severity_t = peer_connection_interface::normal) = 0;
virtual peer_id const& pid() const = 0;
virtual peer_id our_pid() const = 0;
virtual void set_holepunch_mode() = 0;
virtual torrent_peer* peer_info_struct() const = 0;
virtual void set_peer_info(torrent_peer* pi) = 0;
virtual bool is_outgoing() const = 0;
virtual void add_stat(std::int64_t downloaded, std::int64_t uploaded) = 0;
virtual bool fast_reconnect() const = 0;
virtual bool is_choked() const = 0;
virtual bool failed() const = 0;
virtual stat const& statistics() const = 0;
virtual void get_peer_info(peer_info& p) const = 0;
#ifndef TORRENT_DISABLE_LOGGING
virtual bool should_log(peer_log_alert::direction_t direction) const = 0;
virtual void peer_log(peer_log_alert::direction_t direction
, char const* event, char const* fmt = "", ...) const noexcept TORRENT_FORMAT(4,5) = 0;
#endif
protected:
~peer_connection_interface() {}
};
}
| ||
relevance 0 | ../include/libtorrent/announce_entry.hpp:86 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last
announce../include/libtorrent/announce_entry.hpp:86 std::string message;
// if this tracker failed the last time it was contacted
// this error code specifies what error occurred
error_code last_error;
// the local endpoint of the listen interface associated with this endpoint
tcp::endpoint local_endpoint;
// the time of next tracker announce
time_point32 next_announce = (time_point32::min)();
// no announces before this time
time_point32 min_announce = (time_point32::min)();
private:
// internal
aux::listen_socket_handle socket;
public:
// these are either -1 or the scrape information this tracker last
// responded with. *incomplete* is the current number of downloaders in
// the swarm, *complete* is the current number of seeds in the swarm and
// *downloaded* is the cumulative number of completed downloads of this
// torrent, since the beginning of time (from this tracker's point of
// view).
// if this tracker has returned scrape data, these fields are filled in
// with valid numbers. Otherwise they are set to -1. ``incomplete`` counts
// the number of current downloaders. ``complete`` counts the number of
// current peers that completed the download, or "seeds". ``downloaded`` is the
// cumulative number of completed downloads.
int scrape_incomplete = -1;
int scrape_complete = -1;
int scrape_downloaded = -1;
// the number of times in a row we have failed to announce to this
// tracker.
std::uint8_t fails : 7;
// true while we're waiting for a response from the tracker.
bool updating : 1;
// set to true when we get a valid response from an announce
// with event=started. If it is set, we won't send start in the subsequent
// announces.
bool start_sent : 1;
// set to true when we send a event=completed.
bool complete_sent : 1;
| ||
relevance 0 | ../include/libtorrent/broadcast_socket.hpp:60 | refactor these out too |
refactor these out too../include/libtorrent/broadcast_socket.hpp:60#include "libtorrent/address.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/string_view.hpp"
#include "libtorrent/span.hpp"
#include <memory>
#include <list>
#include <array>
namespace libtorrent {
TORRENT_EXTRA_EXPORT bool is_global(address const& a);
TORRENT_EXTRA_EXPORT bool is_local(address const& a);
TORRENT_EXTRA_EXPORT bool is_link_local(address const& addr);
TORRENT_EXTRA_EXPORT bool is_loopback(address const& addr);
TORRENT_EXTRA_EXPORT bool is_any(address const& addr);
TORRENT_EXTRA_EXPORT bool is_teredo(address const& addr);
TORRENT_EXTRA_EXPORT bool is_ip_address(std::string const& host);
// internal
template <typename Endpoint>
bool is_v4(Endpoint const& ep)
{
return ep.protocol() == Endpoint::protocol_type::v4();
}
template <typename Endpoint>
bool is_v6(Endpoint const& ep)
{
return ep.protocol() == Endpoint::protocol_type::v6();
}
// determines if the operating system supports IPv6
TORRENT_EXTRA_EXPORT bool supports_ipv6();
address ensure_v6(address const& a);
using receive_handler_t = std::function<void(udp::endpoint const& from
, span<char const> buffer)>;
class TORRENT_EXTRA_EXPORT broadcast_socket
{
public:
explicit broadcast_socket(udp::endpoint const& multicast_endpoint);
~broadcast_socket() { close(); }
void open(receive_handler_t handler, io_service& ios
, error_code& ec, bool loopback = true);
enum flags_t { flag_broadcast = 1 };
void send(char const* buffer, int size, error_code& ec, int flags = 0);
void send_to(char const* buffer, int size, udp::endpoint const& to, error_code& ec);
| ||
relevance 0 | ../include/libtorrent/identify_client.hpp:47 | hide this declaration when deprecated functions are disabled, and remove its internal use |
hide this declaration when deprecated functions are disabled, and
remove its internal use../include/libtorrent/identify_client.hpp:47CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
#define TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
#include "libtorrent/config.hpp"
#if TORRENT_ABI_VERSION == 1
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/optional.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#endif
#include "libtorrent/peer_id.hpp"
#include "libtorrent/fingerprint.hpp"
namespace libtorrent {
namespace aux {
TORRENT_EXTRA_EXPORT
std::string identify_client_impl(const peer_id& p);
}
// these functions don't really need to be public. This mechanism of
// advertising client software and version is also out-dated.
// This function can can be used to extract a string describing a client
// version from its peer-id. It will recognize most clients that have this
// kind of identification in the peer-id.
TORRENT_DEPRECATED_EXPORT
std::string identify_client(const peer_id& p);
#if TORRENT_ABI_VERSION == 1
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(push, 1)
#pragma warning(disable: 4996)
| ||
relevance 0 | ../include/libtorrent/utp_stream.hpp:426 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)../include/libtorrent/utp_stream.hpp:426 , end(buffers.end()); i != end; ++i)
#endif
{
using boost::asio::buffer_cast;
using boost::asio::buffer_size;
add_read_buffer(buffer_cast<void*>(*i), buffer_size(*i));
#if TORRENT_USE_ASSERTS
buf_size += buffer_size(*i);
#endif
}
std::size_t ret = read_some(true);
TORRENT_ASSERT(ret <= buf_size);
TORRENT_ASSERT(ret > 0);
return ret;
}
template <class Const_Buffers>
std::size_t write_some(Const_Buffers const& /* buffers */, error_code& /* ec */)
{
TORRENT_ASSERT(false && "not implemented!");
return 0;
}
#ifndef BOOST_NO_EXCEPTIONS
template <class Mutable_Buffers>
std::size_t read_some(Mutable_Buffers const& buffers)
{
error_code ec;
std::size_t ret = read_some(buffers, ec);
if (ec)
boost::throw_exception(boost::system::system_error(ec));
return ret;
}
template <class Const_Buffers>
std::size_t write_some(Const_Buffers const& buffers)
{
error_code ec;
std::size_t ret = write_some(buffers, ec);
if (ec)
boost::throw_exception(boost::system::system_error(ec));
return ret;
}
#endif
template <class Const_Buffers, class Handler>
void async_write_some(Const_Buffers const& buffers, Handler const& handler)
{
if (m_impl == nullptr)
{
m_io_service.post(std::bind<void>(handler
| ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:600 | there may be some opportunities to optimize the size of torrent_info. specifically to turn some std::string and std::vector into pointers |
there may be some opportunities to optimize the size of torrent_info.
specifically to turn some std::string and std::vector into pointers../include/libtorrent/torrent_info.hpp:600 { return m_info_section; }
// internal
bool add_merkle_nodes(std::map<int, sha1_hash> const& subtree
, piece_index_t piece);
std::map<int, sha1_hash> build_merkle_list(piece_index_t piece) const;
// internal
void internal_set_creator(string_view const);
void internal_set_creation_date(std::time_t);
void internal_set_comment(string_view const);
// returns whether or not this is a merkle torrent.
// see `BEP 30`__.
//
// __ https://www.bittorrent.org/beps/bep_0030.html
bool is_merkle_torrent() const { return !m_merkle_tree.empty(); }
private:
bool parse_torrent_file(bdecode_node const& libtorrent, error_code& ec);
bool parse_torrent_file(bdecode_node const& libtorrent, error_code& ec, int piece_limit);
void resolve_duplicate_filenames();
// the slow path, in case we detect/suspect a name collision
void resolve_duplicate_filenames_slow();
#if TORRENT_USE_INVARIANT_CHECKS
friend class invariant_access;
void check_invariant() const;
#endif
// not assignable
torrent_info const& operator=(torrent_info const&);
void copy_on_write();
file_storage m_files;
// if m_files is modified, it is first copied into
// m_orig_files so that the original name and
// filenames are preserved.
// the original filenames are required to build URLs for web seeds for
// instance
copy_ptr<const file_storage> m_orig_files;
// the URLs to the trackers
aux::vector<announce_entry> m_urls;
std::vector<web_seed_entry> m_web_seeds;
| ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:665 | change the type to std::shared_ptr in C++17 |
change the type to std::shared_ptr in C++17../include/libtorrent/torrent_info.hpp:665
// these or strings of the "collections" key from the torrent file. The
// pointers point directly into the info_section buffer and when copied,
// these pointers must be corrected to point into the new buffer. The
// int is the length of the string. Strings are not 0-terminated.
std::vector<std::pair<char const*, int>> m_collections;
// these are the collections from outside of the info-dict. These are
// owning strings, since we only keep the info-section around, these
// cannot be pointers into that buffer.
std::vector<std::string> m_owned_collections;
// if this is a merkle torrent, this is the merkle
// tree. It has space for merkle_num_nodes(merkle_num_leafs(num_pieces))
// hashes
aux::vector<sha1_hash> m_merkle_tree;
// this is a copy of the info section from the torrent.
	// it is maintained in this flat format in order to
// make it available through the metadata extension
boost::shared_array<char> m_info_section;
// this is a pointer into the m_info_section buffer
// pointing to the first byte of the first SHA-1 hash
char const* m_piece_hashes = nullptr;
// if a comment is found in the torrent file
// this will be set to that comment
std::string m_comment;
// an optional string naming the software used
// to create the torrent file
std::string m_created_by;
// the info section parsed. points into m_info_section
// parsed lazily
mutable bdecode_node m_info_dict;
// if a creation date is found in the torrent file
// this will be set to that, otherwise it'll be
// 1970, Jan 1
std::time_t m_creation_date = 0;
// the hash that identifies this torrent
sha1_hash m_info_hash;
// the number of bytes in m_info_section
std::int32_t m_info_section_size = 0;
// the index to the first leaf. This is where the hash for the
// first piece is stored
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:246 | make this a raw pointer. perhaps keep the shared_ptr around further down the object to maintain an owner |
make this a raw pointer. perhaps keep the shared_ptr
around further down the object to maintain an owner../include/libtorrent/torrent.hpp:246 web_seed_t& operator=(web_seed_t const&) = default;
web_seed_t(web_seed_t const&) = default;
#endif
};
struct TORRENT_EXTRA_EXPORT torrent_hot_members
{
torrent_hot_members(aux::session_interface& ses
, add_torrent_params const& p, bool session_paused);
protected:
// the piece picker. This is allocated lazily. When we don't
// have anything in the torrent (for instance, if it hasn't
// been started yet) or if we have everything, there is no
// picker. It's allocated on-demand the first time we need
// it in torrent::need_picker(). In order to tell the
// difference between having everything and nothing in
// the case there is no piece picker, see m_have_all.
std::unique_ptr<piece_picker> m_picker;
std::shared_ptr<torrent_info> m_torrent_file;
// a back reference to the session
// this torrent belongs to.
aux::session_interface& m_ses;
// this vector is sorted at all times, by the pointer value.
// use sorted_insert() and sorted_find() on it. The GNU STL
// implementation on Darwin uses significantly less memory to
// represent a vector than a set, and this set is typically
// relatively small, and it's cheap to copy pointers.
aux::vector<peer_connection*> m_connections;
// the scrape data from the tracker response, this
// is optional and may be 0xffffff
std::uint32_t m_complete:24;
// set to true when this torrent may not download anything
bool m_upload_mode:1;
// this is set to false as long as the connections
// of this torrent hasn't been initialized. If we
// have metadata from the start, connections are
// initialized immediately, if we didn't have metadata,
// they are initialized right after files_checked().
// valid_resume_data() will return false as long as
// the connections aren't initialized, to avoid
// them from altering the piece-picker before it
// has been initialized with files_checked().
bool m_connections_initialized:1;
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:424 | make graceful pause also finish all sending blocks before disconnecting |
make graceful pause also finish all sending blocks
before disconnecting../include/libtorrent/torrent.hpp:424 bool is_self_connection(peer_id const& pid) const;
void on_resume_data_checked(status_t status, storage_error const& error);
void on_force_recheck(status_t status, storage_error const& error);
void on_piece_hashed(piece_index_t piece, sha1_hash const& piece_hash
, storage_error const& error);
void files_checked();
void start_checking();
void start_announcing();
void stop_announcing();
void send_upload_only();
#ifndef TORRENT_DISABLE_SHARE_MODE
void send_share_mode();
void set_share_mode(bool s);
bool share_mode() const { return m_share_mode; }
#endif
bool graceful_pause() const { return m_graceful_pause_mode; }
torrent_flags_t flags() const;
void set_flags(torrent_flags_t flags, torrent_flags_t mask);
void set_upload_mode(bool b);
bool upload_mode() const { return m_upload_mode || m_graceful_pause_mode; }
bool is_upload_only() const { return is_finished() || upload_mode(); }
int seed_rank(aux::session_settings const& s) const;
void add_piece(piece_index_t piece, char const* data, add_piece_flags_t flags);
void on_disk_write_complete(storage_error const& error
, peer_request const& p);
void set_progress_ppm(int p) { m_progress_ppm = std::uint32_t(p); }
struct read_piece_struct
{
boost::shared_array<char> piece_data;
int blocks_left;
bool fail;
error_code error;
};
void read_piece(piece_index_t piece);
void on_disk_read_complete(disk_buffer_holder block, disk_job_flags_t, storage_error const& se
, peer_request const& r, std::shared_ptr<read_piece_struct> rp);
storage_mode_t storage_mode() const;
// this will flag the torrent as aborted. The main
// loop in session_impl will check for this state
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:1304 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1304#ifndef TORRENT_DISABLE_EXTENSIONS
std::list<std::shared_ptr<torrent_plugin>> m_extensions;
#endif
// used for tracker announces
deadline_timer m_tracker_timer;
// used to detect when we are active or inactive for long enough
// to trigger the auto-manage logic
deadline_timer m_inactivity_timer;
// this is the upload and download statistics for the whole torrent.
// it's updated from all its peers once every second.
libtorrent::stat m_stat;
// -----------------------------
// this vector is allocated lazily. If no file priorities are
// ever changed, this remains empty. Any unallocated slot
// implicitly means the file has priority 4.
aux::vector<download_priority_t, file_index_t> m_file_priority;
// any file priority updates attempted while another file priority update
// is in-progress/outstanding with the disk I/O thread, are queued up in
// this dictionary. Once the outstanding update comes back, all of these
// are applied in one batch
std::map<file_index_t, download_priority_t> m_deferred_file_priorities;
// this object is used to track download progress of individual files
aux::file_progress m_file_progress;
// a queue of the most recent low-availability pieces we accessed on disk.
// These are good candidates for suggesting other peers to request from
// us.
aux::suggest_piece m_suggest_pieces;
aux::vector<announce_entry> m_trackers;
#ifndef TORRENT_DISABLE_STREAMING
// this list is sorted by time_critical_piece::deadline
std::vector<time_critical_piece> m_time_critical_pieces;
#endif
std::string m_trackerid;
#if TORRENT_ABI_VERSION == 1
// deprecated in 1.1
std::string m_username;
std::string m_password;
#endif
std::string m_save_path;
| ||
relevance 0 | ../include/libtorrent/kademlia/msg.hpp:85 | move this to its own .hpp/.cpp pair? |
move this to its own .hpp/.cpp pair?../include/libtorrent/kademlia/msg.hpp:85 int flags;
enum {
// this argument is optional, parsing will not
// fail if it's not present
optional = 1,
// for dictionaries, the following entries refer
// to child nodes to this node, up until and including
// the next item that has the last_child flag set.
// these flags are nestable
parse_children = 2,
// this is the last item in a child dictionary
last_child = 4,
// the size argument refers to that the size
// has to be divisible by the number, instead
// of having that exact size
size_divisible = 8
};
};
TORRENT_EXTRA_EXPORT bool verify_message_impl(bdecode_node const& msg, span<key_desc_t const> desc
, span<bdecode_node> ret, span<char> error);
// verifies that a message has all the required
// entries and returns them in ret
template <int Size>
bool verify_message(bdecode_node const& msg, key_desc_t const (&desc)[Size]
, bdecode_node (&ret)[Size], span<char> error)
{
return verify_message_impl(msg, desc, ret, error);
}
} }
#endif
| ||
relevance 0 | ../include/libtorrent/kademlia/item.hpp:58 | since this is a public function, it should probably be moved out of this header and into one with other public functions. |
since this is a public function, it should probably be moved
out of this header and into one with other public functions.../include/libtorrent/kademlia/item.hpp:58#include <libtorrent/entry.hpp>
#include <libtorrent/span.hpp>
#include <libtorrent/kademlia/types.hpp>
namespace libtorrent { namespace dht {
// calculate the target hash for an immutable item.
TORRENT_EXTRA_EXPORT sha1_hash item_target_id(span<char const> v);
// calculate the target hash for a mutable item.
TORRENT_EXTRA_EXPORT sha1_hash item_target_id(span<char const> salt
, public_key const& pk);
TORRENT_EXTRA_EXPORT bool verify_mutable_item(
span<char const> v
, span<char const> salt
, sequence_number seq
, public_key const& pk
, signature const& sig);
// given a byte range ``v`` and an optional byte range ``salt``, a
// sequence number, public key ``pk`` (must be 32 bytes) and a secret key
// ``sk`` (must be 64 bytes), this function produces a signature which
// is written into a 64 byte buffer pointed to by ``sig``. The caller
// is responsible for allocating the destination buffer that's passed in
// as the ``sig`` argument. Typically it would be allocated on the stack.
TORRENT_EXPORT signature sign_mutable_item(
span<char const> v
, span<char const> salt
, sequence_number seq
, public_key const& pk
, secret_key const& sk);
class TORRENT_EXTRA_EXPORT item
{
public:
item() {}
item(public_key const& pk, span<char const> salt);
explicit item(entry v);
item(entry v
, span<char const> salt
, sequence_number seq
, public_key const& pk
, secret_key const& sk);
explicit item(bdecode_node const& v);
void assign(entry v);
void assign(entry v, span<char const> salt
, sequence_number seq
, public_key const& pk
| ||
relevance 0 | ../include/libtorrent/aux_/deprecated.hpp:41 | figure out which version of clang this is supported in |
figure out which version of clang this is supported in../include/libtorrent/aux_/deprecated.hpp:41IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_DEPRECATED_HPP_INCLUDED
#define TORRENT_DEPRECATED_HPP_INCLUDED
#if defined __clang__
// ====== CLANG ========
# if !defined TORRENT_BUILDING_LIBRARY
# define TORRENT_DEPRECATED __attribute__ ((deprecated))
# define TORRENT_DEPRECATED_ENUM __attribute__ ((deprecated))
# define TORRENT_DEPRECATED_MEMBER __attribute__ ((deprecated))
# endif
#elif defined __GNUC__
// ======== GCC ========
// deprecation markup is only enabled when libtorrent
// headers are included by clients, not while building
// libtorrent itself
# if __GNUC__ >= 3 && !defined TORRENT_BUILDING_LIBRARY
# define TORRENT_DEPRECATED __attribute__ ((deprecated))
# endif
# if __GNUC__ >= 6 && !defined TORRENT_BUILDING_LIBRARY
# define TORRENT_DEPRECATED_ENUM __attribute__ ((deprecated))
# define TORRENT_DEPRECATED_MEMBER __attribute__ ((deprecated))
# endif
#elif defined _MSC_VER
// ======= MSVC =========
// deprecation markup is only enabled when libtorrent
// headers are included by clients, not while building
// libtorrent itself
#if !defined TORRENT_BUILDING_LIBRARY
# define TORRENT_DEPRECATED __declspec(deprecated)
#endif
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:241 | make these direct members and generate shared_ptrs to them which alias the listen_socket_t shared_ptr |
make these direct members and generate shared_ptrs to them
which alias the listen_socket_t shared_ptr../include/libtorrent/aux_/session_impl.hpp:241 if (udp_sock) return udp_sock->sock.local_port();
return 0;
}
// 0 is natpmp 1 is upnp
// the order of these arrays determines the priority in
// which their ports will be announced to peers
aux::array<listen_port_mapping, 2, portmap_transport> tcp_port_mapping;
aux::array<listen_port_mapping, 2, portmap_transport> udp_port_mapping;
// indicates whether this is an SSL listen socket or not
transport ssl = transport::plaintext;
listen_socket_flags_t flags = accept_incoming;
// the actual sockets (TCP listen socket and UDP socket)
// An entry does not necessarily have a UDP or TCP socket. One of these
// pointers may be nullptr!
// These must be shared_ptr to avoid a dangling reference if an
// incoming packet is in the event queue when the socket is erased
std::shared_ptr<tcp::acceptor> sock;
std::shared_ptr<aux::session_udp_socket> udp_sock;
// since udp packets are expected to be dispatched frequently, this saves
// time on handler allocation every time we read again.
aux::handler_storage<TORRENT_READ_HANDLER_MAX_SIZE> udp_handler_storage;
std::shared_ptr<natpmp> natpmp_mapper;
std::shared_ptr<upnp> upnp_mapper;
std::shared_ptr<struct lsd> lsd;
// set to true when we receive an incoming connection from this listen
// socket
bool incoming_connection = false;
};
struct TORRENT_EXTRA_EXPORT listen_endpoint_t
{
listen_endpoint_t(address const& adr, int p, std::string dev, transport s
, listen_socket_flags_t f, address const& nmask = address{})
: addr(adr), netmask(nmask), port(p), device(std::move(dev)), ssl(s), flags(f) {}
bool operator==(listen_endpoint_t const& o) const
{
return addr == o.addr
&& port == o.port
&& device == o.device
&& ssl == o.ssl
&& flags == o.flags;
}
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:1048 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:1048 ssl::context* ssl_ctx() override { return &m_ssl_ctx; }
void on_incoming_utp_ssl(std::shared_ptr<socket_type> const& s);
void ssl_handshake(error_code const& ec, std::shared_ptr<socket_type> s);
#endif
// round-robin index into m_outgoing_interfaces
mutable std::uint8_t m_interface_index = 0;
std::shared_ptr<listen_socket_t> setup_listener(
listen_endpoint_t const& lep, error_code& ec);
#ifndef TORRENT_DISABLE_DHT
dht::dht_state m_dht_state;
#endif
// this is initialized to the unchoke_interval
// session_setting and decreased every second.
// when it reaches zero, it is reset to the
// unchoke_interval and the unchoke set is
// recomputed.
int m_unchoke_time_scaler = 0;
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:1053 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:1053
// round-robin index into m_outgoing_interfaces
mutable std::uint8_t m_interface_index = 0;
std::shared_ptr<listen_socket_t> setup_listener(
listen_endpoint_t const& lep, error_code& ec);
#ifndef TORRENT_DISABLE_DHT
dht::dht_state m_dht_state;
#endif
// this is initialized to the unchoke_interval
// session_setting and decreased every second.
// when it reaches zero, it is reset to the
// unchoke_interval and the unchoke set is
// recomputed.
int m_unchoke_time_scaler = 0;
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
int m_auto_manage_time_scaler = 0;
// works like unchoke_time_scaler but it
// is only decreased when the unchoke set
// is recomputed, and when it reaches zero,
// the optimistic unchoke is moved to another peer.
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:1060 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:1060
#ifndef TORRENT_DISABLE_DHT
dht::dht_state m_dht_state;
#endif
// this is initialized to the unchoke_interval
// session_setting and decreased every second.
// when it reaches zero, it is reset to the
// unchoke_interval and the unchoke set is
// recomputed.
int m_unchoke_time_scaler = 0;
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
int m_auto_manage_time_scaler = 0;
// works like unchoke_time_scaler but it
// is only decreased when the unchoke set
// is recomputed, and when it reaches zero,
// the optimistic unchoke is moved to another peer.
int m_optimistic_unchoke_time_scaler = 0;
// works like unchoke_time_scaler. Each time
// it reaches 0, and all the connections are
// used, the worst connection will be disconnected
// from the torrent with the most peers
int m_disconnect_time_scaler = 90;
// when this scaler reaches zero, it will
// scrape one of the auto managed, paused,
// torrents.
int m_auto_scrape_time_scaler = 180;
// statistics gathered from all torrents.
stat m_stat;
// implements session_interface
void sent_bytes(int bytes_payload, int bytes_protocol) override;
void received_bytes(int bytes_payload, int bytes_protocol) override;
void trancieve_ip_packet(int bytes, bool ipv6) override;
void sent_syn(bool ipv6) override;
void received_synack(bool ipv6) override;
#if TORRENT_ABI_VERSION == 1
int m_peak_up_rate = 0;
#endif
void on_tick(error_code const& e);
void try_connect_more_peers();
void auto_manage_checking_torrents(std::vector<torrent*>& list
| ||
relevance 0 | ../include/libtorrent/aux_/session_interface.hpp:221 | it would be nice to not have this be part of session_interface |
it would be nice to not have this be part of session_interface../include/libtorrent/aux_/session_interface.hpp:221 virtual void deferred_submit_jobs() = 0;
virtual std::uint16_t listen_port() const = 0;
virtual std::uint16_t ssl_listen_port() const = 0;
virtual int listen_port(aux::transport ssl, address const& local_addr) = 0;
virtual void for_each_listen_socket(std::function<void(aux::listen_socket_handle const&)> f) = 0;
// ask for which interface and port to bind outgoing peer connections on
virtual tcp::endpoint bind_outgoing_socket(socket_type& s, address const&
remote_address, error_code& ec) const = 0;
virtual bool verify_bound_address(address const& addr, bool utp
, error_code& ec) = 0;
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
virtual std::vector<std::shared_ptr<torrent>> find_collection(
std::string const& collection) const = 0;
#endif
virtual proxy_settings proxy() const = 0;
#if TORRENT_USE_I2P
virtual proxy_settings i2p_proxy() const = 0;
virtual char const* i2p_session() const = 0;
#endif
virtual void prioritize_connections(std::weak_ptr<torrent> t) = 0;
virtual void trigger_auto_manage() = 0;
virtual void apply_settings_pack(std::shared_ptr<settings_pack> pack) = 0;
virtual session_settings const& settings() const = 0;
// the tracker request object must be moved in
virtual void queue_tracker_request(tracker_request&& req
, std::weak_ptr<request_callback> c) = 0;
void queue_tracker_request(tracker_request const& req
, std::weak_ptr<request_callback> c) = delete;
// peer-classes
virtual void set_peer_classes(peer_class_set* s, address const& a, int st) = 0;
virtual peer_class_pool const& peer_classes() const = 0;
virtual peer_class_pool& peer_classes() = 0;
virtual bool ignore_unchoke_slots_set(peer_class_set const& set) const = 0;
virtual int copy_pertinent_channels(peer_class_set const& set
, int channel, bandwidth_channel** dst, int m) = 0;
virtual int use_quota_overhead(peer_class_set& set, int amount_down, int amount_up) = 0;
virtual bandwidth_manager* get_bandwidth_manager(int channel) = 0;
| ||