Merge branch 'maint-0.2.7' into release-0.2.7

This commit is contained in:
Nick Mathewson 2016-04-07 10:46:45 -04:00
commit 24aaeef664
13 changed files with 4731 additions and 1366 deletions

4
changes/bug15221 Normal file
View File

@ -0,0 +1,4 @@
o Minor bugfixes (sandbox):
- Allow the setrlimit syscall, and the prlimit and prlimit64 syscalls,
which some libc implementations
use under the hood. Fixes bug 15221. Bugfix on 0.2.5.1-alpha.

8
changes/bug16248 Normal file
View File

@ -0,0 +1,8 @@
o Major bugfixes (dns proxy mode, crash):
- Avoid crashing when running as a DNS proxy. Closes bug 16248; bugfix on
0.2.0.1-alpha. Patch from 'cypherpunks'.
o Minor features (bug-resistance):
- Make Tor survive errors involving connections without a corresponding
event object. Previously we'd fail with an assertion; now we produce a
log message. Related to bug 16248.

7
changes/bug18570 Normal file
View File

@ -0,0 +1,7 @@
o Minor bugfixes (correctness):
- Fix a bad memory handling bug that would occur if we had queued
a cell on a channel's incoming queue. Fortunately, we can't actually
queue a cell like that as our code is constructed today, but it's best
to avoid this kind of error, even if there isn't any code that triggers
it today. Fixes bug 18570; bugfix on 0.2.4.4-alpha.

4
changes/geoip-april2016 Normal file
View File

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the April 5 2016 Maxmind GeoLite2
Country database.

View File

@ -177,11 +177,20 @@ static int filter_nopar_gen[] = {
SCMP_SYS(mmap),
#endif
SCMP_SYS(munmap),
#ifdef __NR_prlimit
SCMP_SYS(prlimit),
#endif
#ifdef __NR_prlimit64
SCMP_SYS(prlimit64),
#endif
SCMP_SYS(read),
SCMP_SYS(rt_sigreturn),
SCMP_SYS(sched_getaffinity),
SCMP_SYS(sendmsg),
SCMP_SYS(set_robust_list),
#ifdef __NR_setrlimit
SCMP_SYS(setrlimit),
#endif
#ifdef __NR_sigreturn
SCMP_SYS(sigreturn),
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -2652,6 +2652,11 @@ channel_process_cells(channel_t *chan)
/*
* Process cells until we're done or find one we have no current handler
* for.
*
* We must free the cells here after calling the handler, since custody
* of the buffer was given to the channel layer when they were queued;
* see comments on memory management in channel_queue_cell() and in
* channel_queue_var_cell() below.
*/
while (NULL != (q = TOR_SIMPLEQ_FIRST(&chan->incoming_queue))) {
tor_assert(q);
@ -2669,6 +2674,7 @@ channel_process_cells(channel_t *chan)
q->u.fixed.cell, chan,
U64_PRINTF_ARG(chan->global_identifier));
chan->cell_handler(chan, q->u.fixed.cell);
tor_free(q->u.fixed.cell);
tor_free(q);
} else if (q->type == CELL_QUEUE_VAR &&
chan->var_cell_handler) {
@ -2681,6 +2687,7 @@ channel_process_cells(channel_t *chan)
q->u.var.var_cell, chan,
U64_PRINTF_ARG(chan->global_identifier));
chan->var_cell_handler(chan, q->u.var.var_cell);
tor_free(q->u.var.var_cell);
tor_free(q);
} else {
/* Can't handle this one */
@ -2701,6 +2708,7 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
{
int need_to_queue = 0;
cell_queue_entry_t *q;
cell_t *cell_copy = NULL;
tor_assert(chan);
tor_assert(cell);
@ -2728,8 +2736,19 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
U64_PRINTF_ARG(chan->global_identifier));
chan->cell_handler(chan, cell);
} else {
/* Otherwise queue it and then process the queue if possible. */
q = cell_queue_entry_new_fixed(cell);
/*
* Otherwise queue it and then process the queue if possible.
*
* We queue a copy, not the original pointer - it might have been on the
* stack in connection_or_process_cells_from_inbuf() (or another caller
* if we ever have a subclass other than channel_tls_t), or be freed
* there after we return. This is the uncommon case; the non-copying
* fast path occurs in the if (!need_to_queue) case above when the
* upper layer has installed cell handlers.
*/
cell_copy = tor_malloc_zero(sizeof(cell_t));
memcpy(cell_copy, cell, sizeof(cell_t));
q = cell_queue_entry_new_fixed(cell_copy);
log_debug(LD_CHANNEL,
"Queueing incoming cell_t %p for channel %p "
"(global ID " U64_FORMAT ")",
@ -2755,6 +2774,7 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
{
int need_to_queue = 0;
cell_queue_entry_t *q;
var_cell_t *cell_copy = NULL;
tor_assert(chan);
tor_assert(var_cell);
@ -2783,8 +2803,18 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
U64_PRINTF_ARG(chan->global_identifier));
chan->var_cell_handler(chan, var_cell);
} else {
/* Otherwise queue it and then process the queue if possible. */
q = cell_queue_entry_new_var(var_cell);
/*
* Otherwise queue it and then process the queue if possible.
*
* We queue a copy, not the original pointer - it might have been on the
* stack in connection_or_process_cells_from_inbuf() (or another caller
* if we ever have a subclass other than channel_tls_t), or be freed
* there after we return. This is the uncommon case; the non-copying
* fast path occurs in the if (!need_to_queue) case above when the
* upper layer has installed cell handlers.
*/
cell_copy = var_cell_copy(var_cell);
q = cell_queue_entry_new_var(cell_copy);
log_debug(LD_CHANNEL,
"Queueing incoming var_cell_t %p for channel %p "
"(global ID " U64_FORMAT ")",

View File

@ -1009,6 +1009,11 @@ channel_tls_time_process_cell(cell_t *cell, channel_tls_t *chan, int *time,
* for cell types specific to the handshake for this transport protocol and
* handles them, and queues all other cells to the channel_t layer, which
* eventually will hand them off to command.c.
*
* The channel layer itself decides whether the cell should be queued or
* can be handed off immediately to the upper-layer code. It is responsible
* for copying in the case that it queues; we merely pass pointers through
* which we get from connection_or_process_cells_from_inbuf().
*/
void
@ -1106,6 +1111,12 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
* related and live below the channel_t layer, so no variable-length
* cells ever get delivered in the current implementation, but I've left
* the mechanism in place for future use.
*
* If we were handing them off to the upper layer, the channel_t queueing
* code would be responsible for memory management, and we'd just be passing
* pointers through from connection_or_process_cells_from_inbuf(). That
* caller always frees them after this function returns, so this function
* should never free var_cell.
*/
void

View File

@ -488,6 +488,28 @@ var_cell_new(uint16_t payload_len)
return cell;
}
/**
 * Return a newly allocated duplicate of <b>src</b>, including its
 * variable-length payload.  Returns NULL if <b>src</b> is NULL.  The
 * caller owns the result and must release it with var_cell_free().
 */
var_cell_t *
var_cell_copy(const var_cell_t *src)
{
  var_cell_t *dup;
  size_t alloc_len;

  if (src == NULL)
    return NULL;

  /* The payload is a trailing flexible array, so size the allocation as
   * the fixed header plus exactly payload_len bytes. */
  alloc_len = STRUCT_OFFSET(var_cell_t, payload) + src->payload_len;
  dup = tor_malloc_zero(alloc_len);
  dup->payload_len = src->payload_len;
  dup->command = src->command;
  dup->circ_id = src->circ_id;
  memcpy(dup->payload, src->payload, src->payload_len);

  return dup;
}
/** Release all space held by <b>cell</b>. */
void
var_cell_free(var_cell_t *cell)
@ -2060,6 +2082,19 @@ connection_or_process_cells_from_inbuf(or_connection_t *conn)
{
var_cell_t *var_cell;
/*
* Note on memory management for incoming cells: below the channel layer,
* we shouldn't need to consider its internal queueing/copying logic. It
* is safe to pass cells to it on the stack or on the heap, but in the
* latter case we must be sure we free them later.
*
* The incoming cell queue code in channel.c will (in the common case)
* decide it can pass them to the upper layer immediately, in which case
* those functions may run directly on the cell pointers we pass here, or
* it may decide to queue them, in which case it will allocate its own
* buffer and copy the cell.
*/
while (1) {
log_debug(LD_OR,
TOR_SOCKET_T_FORMAT": starting, inbuf_datalen %d "

View File

@ -97,6 +97,7 @@ void cell_pack(packed_cell_t *dest, const cell_t *src, int wide_circ_ids);
int var_cell_pack_header(const var_cell_t *cell, char *hdr_out,
int wide_circ_ids);
var_cell_t *var_cell_new(uint16_t payload_len);
var_cell_t *var_cell_copy(const var_cell_t *src);
void var_cell_free(var_cell_t *cell);
/** DOCDOC */

View File

@ -567,6 +567,45 @@ connection_is_reading(connection_t *conn)
(conn->read_event && event_pending(conn->read_event, EV_READ, NULL));
}
/** Check whether <b>conn</b> is correct in having (or not having) a
 * read/write event (passed in <b>ev</b>). On success, return 0. On failure,
 * log a warning and return -1. */
static int
connection_check_event(connection_t *conn, struct event *ev)
{
  int bad;

  if (conn->type == CONN_TYPE_AP && TO_EDGE_CONN(conn)->is_dns_request) {
    /* DNS requests which we launch through the dnsserv.c module do not have
     * any underlying socket or any underlying linked connection, so they
     * shouldn't have any attached events either.
     */
    bad = ev != NULL;
  } else {
    /* Everything else should have an underlying socket, or a linked
     * connection (which is also tracked with a read_event/write_event pair).
     */
    bad = ev == NULL;
  }

  if (bad) {
    /* Log enough connection state to diagnose the mismatch, plus a
     * backtrace, then tell the caller to bail out rather than assert. */
    log_warn(LD_BUG, "Event missing on connection %p [%s;%s]. "
             "socket=%d. linked=%d. "
             "is_dns_request=%d. Marked_for_close=%s:%d",
             conn,
             conn_type_to_string(conn->type),
             conn_state_to_string(conn->type, conn->state),
             (int)conn->s, (int)conn->linked,
             (conn->type == CONN_TYPE_AP && TO_EDGE_CONN(conn)->is_dns_request),
             conn->marked_for_close_file ? conn->marked_for_close_file : "-",
             conn->marked_for_close
             );
    log_backtrace(LOG_WARN, LD_BUG, "Backtrace attached.");
    return -1;
  }
  return 0;
}
/** Tell the main loop to stop notifying <b>conn</b> of any read events. */
MOCK_IMPL(void,
connection_stop_reading,(connection_t *conn))
@ -578,7 +617,9 @@ connection_stop_reading,(connection_t *conn))
return;
});
tor_assert(conn->read_event);
if (connection_check_event(conn, conn->read_event) < 0) {
return;
}
if (conn->linked) {
conn->reading_from_linked_conn = 0;
@ -603,7 +644,9 @@ connection_start_reading,(connection_t *conn))
return;
});
tor_assert(conn->read_event);
if (connection_check_event(conn, conn->read_event) < 0) {
return;
}
if (conn->linked) {
conn->reading_from_linked_conn = 1;
@ -643,7 +686,9 @@ connection_stop_writing,(connection_t *conn))
return;
});
tor_assert(conn->write_event);
if (connection_check_event(conn, conn->write_event) < 0) {
return;
}
if (conn->linked) {
conn->writing_to_linked_conn = 0;
@ -669,7 +714,9 @@ connection_start_writing,(connection_t *conn))
return;
});
tor_assert(conn->write_event);
if (connection_check_event(conn, conn->write_event) < 0) {
return;
}
if (conn->linked) {
conn->writing_to_linked_conn = 1;

View File

@ -25,7 +25,9 @@ extern uint64_t estimated_total_queue_size;
static int test_chan_accept_cells = 0;
static int test_chan_fixed_cells_recved = 0;
static cell_t * test_chan_last_seen_fixed_cell_ptr = NULL;
static int test_chan_var_cells_recved = 0;
static var_cell_t * test_chan_last_seen_var_cell_ptr = NULL;
static int test_cells_written = 0;
static int test_destroy_not_pending_calls = 0;
static int test_doesnt_want_writes_count = 0;
@ -70,6 +72,7 @@ static void test_channel_flushmux(void *arg);
static void test_channel_incoming(void *arg);
static void test_channel_lifecycle(void *arg);
static void test_channel_multi(void *arg);
static void test_channel_queue_incoming(void *arg);
static void test_channel_queue_size(void *arg);
static void test_channel_write(void *arg);
@ -179,7 +182,7 @@ chan_test_cell_handler(channel_t *ch,
tt_assert(ch);
tt_assert(cell);
tor_free(cell);
test_chan_last_seen_fixed_cell_ptr = cell;
++test_chan_fixed_cells_recved;
done:
@ -214,7 +217,7 @@ chan_test_var_cell_handler(channel_t *ch,
tt_assert(ch);
tt_assert(var_cell);
tor_free(var_cell);
test_chan_last_seen_var_cell_ptr = var_cell;
++test_chan_var_cells_recved;
done:
@ -608,7 +611,7 @@ test_channel_dumpstats(void *arg)
make_fake_cell(cell);
old_count = test_chan_fixed_cells_recved;
channel_queue_cell(ch, cell);
cell = NULL;
tor_free(cell);
tt_int_op(test_chan_fixed_cells_recved, ==, old_count + 1);
tt_assert(ch->n_bytes_recved > 0);
tt_assert(ch->n_cells_recved > 0);
@ -819,7 +822,7 @@ test_channel_incoming(void *arg)
make_fake_cell(cell);
old_count = test_chan_fixed_cells_recved;
channel_queue_cell(ch, cell);
cell = NULL;
tor_free(cell);
tt_int_op(test_chan_fixed_cells_recved, ==, old_count + 1);
/* Receive a variable-size cell */
@ -827,7 +830,7 @@ test_channel_incoming(void *arg)
make_fake_var_cell(var_cell);
old_count = test_chan_var_cells_recved;
channel_queue_var_cell(ch, var_cell);
var_cell = NULL;
tor_free(cell);
tt_int_op(test_chan_var_cells_recved, ==, old_count + 1);
/* Close it */
@ -1422,6 +1425,113 @@ test_channel_queue_impossible(void *arg)
return;
}
static void
test_channel_queue_incoming(void *arg)
{
channel_t *ch = NULL;
cell_t *cell = NULL;
var_cell_t *var_cell = NULL;
int old_fixed_count, old_var_count;
(void)arg;
/* Mock these for duration of the test */
MOCK(scheduler_channel_doesnt_want_writes,
scheduler_channel_doesnt_want_writes_mock);
MOCK(scheduler_release_channel,
scheduler_release_channel_mock);
/* Accept cells to lower layer */
test_chan_accept_cells = 1;
/* Use default overhead factor */
test_overhead_estimate = 1.0f;
ch = new_fake_channel();
tt_assert(ch);
/* Start it off in OPENING */
ch->state = CHANNEL_STATE_OPENING;
/* We'll need a cmux */
ch->cmux = circuitmux_alloc();
/* Test cell handler getters */
tt_ptr_op(channel_get_cell_handler(ch), ==, NULL);
tt_ptr_op(channel_get_var_cell_handler(ch), ==, NULL);
/* Try to register it */
channel_register(ch);
tt_assert(ch->registered);
/* Open it */
channel_change_state(ch, CHANNEL_STATE_OPEN);
tt_int_op(ch->state, ==, CHANNEL_STATE_OPEN);
/* Assert that the incoming queue is empty */
tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
/* Queue an incoming fixed-length cell */
cell = tor_malloc_zero(sizeof(cell_t));
make_fake_cell(cell);
channel_queue_cell(ch, cell);
/* Assert that the incoming queue has one entry */
tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), ==, 1);
/* Queue an incoming var cell */
var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
make_fake_var_cell(var_cell);
channel_queue_var_cell(ch, var_cell);
/* Assert that the incoming queue has two entries */
tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), ==, 2);
/*
* Install cell handlers; this will drain the queue, so save the old
* cell counters first
*/
old_fixed_count = test_chan_fixed_cells_recved;
old_var_count = test_chan_var_cells_recved;
channel_set_cell_handlers(ch,
chan_test_cell_handler,
chan_test_var_cell_handler);
tt_ptr_op(channel_get_cell_handler(ch), ==, chan_test_cell_handler);
tt_ptr_op(channel_get_var_cell_handler(ch), ==, chan_test_var_cell_handler);
/* Assert cells were received */
tt_int_op(test_chan_fixed_cells_recved, ==, old_fixed_count + 1);
tt_int_op(test_chan_var_cells_recved, ==, old_var_count + 1);
/*
* Assert that the pointers are different from the cells we allocated;
* when queueing cells with no incoming cell handlers installed, the
* channel layer should copy them to a new buffer, and free them after
* delivery. These pointers will have already been freed by the time
* we get here, so don't dereference them.
*/
tt_ptr_op(test_chan_last_seen_fixed_cell_ptr, !=, cell);
tt_ptr_op(test_chan_last_seen_var_cell_ptr, !=, var_cell);
/* Assert queue is now empty */
tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
/* Close it; this contains an assertion that the incoming queue is empty */
channel_mark_for_close(ch);
tt_int_op(ch->state, ==, CHANNEL_STATE_CLOSING);
chan_test_finish_close(ch);
tt_int_op(ch->state, ==, CHANNEL_STATE_CLOSED);
channel_run_cleanup();
ch = NULL;
done:
free_fake_channel(ch);
tor_free(cell);
tor_free(var_cell);
UNMOCK(scheduler_channel_doesnt_want_writes);
UNMOCK(scheduler_release_channel);
return;
}
static void
test_channel_queue_size(void *arg)
{
@ -1666,6 +1776,7 @@ struct testcase_t channel_tests[] = {
{ "lifecycle_2", test_channel_lifecycle_2, TT_FORK, NULL, NULL },
{ "multi", test_channel_multi, TT_FORK, NULL, NULL },
{ "queue_impossible", test_channel_queue_impossible, TT_FORK, NULL, NULL },
{ "queue_incoming", test_channel_queue_incoming, TT_FORK, NULL, NULL },
{ "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
{ "write", test_channel_write, TT_FORK, NULL, NULL },
END_OF_TESTCASES