Merge branch 'maint-0.2.8' into release-0.2.8

Nick Mathewson 2016-05-25 09:27:35 -04:00
commit b5e2b3844a
4 changed files with 22 additions and 4 deletions

changes/memarea_overflow (new file)

@@ -0,0 +1,7 @@
+  o Minor bugfixes (pointer arithmetic):
+    - Fix a bug in memarea_alloc() that could have resulted in remote heap
+      write access, if Tor had ever passed an unchecked size to
+      memarea_alloc(). Fortunately, all the sizes we pass to memarea_alloc()
+      are pre-checked to be less than 128 kilobytes. Fixes bug 19150; bugfix
+      on 0.2.1.1-alpha. Bug found by Guido Vranken.
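
The entry above turns on a subtle pointer-arithmetic point: a bounds check of the form "next_mem + sz > end" can wrap around when sz is very large, so an oversized request may appear to fit. Below is a minimal standalone sketch of the safe pattern (illustrative only, not Tor code; request_fits, buf, buf_size and next are hypothetical names); the same shape of check appears in the memarea.c hunk further down.

#include <assert.h>
#include <stddef.h>

/* Sketch of an overflow-safe bounds check.  "buf", "buf_size" and "next"
 * are hypothetical names, not Tor identifiers. */
static int
request_fits(const char *buf, size_t buf_size, const char *next, size_t sz)
{
  assert(next >= buf && next <= buf + buf_size);

  /* Risky form: "next + sz" may wrap around (and is undefined behaviour as
   * soon as it points past the end of the buffer), so a huge sz can slip
   * through:
   *
   *   return next + sz <= buf + buf_size;
   */

  /* Safe form: compare sizes instead of overflowing a pointer. */
  const size_t space_remaining = (size_t)((buf + buf_size) - next);
  return sz <= space_remaining;
}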

changes/rsa_init_bug (new file)

@@ -0,0 +1,7 @@
+  o Major bugfixes (key management):
+    - If OpenSSL fails to generate an RSA key, do not retain a dangling pointer
+      to the previous (uninitialized) key value. The impact here should be
+      limited to a difficult-to-trigger crash, if OpenSSL is running an
+      engine that makes key generation failures possible, or if OpenSSL runs
+      out of memory. Fixes bug 19152; bugfix on 0.2.1.10-alpha. Found by
+      Yuan Jochen Kang, Suman Jana, and Baishakhi Ray.
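
The fix this entry describes is about pointer hygiene after a failed key (re)generation. Here is a rough, hypothetical sketch of the pattern using the public OpenSSL RSA API (key_holder and regenerate_key are invented names, not Tor code): once the old key is freed, the stored pointer is cleared immediately, so a failure later in the function leaves it NULL rather than dangling. The crypto.c hunk below makes the corresponding change by setting env->key to NULL right after RSA_free().

#include <openssl/bn.h>
#include <openssl/rsa.h>

/* Hypothetical holder type, not a Tor structure. */
struct key_holder {
  RSA *key;
};

/* Replace st->key with a freshly generated key.  Returns 0 on success and
 * -1 on failure; on failure st->key is NULL, never a dangling pointer. */
static int
regenerate_key(struct key_holder *st, int bits)
{
  RSA *r = RSA_new();
  BIGNUM *e = BN_new();

  if (st->key) {
    RSA_free(st->key);
    st->key = NULL;   /* Clear it now: generation below may still fail. */
  }

  if (!r || !e || !BN_set_word(e, 65537) ||
      !RSA_generate_key_ex(r, bits, e, NULL)) {
    RSA_free(r);      /* Both free functions are documented to accept NULL. */
    BN_free(e);
    return -1;
  }

  BN_free(e);
  st->key = r;
  return 0;
}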

src/common/crypto.c

@@ -585,8 +585,10 @@ MOCK_IMPL(int,
 {
   tor_assert(env);
 
-  if (env->key)
+  if (env->key) {
     RSA_free(env->key);
+    env->key = NULL;
+  }
 
   {
     BIGNUM *e = BN_new();

src/common/memarea.c

@@ -83,8 +83,7 @@ typedef struct memarea_chunk_t {
   struct memarea_chunk_t *next_chunk;
   size_t mem_size; /**< How much RAM is available in mem, total? */
   char *next_mem; /**< Next position in mem to allocate data at. If it's
-                   * greater than or equal to mem+mem_size, this chunk is
-                   * full. */
+                   * equal to mem+mem_size, this chunk is full. */
 #ifdef USE_ALIGNED_ATTRIBUTE
   /** Actual content of the memory chunk. */
   char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
@@ -205,7 +204,10 @@ memarea_alloc(memarea_t *area, size_t sz)
   tor_assert(sz < SIZE_T_CEILING);
   if (sz == 0)
     sz = 1;
-  if (chunk->next_mem+sz > chunk->U_MEM+chunk->mem_size) {
+  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
+  const size_t space_remaining =
+    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
+  if (sz > space_remaining) {
     if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
       /* This allocation is too big. Stick it in a special chunk, and put
        * that chunk second in the list. */