Add sentinel values to the end of memarea chunks.

This might detect some possible causes of bug 930, and will at least
rule out some dumb ways the router-parsing code could be corrupting
the heap.
Nick Mathewson 2009-05-12 15:10:23 -04:00
parent a271c5370a
commit d9650cfa50
2 changed files with 37 additions and 3 deletions
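
For reference, the guard pattern this commit applies is roughly the following. This is a minimal standalone sketch, not the Tor code: the guarded_buf_* names are invented for illustration, and plain memcpy stands in for Tor's set_uint32()/get_uint32() unaligned-access helpers.

/* Sentinel idea: reserve a few extra bytes past the usable region, stamp a
 * known 32-bit value there, and assert it is still intact whenever the
 * buffer is touched. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SENTINEL_VAL 0x90806622u

typedef struct guarded_buf_t {
  size_t usable_len;    /* bytes the caller is allowed to write */
  unsigned char *mem;   /* usable_len bytes plus 4 sentinel bytes */
} guarded_buf_t;

static guarded_buf_t *
guarded_buf_new(size_t usable_len)
{
  guarded_buf_t *buf = malloc(sizeof(*buf));
  uint32_t val = SENTINEL_VAL;
  assert(buf);
  buf->usable_len = usable_len;
  buf->mem = malloc(usable_len + sizeof(uint32_t));
  assert(buf->mem);
  /* Stamp the sentinel just past the space the caller may use. */
  memcpy(buf->mem + usable_len, &val, sizeof(val));
  return buf;
}

static void
guarded_buf_check(const guarded_buf_t *buf)
{
  uint32_t val;
  memcpy(&val, buf->mem + buf->usable_len, sizeof(val));
  /* Any write past usable_len clobbers the sentinel and trips this. */
  assert(val == SENTINEL_VAL);
}

The memarea code below applies the same idea per chunk: SET_SENTINEL() stamps the word at u.mem[mem_size] when a chunk is created, and CHECK_SENTINEL() re-reads it on every allocation, free, and consistency check.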


@@ -3,6 +3,9 @@ Changes in version 0.2.2.1-alpha - 2009-??-??
     - New --digests command-line switch to output the digests of the source
       files Tor was built with.
     - The "torify" script now uses torsocks where available.
+    - The memarea code now uses a sentinel value at the end of each area
+      to make sure nothing writes beyond the end of an area. This might
+      help debug some conceivable causes of bug 930.
   o Deprecated and removed features:
     - The controller no longer accepts the old obsolete "addr-mappings/"


@@ -13,6 +13,10 @@
 #include "compat.h"
 #include "log.h"
+/** If true, we try to detect any attempts to write beyond the length of a
+ * memarea. */
+#define USE_SENTINELS
 /** All returned pointers should be aligned to the nearest multiple of this
  * value. */
 #define MEMAREA_ALIGN SIZEOF_VOID_P
@@ -25,6 +29,24 @@
 #error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
 #endif
+#ifdef USE_SENTINELS
+#define SENTINEL_VAL 0x90806622u
+#define SENTINEL_LEN sizeof(uint32_t)
+#define SET_SENTINEL(chunk) \
+  STMT_BEGIN \
+  set_uint32( &(chunk)->u.mem[chunk->mem_size], SENTINEL_VAL ); \
+  STMT_END
+#define CHECK_SENTINEL(chunk) \
+  STMT_BEGIN \
+  uint32_t sent_val = get_uint32(&(chunk)->u.mem[chunk->mem_size]); \
+  tor_assert(sent_val == SENTINEL_VAL); \
+  STMT_END
+#else
+#define SENTINEL_LEN 0
+#define SET_SENTINEL(chunk) STMT_NIL
+#define CHECK_SENTINEL(chunk) STMT_NIL
+#endif
 /** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
 static INLINE void *
 realign_pointer(void *ptr)
@@ -78,15 +100,20 @@ alloc_chunk(size_t sz, int freelist_ok)
     freelist = res->next_chunk;
     res->next_chunk = NULL;
     --freelist_len;
+    CHECK_SENTINEL(res);
     return res;
   } else {
     size_t chunk_size = freelist_ok ? CHUNK_SIZE : sz;
-    memarea_chunk_t *res = tor_malloc_roundup(&chunk_size);
+    memarea_chunk_t *res;
+    chunk_size += SENTINEL_LEN;
+    res = tor_malloc_roundup(&chunk_size);
     res->next_chunk = NULL;
-    res->mem_size = chunk_size - CHUNK_HEADER_SIZE;
+    res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
     res->next_mem = res->u.mem;
-    tor_assert(res->next_mem+res->mem_size == ((char*)res)+chunk_size);
+    tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
+               ((char*)res)+chunk_size);
     tor_assert(realign_pointer(res->next_mem) == res->next_mem);
+    SET_SENTINEL(res);
     return res;
   }
 }
@@ -96,6 +123,7 @@ alloc_chunk(size_t sz, int freelist_ok)
 static void
 chunk_free(memarea_chunk_t *chunk)
 {
+  CHECK_SENTINEL(chunk);
   if (freelist_len < MAX_FREELIST_LEN) {
     ++freelist_len;
     chunk->next_chunk = freelist;
@@ -182,6 +210,7 @@ memarea_alloc(memarea_t *area, size_t sz)
   memarea_chunk_t *chunk = area->first;
   char *result;
   tor_assert(chunk);
+  CHECK_SENTINEL(chunk);
   if (chunk->next_mem+sz > chunk->u.mem+chunk->mem_size) {
     if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
       /* This allocation is too big. Stick it in a special chunk, and put
@@ -255,6 +284,7 @@ memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
   size_t a = 0, u = 0;
   memarea_chunk_t *chunk;
   for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
+    CHECK_SENTINEL(chunk);
     a += CHUNK_HEADER_SIZE + chunk->mem_size;
     tor_assert(chunk->next_mem >= chunk->u.mem);
     u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->u.mem);
@@ -271,6 +301,7 @@ memarea_assert_ok(memarea_t *area)
   tor_assert(area->first);
   for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
+    CHECK_SENTINEL(chunk);
     tor_assert(chunk->next_mem >= chunk->u.mem);
     tor_assert(chunk->next_mem <= chunk->u.mem+chunk->mem_size+MEMAREA_ALIGN);
   }