Merge branch 'maint-0.3.1' into release-0.3.1

This commit is contained in:
Nick Mathewson 2017-09-04 15:48:12 -04:00
commit c515fbc732
4 changed files with 95 additions and 6 deletions

6
changes/bug22752_simple Normal file
View File

@ -0,0 +1,6 @@
o Major bugfixes (windows, directory cache):
- On windows, do not try to delete cached consensus documents and
diffs, until they are unmapped from memory. Allow the diff storage
directory to grow larger in order to handle files that might
need to stay around longer. Fixes bug 22752; bugfix on
0.3.1.1-alpha.

View File

@ -9,6 +9,14 @@
#define CCE_MAGIC 0x17162253
#ifdef _WIN32
/* On Windows, unlink won't work on a file if the file is actively mmap()ed.
* That forces us to be less aggressive about unlinking files, and causes other
* changes throughout our logic.
*/
#define MUST_UNMAP_TO_UNLINK
#endif
/**
* A consensus_cache_entry_t is a reference-counted handle to an
* item in a consensus_cache_t. It can be mmapped into RAM, or not,
@ -49,6 +57,11 @@ struct consensus_cache_t {
storage_dir_t *dir;
/** List of all the entries in the directory. */
smartlist_t *entries;
/** The maximum number of entries that we'd like to allow in this cache.
* This is the same as the storagedir limit when MUST_UNMAP_TO_UNLINK is
* not defined. */
unsigned max_entries;
};
static void consensus_cache_clear(consensus_cache_t *cache);
@ -64,9 +77,26 @@ static void consensus_cache_entry_unmap(consensus_cache_entry_t *ent);
consensus_cache_t *
consensus_cache_open(const char *subdir, int max_entries)
{
int storagedir_max_entries;
consensus_cache_t *cache = tor_malloc_zero(sizeof(consensus_cache_t));
char *directory = get_datadir_fname(subdir);
cache->dir = storage_dir_new(directory, max_entries);
cache->max_entries = max_entries;
#ifdef MUST_UNMAP_TO_UNLINK
/* If we can't unlink the files that we're still using, then we need to
* tell the storagedir backend to allow far more files than this consensus
* cache actually wants, so that it can hold files which, from this cache's
* perspective, have become useless.
*/
#define VERY_LARGE_STORAGEDIR_LIMIT (1000*1000)
storagedir_max_entries = VERY_LARGE_STORAGEDIR_LIMIT;
#else
/* Otherwise, we can just tell the storagedir to use the same limits
* as this cache. */
storagedir_max_entries = max_entries;
#endif
cache->dir = storage_dir_new(directory, storagedir_max_entries);
tor_free(directory);
if (!cache->dir) {
tor_free(cache);
@ -77,6 +107,24 @@ consensus_cache_open(const char *subdir, int max_entries)
return cache;
}
/** Return true if this cache is permitted to hold more entries than
 * its configured file limit.
 *
 * (On Windows we can't unlink a file while it is still mapped, so the
 * cache may need to exceed its official limit temporarily, until the
 * no-longer-wanted files actually become deletable.)
 */
int
consensus_cache_may_overallocate(consensus_cache_t *cache)
{
  /* The answer is platform-wide; it does not depend on this cache. */
  (void) cache;
#ifdef MUST_UNMAP_TO_UNLINK
  const int overallocation_ok = 1;
#else
  const int overallocation_ok = 0;
#endif /* defined(MUST_UNMAP_TO_UNLINK) */
  return overallocation_ok;
}
/**
* Tell the sandbox (if any) configured by <b>cfg</b> to allow the
* operations that <b>cache</b> will need.
@ -85,6 +133,19 @@ int
consensus_cache_register_with_sandbox(consensus_cache_t *cache,
struct sandbox_cfg_elem **cfg)
{
#ifdef MUST_UNMAP_TO_UNLINK
/* Our Linux sandbox doesn't support huge file lists like the one that would
* be generated by using VERY_LARGE_STORAGEDIR_LIMIT above in
* consensus_cache_open(). Since the Linux sandbox is the only one we have
* right now, we just assert that we never reach this point when we've had
* to use VERY_LARGE_STORAGEDIR_LIMIT.
*
* If at some point in the future we have a different sandbox mechanism that
* can handle huge file lists, we can remove this assertion or make it
* conditional.
*/
tor_assert_nonfatal_unreached();
#endif
return storage_dir_register_with_sandbox(cache->dir, cfg);
}
@ -406,23 +467,36 @@ int
consensus_cache_get_n_filenames_available(consensus_cache_t *cache)
{
tor_assert(cache);
int max = storage_dir_get_max_files(cache->dir);
int max = cache->max_entries;
int used = smartlist_len(storage_dir_list(cache->dir));
#ifdef MUST_UNMAP_TO_UNLINK
if (used > max)
return 0;
#else
tor_assert_nonfatal(max >= used);
#endif
return max - used;
}
/**
 * Delete every element of <b>cache</b> that has been marked with
 * consensus_cache_entry_mark_for_removal. If <b>force</b> is false,
 * retain those entries which are in use by something other than the cache.
 */
void
consensus_cache_delete_pending(consensus_cache_t *cache, int force)
{
SMARTLIST_FOREACH_BEGIN(cache->entries, consensus_cache_entry_t *, ent) {
tor_assert_nonfatal(ent->in_cache == cache);
if (! force) {
int force_ent = force;
#ifdef MUST_UNMAP_TO_UNLINK
/* We cannot delete anything with an active mmap on win32, so no
* force-deletion. */
if (ent->map) {
force_ent = 0;
}
#endif
if (! force_ent) {
if (ent->refcnt > 1 || BUG(ent->in_cache == NULL)) {
/* Somebody is using this entry right now */
continue;

View File

@ -14,6 +14,7 @@ HANDLE_DECL(consensus_cache_entry, consensus_cache_entry_t, )
consensus_cache_t *consensus_cache_open(const char *subdir, int max_entries);
void consensus_cache_free(consensus_cache_t *cache);
struct sandbox_cfg_elem;
int consensus_cache_may_overallocate(consensus_cache_t *cache);
int consensus_cache_register_with_sandbox(consensus_cache_t *cache,
struct sandbox_cfg_elem **cfg);
void consensus_cache_unmap_lazy(consensus_cache_t *cache, time_t cutoff);

View File

@ -1136,7 +1136,7 @@ consdiffmgr_ensure_space_for_files(int n)
return 0;
}
// Let's get more assertive: clean out unused stuff, and force-remove
// the files that we can.
consdiffmgr_cleanup();
consensus_cache_delete_pending(cache, 1);
const int n_to_remove = n - consensus_cache_get_n_filenames_available(cache);
@ -1159,6 +1159,14 @@ consdiffmgr_ensure_space_for_files(int n)
smartlist_free(objects);
consensus_cache_delete_pending(cache, 1);
if (consensus_cache_may_overallocate(cache)) {
/* If we're allowed to throw extra files into the cache, let's do so
 * rather than getting upset.
 */
return 0;
}
if (BUG(n_marked < n_to_remove))
return -1;
else