Compare commits

master..maint-0.2.7-redux

No commits in common. "master" and "maint-0.2.7-redux" have entirely different histories.

737 changed files with 63805 additions and 235607 deletions

.appveyor.yml

@ -1,62 +0,0 @@
version: 1.0.{build}
clone_depth: 50
environment:
compiler: mingw
matrix:
- target: i686-w64-mingw32
compiler_path: mingw32
openssl_path: /c/OpenSSL-Win32
- target: x86_64-w64-mingw32
compiler_path: mingw64
openssl_path: /c/OpenSSL-Win64
install:
- ps: >-
Function Execute-Command ($commandPath)
{
& $commandPath $args 2>&1
if ( $LastExitCode -ne 0 ) {
$host.SetShouldExit( $LastExitCode )
}
}
Function Execute-Bash ()
{
Execute-Command 'c:\msys64\usr\bin\bash' '-e' '-c' $args
}
Execute-Command "C:\msys64\usr\bin\pacman" -Sy --noconfirm openssl-devel openssl libevent-devel libevent mingw-w64-i686-libevent mingw-w64-x86_64-libevent mingw-w64-i686-openssl mingw-w64-x86_64-openssl mingw-w64-i686-zstd mingw-w64-x86_64-zstd
build_script:
- ps: >-
if ($env:compiler -eq "mingw") {
$oldpath = ${env:Path} -split ';'
$buildpath = @("C:\msys64\${env:compiler_path}\bin", "C:\msys64\usr\bin") + $oldpath
$env:Path = @($buildpath) -join ';'
$env:build = @("${env:APPVEYOR_BUILD_FOLDER}", $env:target) -join '\'
Set-Location "${env:APPVEYOR_BUILD_FOLDER}"
Execute-Bash 'autoreconf -i'
mkdir "${env:build}"
Set-Location "${env:build}"
Execute-Bash "../configure --prefix=/${env:compiler_path} --build=${env:target} --host=${env:target} --disable-asciidoc --enable-fatal-warnings --with-openssl-dir=${env:openssl_path}"
Execute-Bash "V=1 make -j2"
Execute-Bash "V=1 make -j2 install"
}
test_script:
- ps: >-
if ($env:compiler -eq "mingw") {
$oldpath = ${env:Path} -split ';'
$buildpath = @("C:\msys64\${env:compiler_path}\bin") + $oldpath
$env:Path = $buildpath -join ';'
Set-Location "${env:build}"
Execute-Bash "VERBOSE=1 make -j2 check"
}
on_success:
- cmd: C:\Python27\python.exe %APPVEYOR_BUILD_FOLDER%\scripts\test\appveyor-irc-notify.py irc.oftc.net:6697 tor-ci success
on_failure:
- cmd: C:\Python27\python.exe %APPVEYOR_BUILD_FOLDER%\scripts\test\appveyor-irc-notify.py irc.oftc.net:6697 tor-ci failure

.gitignore

@ -3,7 +3,6 @@
.#*
*~
*.swp
*.swo
# C stuff
*.o
*.obj
@ -19,8 +18,6 @@
.dirstamp
*.trs
*.log
# Calltool stuff
.*.graph
# Stuff made by our makefiles
*.bak
# Python droppings
@ -30,18 +27,11 @@
cscope.*
# OSX junk
*.dSYM
.DS_Store
# updateFallbackDirs.py temp files
details-*.json
uptime-*.json
*.full_url
*.last_modified
# /
/Makefile
/Makefile.in
/aclocal.m4
/ar-lib
/autom4te.cache
/build-stamp
/compile
@ -60,7 +50,6 @@ uptime-*.json
/stamp-h
/stamp-h.in
/stamp-h1
/TAGS
/test-driver
/tor.sh
/tor.spec
@ -71,7 +60,6 @@ uptime-*.json
/Tor*Bundle.dmg
/tor-*-win32.exe
/coverage_html/
/callgraph/
# /contrib/
/contrib/dist/tor.sh
@ -99,6 +87,11 @@ uptime-*.json
/doc/tor.html
/doc/tor.html.in
/doc/tor.1.xml
/doc/tor-fw-helper.1
/doc/tor-fw-helper.1.in
/doc/tor-fw-helper.html
/doc/tor-fw-helper.html.in
/doc/tor-fw-helper.1.xml
/doc/tor-gencert.1
/doc/tor-gencert.1.in
/doc/tor-gencert.html
@ -127,18 +120,12 @@ uptime-*.json
/src/Makefile
/src/Makefile.in
# /src/trace
/src/trace/libor-trace.a
# /src/common/
/src/common/Makefile
/src/common/Makefile.in
/src/common/libor.a
/src/common/libor-testing.a
/src/common/libor.lib
/src/common/libor-ctime.a
/src/common/libor-ctime-testing.a
/src/common/libor-ctime.lib
/src/common/libor-crypto.a
/src/common/libor-crypto-testing.a
/src/common/libor-crypto.lib
@ -161,8 +148,6 @@ uptime-*.json
/src/ext/ed25519/ref10/libed25519_ref10.lib
/src/ext/ed25519/donna/libed25519_donna.a
/src/ext/ed25519/donna/libed25519_donna.lib
/src/ext/keccak-tiny/libkeccak-tiny.a
/src/ext/keccak-tiny/libkeccak-tiny.lib
# /src/or/
/src/or/Makefile
@ -175,12 +160,6 @@ uptime-*.json
/src/or/libtor-testing.a
/src/or/libtor.lib
# /src/rust
/src/rust/.cargo/config
/src/rust/.cargo/registry
/src/rust/target
/src/rust/registry
# /src/test
/src/test/Makefile
/src/test/Makefile.in
@ -192,27 +171,19 @@ uptime-*.json
/src/test/test-child
/src/test/test-memwipe
/src/test/test-ntor-cl
/src/test/test-hs-ntor-cl
/src/test/test-switch-id
/src/test/test-timers
/src/test/test_workqueue
/src/test/test.exe
/src/test/test-slow.exe
/src/test/test-bt-cl.exe
/src/test/test-child.exe
/src/test/test-ntor-cl.exe
/src/test/test-hs-ntor-cl.exe
/src/test/test-memwipe.exe
/src/test/test-switch-id.exe
/src/test/test-timers.exe
/src/test/test_workqueue.exe
# /src/test/fuzz
/src/test/fuzz/fuzz-*
/src/test/fuzz/lf-fuzz-*
/src/test/test_zero_length_keys.sh
/src/test/test_ntor.sh
/src/test/test_bt.sh
# /src/tools/
/src/tools/libtorrunner.a
/src/tools/tor-checkkey
/src/tools/tor-resolve
/src/tools/tor-cov-resolve
@ -230,6 +201,12 @@ uptime-*.json
/src/trunnel/libor-trunnel-testing.a
/src/trunnel/libor-trunnel.a
# /src/tools/tor-fw-helper/
/src/tools/tor-fw-helper/tor-fw-helper
/src/tools/tor-fw-helper/tor-fw-helper.exe
/src/tools/tor-fw-helper/Makefile
/src/tools/tor-fw-helper/Makefile.in
# /src/win32/
/src/win32/Makefile
/src/win32/Makefile.in

.gitlab-ci.yml

@ -1,45 +0,0 @@
before_script:
- apt-get update -qq
- apt-get upgrade -qy
build:
script:
- apt-get install -qy --fix-missing automake build-essential
libevent-dev libssl-dev zlib1g-dev
libseccomp-dev liblzma-dev libscrypt-dev
- ./autogen.sh
- ./configure --disable-asciidoc --enable-fatal-warnings
--disable-silent-rules
- make check || (e=$?; cat test-suite.log; exit $e)
- make install
update:
only:
- schedules
script:
- "apt-get install -y --fix-missing git openssh-client"
# Run ssh-agent (inside the build environment)
- eval $(ssh-agent -s)
# Add the SSH key stored in SSH_PRIVATE_KEY variable to the agent store
- ssh-add <(echo "$DEPLOY_KEY")
# For Docker builds disable host key checking. Be aware that by adding that
# you are susceptible to man-in-the-middle attacks.
# WARNING: Use this only with the Docker executor, if you use it with shell
# you will overwrite your user's SSH config.
- mkdir -p ~/.ssh
- '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config'
# In order to properly check the server's host key, assuming you created the
# SSH_SERVER_HOSTKEYS variable previously, uncomment the following two lines
# instead.
- mkdir -p ~/.ssh
- '[[ -f /.dockerenv ]] && echo "$SSH_SERVER_HOSTKEYS" > ~/.ssh/known_hosts'
- echo "merging from torgit"
- git config --global user.email "labadmin@oniongit.eu"
- git config --global user.name "gitadmin"
- "mkdir tor"
- "cd tor"
- git clone --bare https://git.torproject.org/tor.git
- git push --mirror git@oniongit.eu:network/tor.git

.gitmodules

@ -1,3 +0,0 @@
[submodule "src/ext/rust"]
path = src/ext/rust
url = https://git.torproject.org/tor-rust-dependencies

.travis.yml

@ -1,10 +1,8 @@
language: c
## Comment out the compiler list for now to allow an explicit build
## matrix.
# compiler:
# - gcc
# - clang
compiler:
- gcc
- clang
notifications:
irc:
@ -30,10 +28,6 @@ dist: trusty
## We don't need sudo. (The "apt:" stanza after this allows us to not need sudo;
## otherwise, we would need it for getting dependencies.)
##
## We override this in the explicit build matrix to work around a
## Travis CI environment regression
## https://github.com/travis-ci/travis-ci/issues/9033
sudo: false
## (Linux only) Download our dependencies
@ -60,76 +54,18 @@ env:
global:
## The Travis CI environment allows us two cores, so let's use both.
- MAKEFLAGS="-j 2"
matrix:
## Leave at least one entry here or Travis seems to generate a
## matrix entry with empty matrix environment variables. Leaving
## more than one entry causes unwanted matrix entries with
## unspecified compilers.
- RUST_OPTIONS="--enable-rust --enable-cargo-online-mode"
# - RUST_OPTIONS="--enable-rust" TOR_RUST_DEPENDENCIES=true
# - RUST_OPTIONS=""
matrix:
## Uncomment to allow the build to report success (with non-required
## sub-builds continuing to run) if all required sub-builds have
## succeeded. This is somewhat buggy currently: it can cause
## duplicate notifications and prematurely report success if a
## single sub-build has succeeded. See
## https://github.com/travis-ci/travis-ci/issues/1696
# fast_finish: true
## Uncomment the appropriate lines below to allow the build to
## report success even if some less-critical sub-builds fail and it
## seems likely to take a while for someone to fix it. Currently
## Travis CI doesn't distinguish "all builds succeeded" from "some
## non-required sub-builds failed" except on the individual build's
## page, which makes it somewhat annoying to detect from the
## branches and build history pages. See
## https://github.com/travis-ci/travis-ci/issues/8716
allow_failures:
# - env: RUST_OPTIONS="--enable-rust" TOR_RUST_DEPENDENCIES=true
# - env: RUST_OPTIONS="--enable-rust --enable-cargo-online-mode
# - compiler: clang
## Create explicit matrix entries to work around a Travis CI
## environment issue. Missing keys inherit from the first list
## entry under that key outside the "include" clause.
include:
- compiler: gcc
- compiler: gcc
env: RUST_OPTIONS="--enable-rust" TOR_RUST_DEPENDENCIES=true
- compiler: gcc
env: RUST_OPTIONS=""
- compiler: gcc
env: COVERAGE_OPTIONS="--enable-coverage"
- compiler: gcc
env: DISTCHECK="yes" RUST_OPTIONS=""
- compiler: gcc
env: DISTCHECK="yes" RUST_OPTIONS="--enable-rust --enable-cargo-online-mode"
- compiler: gcc
env: MODULES_OPTIONS="--disable-module-dirauth"
## The "sudo: required" forces non-containerized builds, working
## around a Travis CI environment issue: clang LeakAnalyzer fails
## because it requires ptrace and the containerized environment no
## longer allows ptrace.
- compiler: clang
sudo: required
- compiler: clang
sudo: required
env: RUST_OPTIONS="--enable-rust" TOR_RUST_DEPENDENCIES=true
- compiler: clang
sudo: required
env: RUST_OPTIONS=""
- compiler: clang
sudo: required
env: MODULES_OPTIONS="--disable-module-dirauth"
## If one build in the matrix fails (e.g. if building without Rust and Clang
## fails, but building with Rust and GCC is still going), then cancel the
## entire job early and call the whole thing a failure.
fast_finish: true
before_install:
## If we're on OSX, homebrew usually needs to updated first
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update ; fi
## Download rustup
- if [[ "$RUST_OPTIONS" != "" ]]; then curl -Ssf -o rustup.sh https://sh.rustup.rs; fi
- if [[ "$COVERAGE_OPTIONS" != "" ]]; then pip install --user cpp-coveralls; fi
- curl -Ssf -o rustup.sh https://sh.rustup.rs
install:
## If we're on OSX use brew to install required dependencies (for Linux, see the "apt:" section above)
@ -140,30 +76,13 @@ install:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then { brew outdated xz || brew upgrade xz; }; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then { brew outdated libscrypt || brew upgrade libscrypt; }; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then { brew outdated zstd || brew upgrade zstd; }; fi
## Install the stable channels of rustc and cargo and setup our toolchain environment
- if [[ "$RUST_OPTIONS" != "" ]]; then sh rustup.sh -y --default-toolchain stable; fi
- if [[ "$RUST_OPTIONS" != "" ]]; then source $HOME/.cargo/env; fi
## Get some info about rustc and cargo
- if [[ "$RUST_OPTIONS" != "" ]]; then which rustc; fi
- if [[ "$RUST_OPTIONS" != "" ]]; then which cargo; fi
- if [[ "$RUST_OPTIONS" != "" ]]; then rustc --version; fi
- if [[ "$RUST_OPTIONS" != "" ]]; then cargo --version; fi
## If we're testing rust builds in offline-mode, then set up our vendored dependencies
- if [[ "$TOR_RUST_DEPENDENCIES" == "true" ]]; then export TOR_RUST_DEPENDENCIES=$PWD/src/ext/rust/crates; fi
script:
- ./autogen.sh
- ./configure $RUST_OPTIONS $COVERAGE_OPTIONS $MODULES_OPTIONS --disable-asciidoc --enable-fatal-warnings --disable-silent-rules --enable-fragile-hardening
- ./configure $RUST_OPTIONS --disable-asciidoc --enable-gcc-warnings --disable-silent-rules --enable-fragile-hardening
## We run `make check` because that's what https://jenkins.torproject.org does.
- if [[ "$DISTCHECK" == "" ]]; then make check; fi
- if [[ "$DISTCHECK" != "" ]]; then make distcheck DISTCHECK_CONFIGURE_FLAGS="$RUST_OPTIONS $COVERAGE_OPTIONS --disable-asciidoc --enable-fatal-warnings --disable-silent-rules --enable-fragile-hardening"; fi
- make check
after_failure:
## `make check` will leave a log file with more details of test failures.
- if [[ "$DISTCHECK" == "" ]]; then cat test-suite.log; fi
## `make distcheck` puts it somewhere different.
- if [[ "$DISTCHECK" != "" ]]; then make show-distdir-testlog; fi
after_success:
## If this build was one that produced coverage, upload it.
- if [[ "$COVERAGE_OPTIONS" != "" ]]; then coveralls -b . --exclude src/test --exclude src/trunnel --gcov-options '\-p'; fi
- cat test-suite.log

CONTRIBUTING

@ -1,39 +0,0 @@
Contributing to Tor
-------------------
### Getting started
Welcome!
We have a bunch of documentation about how to develop Tor in the
doc/HACKING/ directory. We recommend that you start with
doc/HACKING/README.1st.md , and then go from there. It will tell
you how to find your way around the source code, how to get
involved with the Tor community, how to write patches, and much
more!
You don't have to be a C developer to help with Tor: have a look
at https://www.torproject.org/getinvolved/volunteer !
The Tor Project is committed to fostering an inclusive community
where people feel safe to engage, share their points of view, and
participate. For the latest version of our Code of Conduct, please
see
https://gitweb.torproject.org/community/policies.git/plain/code_of_conduct.txt
### License issues
Tor is distributed under the license terms in the LICENSE -- in
brief, the "3-clause BSD license". If you send us code to
distribute with Tor, it needs to be code that we can distribute
under those terms. Please don't send us patches unless you agree
to allow this.
Some compatible licenses include:
- 3-clause BSD
- 2-clause BSD
- CC0 Public Domain Dedication

ChangeLog

File diff suppressed because it is too large.

Doxyfile.in

@ -446,6 +446,12 @@ MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
# If the sources in your project are distributed over multiple directories
# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
# in the documentation. The default is NO.
SHOW_DIRECTORIES = NO
# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
@ -754,6 +760,12 @@ HTML_FOOTER =
HTML_STYLESHEET =
# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
HTML_ALIGN_MEMBERS = YES
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
@ -1035,6 +1047,18 @@ GENERATE_XML = NO
XML_OUTPUT = xml
# The XML_SCHEMA tag can be used to specify an XML schema,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_SCHEMA =
# The XML_DTD tag can be used to specify an XML DTD,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_DTD =
# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
# and cross-referencing information) to the XML output. Note that
@ -1240,7 +1264,7 @@ HAVE_DOT = NO
# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
# containing the font.
DOT_FONTNAME =
DOT_FONTNAME = FreeSans
# By default doxygen will tell dot to use the output directory to look for the
# FreeSans.ttf font (which doxygen will put there itself). If you specify a

LICENSE

@ -13,7 +13,7 @@ Tor is distributed under this license:
Copyright (c) 2001-2004, Roger Dingledine
Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson
Copyright (c) 2007-2017, The Tor Project, Inc.
Copyright (c) 2007-2015, The Tor Project, Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -270,110 +270,6 @@ src/ext/readpassphrase.[ch] are distributed under this license:
Agency (DARPA) and Air Force Research Laboratory, Air Force
Materiel Command, USAF, under agreement number F39502-99-1-0512.
===============================================================================
src/ext/mulodi4.c is distributed under this license:
=========================================================================
compiler_rt License
=========================================================================
The compiler_rt library is dual licensed under both the
University of Illinois "BSD-Like" license and the MIT license.
As a user of this code you may choose to use it under either
license. As a contributor, you agree to allow your code to be
used under both.
Full text of the relevant licenses is included below.
=========================================================================
University of Illinois/NCSA
Open Source License
Copyright (c) 2009-2016 by the contributors listed in CREDITS.TXT
All rights reserved.
Developed by:
LLVM Team
University of Illinois at Urbana-Champaign
http://llvm.org
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal with the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimers.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimers in the documentation and/or other materials
provided with the distribution.
* Neither the names of the LLVM Team, University of Illinois
at Urbana-Champaign, nor the names of its contributors may
be used to endorse or promote products derived from this
Software without specific prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS WITH THE SOFTWARE.
=========================================================================
Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
=========================================================================
Copyrights and Licenses for Third Party Software Distributed with LLVM:
=========================================================================
The LLVM software contains code written by third parties. Such
software will have its own individual LICENSE.TXT file in the
directory in which it appears. This file will describe the
copyrights, license, and restrictions which apply to that code.
The disclaimer of warranty in the University of Illinois Open
Source License applies to all code in the LLVM Distribution, and
nothing in any of the other licenses gives permission to use the
names of the LLVM Team or the University of Illinois to endorse
or promote products derived from this Software.
===============================================================================
If you got Tor as a static binary with OpenSSL included, then you should know:
"This product includes software developed by the OpenSSL Project

Makefile.am

@ -1,73 +1,47 @@
# Copyright (c) 2001-2004, Roger Dingledine
# Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson
# Copyright (c) 2007-2017, The Tor Project, Inc.
# Copyright (c) 2007-2015, The Tor Project, Inc.
# See LICENSE for licensing information
# "foreign" means we don't follow GNU package layout standards
# 1.9 means we require automake version 1.9
AUTOMAKE_OPTIONS = foreign 1.9 subdir-objects
ACLOCAL_AMFLAGS = -I m4
noinst_LIBRARIES=
EXTRA_DIST=
noinst_HEADERS=
bin_PROGRAMS=
EXTRA_PROGRAMS=
CLEANFILES=
TESTS=
noinst_PROGRAMS=
DISTCLEANFILES=
bin_SCRIPTS=
AM_CPPFLAGS=
AM_CFLAGS=@TOR_SYSTEMD_CFLAGS@ @CFLAGS_BUGTRAP@ @TOR_LZMA_CFLAGS@ @TOR_ZSTD_CFLAGS@
SHELL=@SHELL@
if COVERAGE_ENABLED
TESTING_TOR_BINARY=$(top_builddir)/src/or/tor-cov$(EXEEXT)
else
TESTING_TOR_BINARY=$(top_builddir)/src/or/tor$(EXEEXT)
endif
if USE_RUST
rust_ldadd=$(top_builddir)/src/rust/target/release/@TOR_RUST_STATIC_NAME@ \
@TOR_RUST_EXTRA_LIBS@
else
rust_ldadd=
endif
AM_CFLAGS = @TOR_SYSTEMD_CFLAGS@
SHELL = @SHELL@
include src/include.am
include doc/include.am
include contrib/include.am
EXTRA_DIST+= \
ChangeLog \
CONTRIBUTING \
INSTALL \
LICENSE \
Makefile.nmake \
README \
ReleaseNotes \
scripts/maint/checkSpace.pl
## This tells etags how to find mockable function definitions.
AM_ETAGSFLAGS=--regex='{c}/MOCK_IMPL([^,]+,\W*\([a-zA-Z0-9_]+\)\W*,/\1/s'
ReleaseNotes
if COVERAGE_ENABLED
TEST_CFLAGS=-fno-inline -fprofile-arcs -ftest-coverage
if DISABLE_ASSERTS_IN_UNIT_TESTS
TEST_CPPFLAGS=-DTOR_UNIT_TESTS -DTOR_COVERAGE -DDISABLE_ASSERTS_IN_UNIT_TESTS @TOR_MODULES_ALL_ENABLED@
else
TEST_CPPFLAGS=-DTOR_UNIT_TESTS -DTOR_COVERAGE @TOR_MODULES_ALL_ENABLED@
endif
TEST_CPPFLAGS=-DTOR_UNIT_TESTS -DTOR_COVERAGE
TEST_NETWORK_FLAGS=--coverage --hs-multi-client 1
else
TEST_CFLAGS=
TEST_CPPFLAGS=-DTOR_UNIT_TESTS @TOR_MODULES_ALL_ENABLED@
TEST_CPPFLAGS=-DTOR_UNIT_TESTS
TEST_NETWORK_FLAGS=--hs-multi-client 1
endif
TEST_NETWORK_WARNING_FLAGS=--quiet --only-warnings
if LIBFUZZER_ENABLED
TEST_CFLAGS += -fsanitize-coverage=trace-pc-guard,trace-cmp,trace-div
# not "edge"
endif
TEST_NETWORK_ALL_LOG_DIR=$(top_builddir)/test_network_log
TEST_NETWORK_ALL_DRIVER_FLAGS=--color-tests yes
@ -92,14 +66,14 @@ dist-rpm: dist-gzip
echo "RPM build finished"; \
#end of dist-rpm
dist: check
doxygen:
doxygen && cd doc/doxygen/latex && make
test: all
$(top_builddir)/src/test/test
check-local: check-spaces check-changes
need-chutney-path:
@if test ! -d "$$CHUTNEY_PATH"; then \
echo '$$CHUTNEY_PATH was not set.'; \
@ -114,24 +88,22 @@ need-chutney-path:
# Note that test-network requires a copy of Chutney in $CHUTNEY_PATH.
# Chutney can be cloned from https://git.torproject.org/chutney.git .
test-network: need-chutney-path $(TESTING_TOR_BINARY) src/tools/tor-gencert
test-network: need-chutney-path all
$(top_srcdir)/src/test/test-network.sh $(TEST_NETWORK_FLAGS)
# Run all available tests using automake's test-driver
# only run IPv6 tests if we can ping6 ::1 (localhost)
# only run IPv6 tests if we can ping ::1 (localhost)
# some IPv6 tests will fail without an IPv6 DNS server (see #16971 and #17011)
# only run mixed tests if we have a tor-stable binary
# Try the syntax for BSD ping6, Linux ping6, and Linux ping -6,
# because they're incompatible
test-network-all: need-chutney-path test-driver $(TESTING_TOR_BINARY) src/tools/tor-gencert
# see #17015 for autodetection of different tor versions
test-network-all: need-chutney-path all test-driver
mkdir -p $(TEST_NETWORK_ALL_LOG_DIR)
@flavors="$(TEST_CHUTNEY_FLAVORS)"; \
if ping6 -q -c 1 -o ::1 >/dev/null 2>&1 || ping6 -q -c 1 -W 1 ::1 >/dev/null 2>&1 || ping -6 -c 1 -W 1 ::1 >/dev/null 2>&1; then \
echo "ping6 ::1 or ping ::1 succeeded, running IPv6 flavors: $(TEST_CHUTNEY_FLAVORS_IPV6)."; \
if ping6 -q -o ::1 >/dev/null 2>&1; then \
echo "ping6 ::1 succeeded, running IPv6 flavors: $(TEST_CHUTNEY_FLAVORS_IPV6)."; \
flavors="$$flavors $(TEST_CHUTNEY_FLAVORS_IPV6)"; \
else \
echo "ping6 ::1 and ping ::1 failed, skipping IPv6 flavors: $(TEST_CHUTNEY_FLAVORS_IPV6)."; \
echo "ping6 ::1 failed, skipping IPv6 flavors: $(TEST_CHUTNEY_FLAVORS_IPV6)."; \
skip_flavors="$$skip_flavors $(TEST_CHUTNEY_FLAVORS_IPV6)"; \
fi; \
if command -v tor-stable >/dev/null 2>&1; then \
@ -145,8 +117,7 @@ test-network-all: need-chutney-path test-driver $(TESTING_TOR_BINARY) src/tools/
echo "SKIP: $$f"; \
done; \
for f in $$flavors; do \
$(SHELL) $(top_srcdir)/test-driver --test-name $$f --log-file $(TEST_NETWORK_ALL_LOG_DIR)/$$f.log --trs-file $(TEST_NETWORK_ALL_LOG_DIR)/$$f.trs $(TEST_NETWORK_ALL_DRIVER_FLAGS) $(top_srcdir)/src/test/test-network.sh --flavor $$f $(TEST_NETWORK_FLAGS); \
$(top_srcdir)/src/test/test-network.sh $(TEST_NETWORK_WARNING_FLAGS); \
./test-driver --test-name $$f --log-file $(TEST_NETWORK_ALL_LOG_DIR)/$$f.log --trs-file $(TEST_NETWORK_ALL_LOG_DIR)/$$f.trs $(TEST_NETWORK_ALL_DRIVER_FLAGS) $(top_srcdir)/src/test/test-network.sh --flavor $$f $(TEST_NETWORK_FLAGS); \
done; \
echo "Log and result files are available in $(TEST_NETWORK_ALL_LOG_DIR)."; \
! grep -q FAIL test_network_log/*.trs
@ -159,10 +130,10 @@ need-stem-path:
fi
test-stem: need-stem-path $(TESTING_TOR_BINARY)
@$(PYTHON) "$$STEM_SOURCE_DIR"/run_tests.py --tor "$(TESTING_TOR_BINARY)" --all --log notice --target RUN_ALL;
@$(PYTHON) "$$STEM_SOURCE_DIR"/run_tests.py --tor $(TESTING_TOR_BINARY) --all --log notice --target RUN_ALL;
test-stem-full: need-stem-path $(TESTING_TOR_BINARY)
@$(PYTHON) "$$STEM_SOURCE_DIR"/run_tests.py --tor "$(TESTING_TOR_BINARY)" --all --log notice --target RUN_ALL,ONLINE -v;
@$(PYTHON) "$$STEM_SOURCE_DIR"/run_tests.py --tor $(TESTING_TOR_BINARY) --all --log notice --target RUN_ALL,ONLINE -v;
test-full: need-stem-path need-chutney-path check test-network test-stem
@ -200,16 +171,13 @@ coverage-html-full: all
genhtml --branch-coverage -o "$(HTML_COVER_DIR)" "$(HTML_COVER_DIR)/lcov.info"
# Avoid strlcpy.c, strlcat.c, aes.c, OpenBSD_malloc_Linux.c, sha256.c,
# tinytest*.[ch]
# eventdns.[hc], tinytest*.[ch]
check-spaces:
if USE_PERL
$(PERL) $(top_srcdir)/scripts/maint/checkSpace.pl -C \
$(top_srcdir)/scripts/maint/checkSpace.pl -C \
$(top_srcdir)/src/common/*.[ch] \
$(top_srcdir)/src/or/*.[ch] \
$(top_srcdir)/src/test/*.[ch] \
$(top_srcdir)/src/test/*/*.[ch] \
$(top_srcdir)/src/tools/*.[ch]
endif
check-docs: all
$(PERL) $(top_builddir)/scripts/maint/checkOptionDocs.pl
@ -218,42 +186,16 @@ check-logs:
$(top_srcdir)/scripts/maint/checkLogs.pl \
$(top_srcdir)/src/*/*.[ch] | sort -n
.PHONY: check-typos
check-typos:
@if test -x "`which misspell 2>&1;true`"; then \
echo "Checking for Typos ..."; \
(misspell \
$(top_srcdir)/src/[^e]*/*.[ch] \
$(top_srcdir)/doc \
$(top_srcdir)/contrib \
$(top_srcdir)/scripts \
$(top_srcdir)/README \
$(top_srcdir)/ChangeLog \
$(top_srcdir)/INSTALL \
$(top_srcdir)/ReleaseNotes \
$(top_srcdir)/LICENSE); \
else \
echo "Tor can use misspell to check for typos."; \
echo "It seems that you don't have misspell installed."; \
echo "You can install the latest version of misspell here: https://github.com/client9/misspell#install"; \
fi
.PHONY: check-changes
check-changes:
if USEPYTHON
@if test -d "$(top_srcdir)/changes"; then \
$(PYTHON) $(top_srcdir)/scripts/maint/lintChanges.py $(top_srcdir)/changes; \
$(PYTHON) $(top_srcdir)/scripts/maint/lintChanges.py $(top_srcdir)/changes/*; \
fi
endif
.PHONY: update-versions
update-versions:
$(PERL) $(top_builddir)/scripts/maint/updateVersions.pl
.PHONY: callgraph
callgraph:
$(top_builddir)/scripts/maint/run_calltool.sh
version:
@echo "Tor @VERSION@"
@if test -d "$(top_srcdir)/.git" && test -x "`which git 2>&1;true`"; then \
@ -266,20 +208,3 @@ mostlyclean-local:
rm -rf $(HTML_COVER_DIR)
rm -rf $(top_builddir)/doc/doxygen
rm -rf $(TEST_NETWORK_ALL_LOG_DIR)
clean-local:
rm -rf $(top_builddir)/src/rust/target
rm -rf $(top_builddir)/src/rust/.cargo/registry
if USE_RUST
distclean-local: distclean-rust
endif
# This relies on some internal details of how automake implements
# distcheck. We check two directories because automake-1.15 changed
# from $(distdir)/_build to $(distdir)/_build/sub.
show-distdir-testlog:
@if test -d "$(distdir)/_build/sub"; then \
cat $(distdir)/_build/sub/$(TEST_SUITE_LOG); \
else \
cat $(distdir)/_build/$(TEST_SUITE_LOG); fi

README

@ -26,7 +26,4 @@ Frequently Asked Questions:
To get started working on Tor development:
See the doc/HACKING directory.
Release timeline:
https://trac.torproject.org/projects/tor/wiki/org/teams/NetworkTeam/CoreTorReleases
See the doc/HACKING file.

File diff suppressed because it is too large.

acinclude.m4

@ -2,7 +2,7 @@ dnl Helper macros for Tor configure.ac
dnl Copyright (c) 2001-2004, Roger Dingledine
dnl Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson
dnl Copyright (c) 2007-2008, Roger Dingledine, Nick Mathewson
dnl Copyright (c) 2007-2017, The Tor Project, Inc.
dnl Copyright (c) 2007-2015, The Tor Project, Inc.
dnl See LICENSE for licensing information
AC_DEFUN([TOR_EXTEND_CODEPATH],
@ -42,21 +42,20 @@ AC_DEFUN([TOR_DEFINE_CODEPATH],
AC_SUBST(TOR_LDFLAGS_$2)
])
dnl 1: flags
dnl 2: try to link too if this is nonempty.
dnl 3: what to do on success compiling
dnl 4: what to do on failure compiling
AC_DEFUN([TOR_TRY_COMPILE_WITH_CFLAGS], [
dnl 1:flags
dnl 2:also try to link (yes: non-empty string)
dnl will set yes or no in $tor_can_link_$1 (as modified by AS_VAR_PUSHDEF)
AC_DEFUN([TOR_CHECK_CFLAGS], [
AS_VAR_PUSHDEF([VAR],[tor_cv_cflags_$1])
AC_CACHE_CHECK([whether the compiler accepts $1], VAR, [
tor_saved_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS -pedantic -Werror $1"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[]])],
AC_TRY_COMPILE([], [return 0;],
[AS_VAR_SET(VAR,yes)],
[AS_VAR_SET(VAR,no)])
if test x$2 != x; then
AS_VAR_PUSHDEF([can_link],[tor_can_link_$1])
AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[]])],
AC_TRY_LINK([], [return 0;],
[AS_VAR_SET(can_link,yes)],
[AS_VAR_SET(can_link,no)])
AS_VAR_POPDEF([can_link])
@ -64,20 +63,11 @@ AC_DEFUN([TOR_TRY_COMPILE_WITH_CFLAGS], [
CFLAGS="$tor_saved_CFLAGS"
])
if test x$VAR = xyes; then
$3
else
$4
CFLAGS="$CFLAGS $1"
fi
AS_VAR_POPDEF([VAR])
])
dnl 1:flags
dnl 2:also try to link (yes: non-empty string)
dnl will set yes or no in $tor_can_link_$1 (as modified by AS_VAR_PUSHDEF)
AC_DEFUN([TOR_CHECK_CFLAGS], [
TOR_TRY_COMPILE_WITH_CFLAGS($1, $2, CFLAGS="$CFLAGS $1", true)
])
dnl 1:flags
dnl 2:extra ldflags
dnl 3:extra libraries
@ -93,7 +83,7 @@ AC_DEFUN([TOR_CHECK_LDFLAGS], [
AC_RUN_IFELSE([AC_LANG_PROGRAM([#include <stdio.h>], [fputs("", stdout)])],
[AS_VAR_SET(VAR,yes)],
[AS_VAR_SET(VAR,no)],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[]])],
[AC_TRY_LINK([], [return 0;],
[AS_VAR_SET(VAR,yes)],
[AS_VAR_SET(VAR,no)])])
CFLAGS="$tor_saved_CFLAGS"
@ -113,21 +103,21 @@ if test x$2 = xdevpkg; then
h=" headers for"
fi
if test -f /etc/debian_version && test x"$tor_$1_$2_debian" != x; then
AC_MSG_WARN([On Debian, you can install$h $1 using "apt-get install $tor_$1_$2_debian"])
AC_WARN([On Debian, you can install$h $1 using "apt-get install $tor_$1_$2_debian"])
if test x"$tor_$1_$2_debian" != x"$tor_$1_devpkg_debian"; then
AC_MSG_WARN([ You will probably need $tor_$1_devpkg_debian too.])
AC_WARN([ You will probably need $tor_$1_devpkg_debian too.])
fi
fi
if test -f /etc/fedora-release && test x"$tor_$1_$2_redhat" != x; then
AC_MSG_WARN([On Fedora, you can install$h $1 using "dnf install $tor_$1_$2_redhat"])
AC_WARN([On Fedora Core, you can install$h $1 using "yum install $tor_$1_$2_redhat"])
if test x"$tor_$1_$2_redhat" != x"$tor_$1_devpkg_redhat"; then
AC_MSG_WARN([ You will probably need to install $tor_$1_devpkg_redhat too.])
AC_WARN([ You will probably need to install $tor_$1_devpkg_redhat too.])
fi
else
if test -f /etc/redhat-release && test x"$tor_$1_$2_redhat" != x; then
AC_MSG_WARN([On most Redhat-based systems, you can get$h $1 by installing the $tor_$1_$2_redhat RPM package])
AC_WARN([On most Redhat-based systems, you can get$h $1 by installing the $tor_$1_$2_redhat RPM package])
if test x"$tor_$1_$2_redhat" != x"$tor_$1_devpkg_redhat"; then
AC_MSG_WARN([ You will probably need to install $tor_$1_devpkg_redhat too.])
AC_WARN([ You will probably need to install $tor_$1_devpkg_redhat too.])
fi
fi
fi
@ -245,10 +235,7 @@ if test "$cross_compiling" != yes; then
LDFLAGS="$tor_tryextra $orig_LDFLAGS"
fi
AC_RUN_IFELSE([AC_LANG_PROGRAM([$5], [$6])],
[runnable=yes], [runnable=no],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[]])],
[runnable=yes],
[runnable=no])])
[runnable=yes], [runnable=no])
if test "$runnable" = yes; then
tor_cv_library_$1_linker_option=$tor_tryextra
break

autogen.sh

@ -1,12 +1,12 @@
#!/bin/sh
if [ -x "`which autoreconf 2>/dev/null`" ] ; then
opt="-i -f -W all,error"
opt="-if"
for i in $@; do
case "$i" in
-v)
opt="${opt} -v"
opt=$opt"v"
;;
esac
done

changes/19271

@ -0,0 +1,2 @@
o Directory authority changes:
- Urras is no longer a directory authority. Closes ticket 19271.

View File

@ -1,6 +0,0 @@
o Major bugfixes (security, directory authority, denial-of-service):
- Fix a bug that could have allowed an attacker to force a
directory authority to use up all its RAM by passing it a
maliciously crafted protocol versions string. Fixes bug 25517;
bugfix on 0.2.9.4-alpha. This issue is also tracked as
TROVE-2018-005.

changes/bifroest

@ -0,0 +1,3 @@
o Directory authority changes (also in 0.2.8.7):
- The "Tonga" bridge authority has been retired; the new bridge
authority is "Bifroest". Closes tickets 19728 and 19690.

changes/buf-sentinel

@ -0,0 +1,11 @@
o Major features (security fixes):
- Prevent a class of security bugs caused by treating the contents
of a buffer chunk as if they were a NUL-terminated string. At
least one such bug seems to be present in all currently used
versions of Tor, and would allow an attacker to remotely crash
most Tor instances, especially those compiled with extra compiler
hardening. With this defense in place, such bugs can't crash Tor,
though we should still fix them as they occur. Closes ticket 20384
(TROVE-2016-10-001).

changes/bug16248

@ -0,0 +1,8 @@
o Major bugfixes (dns proxy mode, crash):
- Avoid crashing when running as a DNS proxy. Closes bug 16248; bugfix on
0.2.0.1-alpha. Patch from 'cypherpunks'.
o Minor features (bug-resistance):
- Make Tor survive errors involving connections without a corresponding
event object. Previously we'd fail with an assertion; now we produce a
log message. Related to bug 16248.

changes/bug17354

@ -0,0 +1,4 @@
o Minor bugfixes (sandbox):
- Add the "hidserv-stats" filename to our sandbox filter for the
HiddenServiceStatistics option to work properly. Fixes bug 17354;
bugfix on tor-0.2.6.2-alpha~54^2~1. Patch from David Goulet.

changes/bug17906

@ -0,0 +1,4 @@
o Minor features (authorities):
- Update the V3 identity key for dannenberg, it was changed on
18 November 2015.
Closes task #17906. Patch by "teor".

changes/bug18089

@ -0,0 +1,6 @@
o Minor fixes (security):
- Make memwipe() do nothing when passed a NULL pointer
or zero size. Check size argument to memwipe() for underflow.
Closes bug #18089. Reported by "gk", patch by "teor".
Bugfix on 0.2.3.25 and 0.2.4.6-alpha (#7352),
commit 49dd5ef3 on 7 Nov 2012.

changes/bug18162

@ -0,0 +1,7 @@
o Major bugfixes (security, pointers):
- Avoid a difficult-to-trigger heap corruption attack when extending
a smartlist to contain over 16GB of pointers. Fixes bug #18162;
bugfix on Tor 0.1.1.11-alpha, which fixed a related bug
incompletely. Reported by Guido Vranken.

changes/bug18710

@ -0,0 +1,6 @@
o Major bugfixes (DNS proxy):
- Stop a crash that could occur when a client running with DNSPort
received a query with multiple address types, where the first
address type was not supported. Found and fixed by Scott Dial.
Fixes bug 18710; bugfix on 0.2.5.4-alpha.

changes/bug20384

@ -0,0 +1,10 @@
o Major features (security fixes):
- Prevent a class of security bugs caused by treating the contents
of a buffer chunk as if they were a NUL-terminated string. At
least one such bug seems to be present in all currently used
versions of Tor, and would allow an attacker to remotely crash
most Tor instances, especially those compiled with extra compiler
hardening. With this defense in place, such bugs can't crash Tor,
though we should still fix them as they occur. Closes ticket
20384 (TROVE-2016-10-001).

changes/bug21018

@ -0,0 +1,11 @@
o Major bugfixes (parsing, security):
- Fix a bug in parsing that could cause clients to read a single
byte past the end of an allocated region. This bug could be
used to cause hardened clients (built with
--enable-expensive-hardening) to crash if they tried to visit
a hostile hidden service. Non-hardened clients are only
affected depending on the details of their platform's memory
allocator. Fixes bug 21018; bugfix on 0.2.0.8-alpha. Found by
using libFuzzer. Also tracked as TROVE-2016-12-002 and as
CVE-2016-1254.

changes/bug22490

@ -0,0 +1,3 @@
o Minor bugfixes (correctness):
- Avoid undefined behavior when parsing IPv6 entries from the geoip6
file. Fixes bug 22490; bugfix on 0.2.4.6-alpha.

changes/bug22636

@ -0,0 +1,8 @@
o Build features:
- Tor's repository now includes a Travis Continuous Integration (CI)
configuration file (.travis.yml). This is meant to help new developers and
contributors who fork Tor to a Github repository be better able to test
their changes, and understand what we expect to pass. To use this new build
feature, you must fork Tor to your Github account, then go into the
"Integrations" menu in the repository settings for your fork and enable
Travis, then push your changes.

changes/bug22737

@ -0,0 +1,12 @@
o Minor bugfixes (defensive programming, undefined behavior):
- Fix a memset() off the end of an array when packing cells. This
bug should be harmless in practice, since the corrupted bytes
are still in the same structure, and are always padding bytes,
ignored, or immediately overwritten, depending on compiler
behavior. Nevertheless, because the memset()'s purpose is to
make sure that any other cell-handling bugs can't expose bytes
to the network, we need to fix it. Fixes bug 22737; bugfix on
0.2.4.11-alpha. Fixes CID 1401591.

changes/bug22789

@ -0,0 +1,7 @@
o Major bugfixes (openbsd, denial-of-service):
- Avoid an assertion failure bug affecting our implementation of
inet_pton(AF_INET6) on certain OpenBSD systems whose strtol()
handling of "0xfoo" differs from what we had expected.
Fixes bug 22789; bugfix on 0.2.3.8-alpha. Also tracked as
TROVE-2017-007.


@ -1,3 +0,0 @@
o Minor bugfixes (onion services):
- Fix a bug that blocked the creation of ephemeral v3 onion services. Fixes
bug 25939; bugfix on 0.3.4.1-alpha.


@ -1,5 +0,0 @@
o Minor bugfixes (test coverage tools):
- Update our "cov-diff" script to handle output from the latest
version of gcov, and to remove extraneous timestamp information
from its output. Fixes bugs 26101 and 26102; bugfix on
0.2.5.1-alpha.


@ -1,7 +0,0 @@
o Minor bugfixes (compatibility, openssl):
- Work around a change in OpenSSL 1.1.1 where
return values that would previously indicate "no password" now
indicate an empty password. Without this workaround, Tor instances
running with OpenSSL 1.1.1 would accept descriptors that other Tor
instances would reject. Fixes bug 26116; bugfix on 0.2.5.16.


@ -1,6 +0,0 @@
o Minor bugfixes (controller):
- Improve accuracy of the BUILDTIMEOUT_SET control port event's
TIMEOUT_RATE and CLOSE_RATE fields. (We were previously miscounting
the total number of circuits for these field values.) Fixes bug
26121; bugfix on 0.3.3.1-alpha.


@ -1,3 +0,0 @@
o Minor bugfixes (compilation):
- Fix compilation when building with OpenSSL 1.1.0 with the
"no-deprecated" flag enabled. Fixes bug 26156; bugfix on 0.3.4.1-alpha.


@ -1,4 +0,0 @@
o Minor bugfixes (hardening):
- Prevent a possible out-of-bounds smartlist read in
protover_compute_vote(). Fixes bug 26196; bugfix on
0.2.9.4-alpha.


@ -1,4 +0,0 @@
o Minor bugfixes (control port):
- Do not count 0-length RELAY_COMMAND_DATA cells as valid data in CIRC_BW
events. Previously, such cells were counted entirely in the OVERHEAD
field. Now they are not. Fixes bug 26259; bugfix on 0.3.4.1-alpha.


@ -1,4 +0,0 @@
o Documentation:
- In code comment, point the reader to the exact section
in Tor specification that specifies circuit close error
code values. Resolves ticket 25237.

changes/geoip-april2016

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the April 5 2016 Maxmind GeoLite2
Country database.

changes/geoip-april2017

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the April 4 2017 Maxmind GeoLite2
Country database.

changes/geoip-august2016

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the August 2 2016 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the December 7 2016 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the February 2 2016 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the February 8 2017 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the January 5 2016 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the January 4 2017 Maxmind GeoLite2
Country database.

changes/geoip-july2016

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the July 6 2016 Maxmind GeoLite2
Country database.

changes/geoip-july2017

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the July 4 2017 Maxmind GeoLite2
Country database.

changes/geoip-jun2016

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the June 7 2016 Maxmind GeoLite2
Country database.

changes/geoip-june2017

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the June 8 2017 Maxmind GeoLite2
Country database.

changes/geoip-march2016

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the March 3 2016 Maxmind GeoLite2
Country database.

changes/geoip-march2017

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the March 7 2017 Maxmind GeoLite2
Country database.

changes/geoip-may2016

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the May 4 2016 Maxmind GeoLite2
Country database.

changes/geoip-may2017

@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the May 2 2017 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the November 3 2016 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the October 4 2016 Maxmind GeoLite2
Country database.


@ -0,0 +1,4 @@
o Minor features:
- Update geoip and geoip6 to the September 6 2016 Maxmind GeoLite2
Country database.

changes/rsa_init_bug

@ -0,0 +1,7 @@
o Major bugfixes (key management):
- If OpenSSL fails to generate an RSA key, do not retain a dangling pointer
to the previous (uninitialized) key value. The impact here should be
limited to a difficult-to-trigger crash, if OpenSSL is running an
engine that makes key generation failures possible, or if OpenSSL runs
out of memory. Fixes bug 19152; bugfix on 0.2.1.10-alpha. Found by
Yuan Jochen Kang, Suman Jana, and Baishakhi Ray.


@ -1,4 +0,0 @@
o Minor features (continuous integration):
- Add the necessary configuration files for continuous integration
testing on Windows, via the Appveyor platform. Closes ticket 25549.
Patches from Marcin Cieślak and Isis Lovecruft.

changes/trove-2017-001.2

@ -0,0 +1,8 @@
o Major bugfixes (parsing):
- Fix an integer underflow bug when comparing malformed Tor versions.
This bug is harmless, except when Tor has been built with
--enable-expensive-hardening, which would turn it into a crash;
or on Tor 0.2.9.1-alpha through Tor 0.2.9.8, which were built with
-ftrapv by default.
Part of TROVE-2017-001. Fixes bug 21278; bugfix on
0.0.8pre1. Found by OSS-Fuzz.

changes/trove-2017-005

@ -0,0 +1,7 @@
o Major bugfixes (hidden service, relay, security):
- Fix an assertion failure caused by receiving a BEGIN_DIR cell on
a hidden service rendezvous circuit. Fixes bug 22494, tracked as
TROVE-2017-005 and CVE-2017-0376; bugfix on 0.2.2.1-alpha. Found
by armadev.

File diff suppressed because it is too large.

contrib/clang/sanitize_blacklist.txt

@ -2,17 +2,6 @@
# syntax specified in http://clang.llvm.org/docs/SanitizerSpecialCaseList.html
# for more info see http://clang.llvm.org/docs/AddressSanitizer.html
#
# Tor notes: This file is obsolete!
#
# It was necessary in order to apply the sanitizers to all of tor. But
# we don't believe that's a good idea: some parts of tor need constant-time
# behavior that is hard to guarantee with these sanitizers.
#
# If you need this behavior, then please consider --enable-expensive-hardening,
# and report bugs as needed.
#
# usage:
# 1. configure tor build:
# ./configure \


@ -87,7 +87,7 @@ RATE_UP=5000
# machine does any other network activity. That is not very fun.
RATE_UP_TOR=1500
# RATE_UP_TOR_CEIL is the maximum rate allowed for all Tor traffic in
# RATE_UP_TOR_CEIL is the maximum rate allowed for all Tor trafic in
# kbits/sec.
RATE_UP_TOR_CEIL=5000

contrib/win32build/tor-mingw.nsi.in

@ -8,7 +8,7 @@
!include "LogicLib.nsh"
!include "FileFunc.nsh"
!insertmacro GetParameters
!define VERSION "0.3.4.1-alpha-dev"
!define VERSION "0.2.7.8-dev"
!define INSTALLER "tor-${VERSION}-win32.exe"
!define WEBSITE "https://www.torproject.org/"
!define LICENSE "LICENSE"

doc/HACKING

@ -0,0 +1,659 @@
Hacking Tor: An Incomplete Guide
================================
Getting started
---------------
For full information on how Tor is supposed to work, look at the files in
https://gitweb.torproject.org/torspec.git/tree
For an explanation of how to change Tor's design to work differently, look at
https://gitweb.torproject.org/torspec.git/blob_plain/HEAD:/proposals/001-process.txt
For the latest version of the code, get a copy of git, and
git clone https://git.torproject.org/git/tor
We talk about Tor on the tor-talk mailing list. Design proposals and
discussion belong on the tor-dev mailing list. We hang around on
irc.oftc.net, with general discussion happening on #tor and development
happening on #tor-dev.
How we use Git branches
-----------------------
Each main development series (like 0.2.1, 0.2.2, etc) has its main work
applied to a single branch. At most one series can be the development series
at a time; all other series are maintenance series that get bug-fixes only.
The development series is built in a git branch called "master"; the
maintenance series are built in branches called "maint-0.2.0", "maint-0.2.1",
and so on. We regularly merge the active maint branches forward.
For all series except the development series, we also have a "release" branch
(as in "release-0.2.1"). The release series is based on the corresponding
maintenance series, except that it deliberately lags the maint series for
most of its patches, so that bugfix patches are not typically included in a
maintenance release until they've been tested for a while in a development
release. Occasionally, we'll merge an urgent bugfix into the release branch
before it gets merged into maint, but that's rare.
If you're working on a bugfix for a bug that occurs in a particular version,
base your bugfix branch on the "maint" branch for the first supported series
that has that bug. (As of June 2013, we're supporting 0.2.3 and later.) If
you're working on a new feature, base it on the master branch.
How we log changes
------------------
When you do a commit that needs a ChangeLog entry, add a new file to
the "changes" toplevel subdirectory. It should have the format of a
one-entry changelog section from the current ChangeLog file, as in
o Major bugfixes:
- Fix a potential buffer overflow. Fixes bug 99999; bugfix on
0.3.1.4-beta.
To write a changes file, first categorize the change. Some common categories
are: Minor bugfixes, Major bugfixes, Minor features, Major features, Code
simplifications and refactoring. Then say what the change does. If
it's a bugfix, mention what bug it fixes and when the bug was
introduced. To find out which Git tag the change was introduced in,
you can use "git describe --contains <sha1 of commit>".
If at all possible, try to create this file in the same commit where you are
making the change. Please give it a distinctive name that no other branch will
use for the lifetime of your change. To verify the format of the changes file,
you can use "make check-changes".
When we go to make a release, we will concatenate all the entries
in changes to make a draft changelog, and clear the directory. We'll
then edit the draft changelog into a nice readable format.
What needs a changes file?::
A not-exhaustive list: Anything that might change user-visible
behavior. Anything that changes internals, documentation, or the build
system enough that somebody could notice. Big or interesting code
rewrites. Anything about which somebody might plausibly wonder "when
did that happen, and/or why did we do that" 6 months down the line.
Why use changes files instead of Git commit messages?::
Git commit messages are written for developers, not users, and they
are nigh-impossible to revise after the fact.
Why use changes files instead of entries in the ChangeLog?::
Having every single commit touch the ChangeLog file tended to create
zillions of merge conflicts.
Useful tools
------------
These aren't strictly necessary for hacking on Tor, but they can help track
down bugs.
Jenkins
~~~~~~~
https://jenkins.torproject.org
Dmalloc
~~~~~~~
The dmalloc library will keep track of memory allocation, so you can find out
if we're leaking memory, doing any double-frees, or so on.
dmalloc -l ~/dmalloc.log
(run the commands it tells you)
./configure --with-dmalloc
Valgrind
~~~~~~~~
valgrind --leak-check=yes --error-limit=no --show-reachable=yes src/or/tor
(Note that if you get a zillion openssl warnings, you will also need to
pass --undef-value-errors=no to valgrind, or rebuild your openssl
with -DPURIFY.)
Coverity
~~~~~~~~
Nick regularly runs the coverity static analyzer on the Tor codebase.
The preprocessor define __COVERITY__ is used to work around instances
where coverity picks up behavior that we wish to permit.
clang Static Analyzer
~~~~~~~~~~~~~~~~~~~~~
The clang static analyzer can be run on the Tor codebase using Xcode (WIP)
or a command-line build.
The preprocessor define __clang_analyzer__ is used to work around instances
where clang picks up behavior that we wish to permit.
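As an illustration of the pattern both of these defines follow, a
hypothetical workaround (this function is invented, not taken from the
Tor source) might look like:
-----
#include <stddef.h>

/* Invented example: "buf" is non-NULL by construction, but the
 * analyzers cannot prove that. */
static int
process_buffer(const char *buf)
{
#if defined(__COVERITY__) || defined(__clang_analyzer__)
  /* Redundant check, compiled in only during analysis, so the
   * analyzers stop reporting a NULL dereference below. */
  if (buf == NULL)
    return -1;
#endif
  return buf[0] == 'x';
}
-----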
clang Runtime Sanitizers
~~~~~~~~~~~~~~~~~~~~~~~~
To build the Tor codebase with the clang Address and Undefined Behavior
sanitizers, see the file contrib/clang/sanitize_blacklist.txt.
Preprocessor workarounds for instances where clang picks up behavior that
we wish to permit are also documented in the blacklist file.
Running lcov for unit test coverage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lcov is a utility that generates pretty HTML reports of test code coverage.
To generate such a report:
-----
./configure --enable-coverage
make
make coverage-html
$BROWSER ./coverage_html/index.html
-----
This will run the tor unit test suite `./src/test/test` and generate the HTML
coverage code report under the directory ./coverage_html/. To change the
output directory, use `make coverage-html HTML_COVER_DIR=./funky_new_cov_dir`.
Coverage diffs using lcov are not currently implemented, but are being
investigated (as of July 2014).
Running the unit tests
~~~~~~~~~~~~~~~~~~~~~~
To quickly run all the tests distributed with Tor:
-----
make check
-----
To run the fast unit tests only:
-----
make test
-----
To selectively run just some tests (the following can be combined
arbitrarily):
-----
./src/test/test <name_of_test> [<name of test 2>] ...
./src/test/test <prefix_of_name_of_test>.. [<prefix_of_name_of_test2>..] ...
./src/test/test :<name_of_excluded_test> [:<name_of_excluded_test2]...
-----
To run all tests, including those based on Stem or Chutney:
-----
make test-full
-----
To run all tests, including those based on Stem or Chutney that require a
working connection to the internet:
-----
make test-full-online
-----
Running gcov for unit test coverage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-----
./configure --enable-coverage
make
make check
# or--- make test-full ? make test-full-online?
mkdir coverage-output
./scripts/test/coverage coverage-output
-----
(On OSX, you'll need to start with "--enable-coverage CC=clang".)
Then, look at the .gcov files in coverage-output. '-' before a line means
that the compiler generated no code for that line. '#####' means that the
line was never reached. Lines with numbers were called that number of times.
If that doesn't work:
* Try configuring Tor with --disable-gcc-hardening
* You might need to run 'make clean' after you run './configure'.
If you make changes to Tor and want to get another set of coverage results,
you can run "make reset-gcov" to clear the intermediary gcov output.
If you have two different "coverage-output" directories, and you want to see
a meaningful diff between them, you can run:
-----
./scripts/test/cov-diff coverage-output1 coverage-output2 | less
-----
In this diff, any lines that were visited at least once will have coverage
"1". This lets you inspect what you (probably) really want to know: which
untested lines were changed? Are there any new untested lines?
Running integration tests
~~~~~~~~~~~~~~~~~~~~~~~~~
We have the beginnings of a set of scripts to run integration tests using
Chutney. To try them, set CHUTNEY_PATH to your chutney source directory, and
run "make test-network".
We also have scripts to run integration tests using Stem. To try them, set
STEM_SOURCE_DIR to your Stem source directory, and run "test-stem".
Profiling Tor with oprofile
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The oprofile tool runs (on Linux only!) to tell you what functions Tor is
spending its CPU time in, so we can identify performance bottlenecks.
Here are some basic instructions:
- Build tor with debugging symbols (you probably already have, unless
you messed with CFLAGS during the build process).
- Build all the libraries you care about with debugging symbols
(probably you only care about libssl, maybe zlib and Libevent).
- Copy this tor to a new directory
- Copy all the libraries it uses to that dir too (ldd ./tor will
tell you)
- Set LD_LIBRARY_PATH to include that dir. ldd ./tor should now
show you it's using the libs in that dir
- Run that tor
- Reset oprofile's counters and start it
* "opcontrol --reset; opcontrol --start", if Nick remembers right.
- After a while, have it dump the stats on tor and all the libs
in that dir you created.
* "opcontrol --dump;"
* "opreport -l that_dir/*"
- Profit
Generating and analyzing a callgraph
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Run ./scripts/maint/generate_callgraph.sh . This will generate a
bunch of files in a new ./callgraph directory.
2. Run ./scripts/maint/analyze_callgraph.py callgraph/src/*/* . This
will do a lot of graph operations and then dump out a new
"callgraph.pkl" file, containing data in Python's "pickle" format.
3. Run ./scripts/maint/display_callgraph.py . It will display:
- the number of functions reachable from each function.
- all strongly-connected components in the Tor callgraph
- the largest bottlenecks in the largest SCC in the Tor callgraph.
Note that currently the callgraph generator can't detect calls that pass
through function pointers.
Coding conventions
------------------
Patch checklist
~~~~~~~~~~~~~~~
If possible, send your patch as one of these (in descending order of
preference):
- A git branch we can pull from
- Patches generated by git format-patch
- A unified diff
Did you remember...
- To build your code while configured with --enable-gcc-warnings?
- To run "make check-spaces" on your code?
- To run "make check-docs" to see whether all new options are on
the manpage?
- To write unit tests, where possible?
- To base your code on the appropriate branch?
- To include a file in the "changes" directory as appropriate?
Whitespace and C conformance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Invoke "make check-spaces" from time to time, so it can tell you about
deviations from our C whitespace style. Generally, we use:
- Unix-style line endings
- K&R-style indentation
- No space before newlines
- A blank line at the end of each file
- Never more than one blank line in a row
- Always spaces, never tabs
- No more than 79 columns per line.
- Two spaces per indent.
- A space between control keywords and their corresponding paren
"if (x)", "while (x)", and "switch (x)", never "if(x)", "while(x)", or
"switch(x)".
- A space between anything and an open brace.
- No space between a function name and an opening paren. "puts(x)", not
"puts (x)".
- Function declarations at the start of the line.
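For instance, a short function in this style might look like the following
(an illustrative sketch, not code from the tree):
-----
/** Return 1 if <b>s</b> is a nonempty string of ASCII digits; else
 * return 0. */
static int
string_is_all_digits(const char *s)
{
  if (!s || !*s)
    return 0;

  while (*s) {
    if (*s < '0' || *s > '9')
      return 0;
    ++s;
  }
  return 1;
}
-----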
We try hard to build without warnings everywhere. In particular, if you're
using gcc, you should invoke the configure script with the option
"--enable-gcc-warnings". This will give a bunch of extra warning flags to
the compiler, and help us find divergences from our preferred C style.
Getting emacs to edit Tor source properly
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Nick likes to put the following snippet in his .emacs file:
-----
(add-hook 'c-mode-hook
(lambda ()
(font-lock-mode 1)
(set-variable 'show-trailing-whitespace t)
(let ((fname (expand-file-name (buffer-file-name))))
(cond
((string-match "^/home/nickm/src/libevent" fname)
(set-variable 'indent-tabs-mode t)
(set-variable 'c-basic-offset 4)
(set-variable 'tab-width 4))
((string-match "^/home/nickm/src/tor" fname)
(set-variable 'indent-tabs-mode nil)
(set-variable 'c-basic-offset 2))
((string-match "^/home/nickm/src/openssl" fname)
(set-variable 'indent-tabs-mode t)
(set-variable 'c-basic-offset 8)
(set-variable 'tab-width 8))
))))
-----
You'll note that it defaults to showing all trailing whitespace. The "cond"
test detects whether the file is one of a few C free software projects that I
often edit, and sets up the indentation level and tab preferences to match
what they want.
If you want to try this out, you'll need to change the filename regex
patterns to match where you keep your Tor files.
If you use emacs for editing Tor and nothing else, you could always just say:
-----
(add-hook 'c-mode-hook
(lambda ()
(font-lock-mode 1)
(set-variable 'show-trailing-whitespace t)
(set-variable 'indent-tabs-mode nil)
(set-variable 'c-basic-offset 2)))
-----
There is probably a better way to do this. No, we are probably not going
to clutter the files with emacs stuff.
Functions to use
~~~~~~~~~~~~~~~~
We have some wrapper functions like tor_malloc, tor_free, tor_strdup, and
tor_gettimeofday; use them instead of their generic equivalents. (They
always succeed or exit.)
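For example, a minimal sketch (the variable names are made up; the wrappers
are the real ones named above):
-----
char *copy = tor_strdup(name);    /* Never returns NULL. */
char *buf = tor_malloc(len + 1);  /* Exits on allocation failure. */
memcpy(buf, data, len);
buf[len] = '\0';
/* ... use copy and buf ... */
tor_free(buf);                    /* Frees and sets buf to NULL. */
tor_free(copy);
-----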
You can get a full list of the compatibility functions that Tor provides by
looking through src/common/util.h and src/common/compat.h. You can see the
available containers in src/common/container.h. You should probably
familiarize yourself with these modules before you write too much code, or
else you'll wind up reinventing the wheel.
Use 'INLINE' instead of 'inline', so that we work properly on Windows.
Calling and naming conventions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Whenever possible, functions should return -1 on error and 0 on success.
For multi-word identifiers, use lowercase words combined with
underscores. (e.g., "multi_word_identifier"). Use ALL_CAPS for macros and
constants.
Typenames should end with "_t".
Function names should be prefixed with a module name or object name. (In
general, code to manipulate an object should be a module with the same name
as the object, so it's hard to tell which convention is used.)
Functions that do things should have imperative-verb names
(e.g. buffer_clear, buffer_resize); functions that return booleans should
have predicate names (e.g. buffer_is_empty, buffer_needs_resizing).
If you find that you have four or more possible return code values, it's
probably time to create an enum. If you find that you are passing three or
more flags to a function, it's probably time to create a flags argument that
takes a bitfield.
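For example (a hypothetical module; none of these names are real Tor code):
-----
typedef struct buffer_t buffer_t;

#define BUFFER_MAX_LEN 65536

/* Imperative name: does something; returns 0 on success, -1 on error. */
int buffer_resize(buffer_t *buf, size_t new_len);

/* Predicate name: returns a boolean. */
int buffer_is_empty(const buffer_t *buf);
-----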
What To Optimize
~~~~~~~~~~~~~~~~
Don't optimize anything if it's not in the critical path. Right now, the
critical path seems to be AES, logging, and the network itself. Feel free to
do your own profiling to determine otherwise.
Log conventions
~~~~~~~~~~~~~~~
https://www.torproject.org/docs/faq#LogLevel
No error or warning messages should be expected during normal OR or OP
operation.
If a library function is currently called such that failure always means ERR,
then the library function should log WARN and let the caller log ERR.
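For example, a sketch of that division of labor (the log domain and message
here are illustrative):
-----
if (fd < 0) {
  log_warn(LD_FS, "Couldn't open \"%s\": %s", fname, strerror(errno));
  return -1;  /* The caller decides whether this deserves log_err(). */
}
-----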
Every message of severity INFO or higher should either (A) be intelligible
to end-users who don't know the Tor source; or (B) somehow inform the
end-users that they aren't expected to understand the message (perhaps
with a string like "internal error"). Option (A) is to be preferred to
option (B).
Doxygen
~~~~~~~~
We use the 'doxygen' utility to generate documentation from our
source code. Here's how to use it:
1. Begin every file that should be documented with
/**
* \file filename.c
* \brief Short description of the file.
**/
(Doxygen will recognize any comment beginning with /** as special.)
2. Before any function, structure, #define, or variable you want to
document, add a comment of the form:
/** Describe the function's actions in imperative sentences.
*
* Use blank lines for paragraph breaks
* - and
* - hyphens
* - for
* - lists.
*
* Write <b>argument_names</b> in boldface.
*
* \code
* place_example_code();
* between_code_and_endcode_commands();
* \endcode
*/
3. Make sure to escape the characters "<", ">", "\", "%" and "#" as "\<",
"\>", "\\", "\%", and "\#".
4. To document structure members, you can use two forms:
struct foo {
/** You can put the comment before an element; */
int a;
int b; /**< Or use the less-than symbol to put the comment
* after the element. */
};
5. To generate documentation from the Tor source code, type:
$ doxygen -g
to generate a file called 'Doxyfile'. Edit that file and run
'doxygen' to generate the API documentation.
6. See the Doxygen manual for more information; this summary just
scratches the surface.
Doxygen comment conventions
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Say what functions do as a series of one or more imperative sentences, as
though you were telling somebody how to be the function. In other words, DO
NOT say:
/** The strtol function parses a number.
*
* nptr -- the string to parse. It can include whitespace.
* endptr -- a string pointer to hold the first thing that is not part
* of the number, if present.
* base -- the numeric base.
* returns: the resulting number.
*/
long strtol(const char *nptr, char **endptr, int base);
Instead, please DO say:
/** Parse a number in radix <b>base</b> from the string <b>nptr</b>,
* and return the result. Skip all leading whitespace. If
* <b>endptr</b> is not NULL, set *<b>endptr</b> to the first character
* after the number parsed.
**/
long strtol(const char *nptr, char **endptr, int base);
Doxygen comments are the contract in our abstraction-by-contract world: if
the functions that call your function rely on it doing something, then your
function should mention that it does that something in the documentation. If
you rely on a function doing something beyond what is in its documentation,
then you should watch out, or it might do something else later.
Putting out a new release
-------------------------
Here are the steps Roger takes when putting out a new Tor release:
1) Use it for a while, as a client, as a relay, as a hidden service,
and as a directory authority. See if it has any obvious bugs, and
resolve those.
1.5) As applicable, merge the maint-X branch into the release-X branch.
2) Gather the changes/* files into a changelog entry, rewriting many
of them and reordering to focus on what users and funders would find
interesting and understandable.
2.1) Make sure that everything that wants a bug number has one.
Make sure that everything which is a bugfix says what version
it was a bugfix on.
2.2) Concatenate them.
2.3) Sort them by section. Within each section, sort by "version it's
a bugfix on", else by numerical ticket order.
2.4) Clean them up:
Standard idioms:
"Fixes bug 9999; bugfix on 0.3.3.3-alpha."
One space after a period.
Make stuff very terse
Make sure each section name ends with a colon
Describe the user-visible problem right away
Mention relevant config options by name. If they're rare or unusual,
remind people what they're for
Avoid starting lines with open-paren
Present and imperative tense: not past.
'Relays', not 'servers' or 'nodes' or 'Tor relays'.
"Stop FOOing", not "Fix a bug where we would FOO".
Try not to let any given section be longer than about a page. Break up
long sections into subsections by some sort of common subtopic. This
guideline is especially important when organizing Release Notes for
new stable releases.
If a given changes stanza showed up in a different release (e.g.
maint-0.2.1), be sure to make the stanzas identical (so people can
distinguish if these are the same change).
2.5) Merge them in.
2.6) Clean everything one last time.
2.7) Run ./scripts/maint/format_changelog.py to make it prettier.
3) Compose a short release blurb to highlight the user-facing
changes. Insert said release blurb into the ChangeLog stanza. If it's
a stable release, add it to the ReleaseNotes file too. If we're adding
to a release-0.2.x branch, manually commit the changelogs to the later
git branches too.
4) In maint-0.2.x, bump the version number in configure.ac and run
scripts/maint/updateVersions.pl to update version numbers in other
places, and commit. Then merge maint-0.2.x into release-0.2.x.
(NOTE: To bump the version number, edit configure.ac, and then run
either make, or 'perl scripts/maint/updateVersions.pl', depending on
your version.)
5) Make dist, put the tarball up somewhere, and tell #tor about it. Wait
a while to see if anybody has problems building it. Try to get Sebastian
or somebody to try building it on Windows.
6) Get at least two of weasel/arma/sebastian to put the new version number
in their approved versions list.
7) Sign the tarball, then sign and push the git tag:
gpg -ba <the_tarball>
git tag -u <keyid> tor-0.2.x.y-status
git push origin tag tor-0.2.x.y-status
8a) scp the tarball and its sig to the dist website, i.e.
/srv/dist-master.torproject.org/htdocs/ on dist-master. When you want
it to go live, you run "static-update-component dist.torproject.org"
on dist-master.
8b) Edit "include/versions.wmi" and "Makefile" to note the new version.
9) Email the packagers (cc'ing tor-assistants) that a new tarball is up.
The current list of packagers is:
{weasel,gk,mikeperry} at torproject dot org
{blueness} at gentoo dot org
{paul} at invizbox dot io
{ondrej.mikle} at gmail dot com
{lfleischer} at archlinux dot org
10) Add the version number to Trac. To do this, go to Trac, log in,
select "Admin" near the top of the screen, then select "Versions" from
the menu on the left. At the right, there will be an "Add version"
box. By convention, we enter the version in the form "Tor:
0.2.2.23-alpha" (or whatever the version is), and we select the date as
the date in the ChangeLog.
11) Forward-port the ChangeLog.
12) Wait up to a day or two (for a development release), or until most
packages are up (for a stable release), and mail the release blurb and
changelog to tor-talk or tor-announce.
(We might be moving to faster announcements, but don't announce until
the website is at least updated.)
13) If it's a stable release, bump the version number in the maint-x.y.z
branch to "newversion-dev", and do a "merge -s ours" merge to avoid
taking that change into master. Do a similar 'merge -s theirs'
merge to get the change (and only that change) into release. (Some
of the build scripts require that maint merge cleanly into release.)

View File

@ -1,437 +0,0 @@
Coding conventions for Tor
==========================
tl;dr:
- Run configure with `--enable-fatal-warnings`
- Document your functions
- Write unit tests
- Run `make check` before submitting a patch
- Run `make distcheck` if you have made changes to build system components
- Add a file in `changes` for your branch.
Patch checklist
---------------
If possible, send your patch as one of these (in descending order of
preference):
- A git branch we can pull from
- Patches generated by git format-patch
- A unified diff
Did you remember...
- To build your code while configured with `--enable-fatal-warnings`?
- To run `make check-docs` to see whether all new options are on
the manpage?
- To write unit tests, where possible?
- To run `make test-full` to test against all unit and integration tests (or
`make test-full-online` if you have a working connection to the internet)?
- To test that the distribution will actually work via `make distcheck`?
- To base your code on the appropriate branch?
- To include a file in the `changes` directory as appropriate?
If you are submitting a major patch or new feature, or want to in the future...
- Set up Chutney and Stem, see HACKING/WritingTests.md
- Run `make test-full` to test against all unit and integration tests.
If you have changed build system components:
- Please run `make distcheck`
- For example, if you have changed Makefiles, autoconf files, or anything
else that affects the build system.
License issues
==============
Tor is distributed under the license terms in the LICENSE -- in
brief, the "3-clause BSD license". If you send us code to
distribute with Tor, it needs to be code that we can distribute
under those terms. Please don't send us patches unless you agree
to allow this.
Some compatible licenses include:
- 3-clause BSD
- 2-clause BSD
- CC0 Public Domain Dedication
How we use Git branches
=======================
Each main development series (like 0.2.1, 0.2.2, etc) has its main work
applied to a single branch. At most one series can be the development series
at a time; all other series are maintenance series that get bug-fixes only.
The development series is built in a git branch called "master"; the
maintenance series are built in branches called "maint-0.2.0", "maint-0.2.1",
and so on. We regularly merge the active maint branches forward.
For all series except the development series, we also have a "release" branch
(as in "release-0.2.1"). The release series is based on the corresponding
maintenance series, except that it deliberately lags the maint series for
most of its patches, so that bugfix patches are not typically included in a
maintenance release until they've been tested for a while in a development
release. Occasionally, we'll merge an urgent bugfix into the release branch
before it gets merged into maint, but that's rare.
If you're working on a bugfix for a bug that occurs in a particular version,
base your bugfix branch on the "maint" branch for the first supported series
that has that bug. (As of June 2013, we're supporting 0.2.3 and later.)
If you're working on a new feature, base it on the master branch. If you're
working on a new feature and it will take a while to implement and/or you'd
like to avoid the possibility of unrelated bugs in Tor while you're
implementing your feature, consider branching off of the latest maint- branch.
_Never_ branch off a release- branch. Don't branch off a tag either: they come
from release branches. Doing so will likely produce a nightmare of merge
conflicts in the ChangeLog when it comes time to merge your branch into Tor.
Best advice: don't try to keep an independent branch forked for more than 6
months and expect it to merge cleanly. Try to merge pieces early and often.
How we log changes
==================
When you do a commit that needs a ChangeLog entry, add a new file to
the `changes` toplevel subdirectory. It should have the format of a
one-entry changelog section from the current ChangeLog file, as in
- Major bugfixes:
- Fix a potential buffer overflow. Fixes bug 99999; bugfix on
0.3.1.4-beta.
To write a changes file, first categorize the change. Some common categories
are: Minor bugfixes, Major bugfixes, Minor features, Major features, Code
simplifications and refactoring. Then say what the change does. If
it's a bugfix, mention what bug it fixes and when the bug was
introduced. To find out which Git tag the change was introduced in,
you can use `git describe --contains <sha1 of commit>`.
If at all possible, try to create this file in the same commit where you are
making the change. Please give it a distinctive name that no other branch will
use for the lifetime of your change. To verify the format of the changes file,
you can use `make check-changes`. This is run automatically as part of
`make check` -- if it fails, we must fix it before we release. These
checks are implemented in `scripts/maint/lintChanges.py`.
Changes file style guide:
* Changes files begin with " o Header (subheading):". The header
should usually be "Minor/Major bugfixes/features". The subheading is a
particular area within Tor. See the ChangeLog for examples.
* Make everything terse.
* Write from the user's point of view: describe the user-visible changes
right away.
* Mention configuration options by name. If they're rare or unusual,
remind people what they're for.
* Describe changes in the present tense and in the imperative: not past.
* Every bugfix should have a sentence of the form "Fixes bug 1234; bugfix
on 0.1.2.3-alpha", describing what bug was fixed and where it came from.
* "Relays", not "servers", "nodes", or "Tor relays".
When we go to make a release, we will concatenate all the entries
in changes to make a draft changelog, and clear the directory. We'll
then edit the draft changelog into a nice readable format.
What needs a changes file?
* A not-exhaustive list: Anything that might change user-visible
behavior. Anything that changes internals, documentation, or the build
system enough that somebody could notice. Big or interesting code
rewrites. Anything about which somebody might plausibly wonder "when
did that happen, and/or why did we do that" 6 months down the line.
What does not need a changes file?
* Bugfixes for code that hasn't shipped in any released version of Tor
Why use changes files instead of Git commit messages?
* Git commit messages are written for developers, not users, and they
are nigh-impossible to revise after the fact.
Why use changes files instead of entries in the ChangeLog?
* Having every single commit touch the ChangeLog file tended to create
zillions of merge conflicts.
Whitespace and C conformance
----------------------------
Invoke `make check-spaces` from time to time, so it can tell you about
deviations from our C whitespace style. Generally, we use:
- Unix-style line endings
- K&R-style indentation
- No space before newlines
- A blank line at the end of each file
- Never more than one blank line in a row
- Always spaces, never tabs
- No more than 79 columns per line.
- Two spaces per indent.
- A space between control keywords and their corresponding paren
`if (x)`, `while (x)`, and `switch (x)`, never `if(x)`, `while(x)`, or
`switch(x)`.
- A space between anything and an open brace.
- No space between a function name and an opening paren. `puts(x)`, not
`puts (x)`.
- Function declarations at the start of the line.
We try hard to build without warnings everywhere. In particular, if
you're using gcc, you should invoke the configure script with the
option `--enable-fatal-warnings`. This will tell the compiler
to make all warnings into errors.
Functions to use; functions not to use
--------------------------------------
We have some wrapper functions like `tor_malloc`, `tor_free`, `tor_strdup`, and
`tor_gettimeofday`; use them instead of their generic equivalents. (They
always succeed or exit.)
You can get a full list of the compatibility functions that Tor provides by
looking through `src/common/util*.h` and `src/common/compat*.h`. You can see the
available containers in `src/common/container*.h`. You should probably
familiarize yourself with these modules before you write too much code, or
else you'll wind up reinventing the wheel.
We don't use `strcat` or `strcpy` or `sprintf` or any of those notoriously broken
old C functions. Use `strlcat`, `strlcpy`, or `tor_snprintf/tor_asprintf` instead.
We don't call `memcmp()` directly. Use `fast_memeq()`, `fast_memneq()`,
`tor_memeq()`, or `tor_memneq()` for most purposes.
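For example (an illustrative sketch; the buffer, host, and digest names are
made up):

    char buf[64];
    /* Instead of sprintf(buf, "%s:%d", host, port): */
    tor_snprintf(buf, sizeof(buf), "%s:%d", host, port);

    /* Instead of memcmp(a_digest, b_digest, DIGEST_LEN) == 0, compare
     * secrets in a data-independent way: */
    if (tor_memeq(a_digest, b_digest, DIGEST_LEN))
      handle_digest_match();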
Also see a longer list of functions to avoid in:
https://people.torproject.org/~nickm/tor-auto/internal/this-not-that.html
Floating point math is hard
---------------------------
Floating point arithmetic as typically implemented by computers is
very counterintuitive. Failure to adequately analyze floating point
usage can result in surprising behavior and even security
vulnerabilities!
General advice:
- Don't use floating point.
- If you must use floating point, document how the limits of
floating point precision and calculation accuracy affect function
outputs.
- Try to do as much as possible of your calculations using integers
(possibly acting as fixed-point numbers) and convert to floating
point for display.
- If you must send floating point numbers on the wire, serialize
them in a platform-independent way. Tor avoids exchanging
floating-point values, but when it does, it uses ASCII numerals,
with a decimal point (".").
- Binary fractions behave very differently from decimal fractions.
Make sure you understand how these differences affect your
calculations.
- Every floating point arithmetic operation is an opportunity to
lose precision, overflow, underflow, or otherwise produce
undesired results. Addition and subtraction tend to be worse
than multiplication and division (due to things like catastrophic
cancellation). Try to arrange your calculations to minimize such
effects.
- Changing the order of operations changes the results of many
floating-point calculations. Be careful when you simplify
calculations! If the order is significant, document it using a
code comment.
- Comparing most floating point values for equality is unreliable.
Avoid using `==`; instead, use `>=` or `<=`. If you use an
epsilon value, make sure it's appropriate for the ranges in
question. (See the sketch after this list.)
- Different environments (including compiler flags and per-thread
state on a single platform!) can get different results from the
same floating point calculations. This means you can't use
floats in anything that needs to be deterministic, like consensus
generation. This also makes reliable unit tests of
floating-point outputs hard to write.
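For example, a minimal sketch of an epsilon comparison (the tolerance must be
chosen for the ranges actually involved):

    #include <math.h>

    /* Return 1 if a and b differ by at most eps. */
    static int
    doubles_are_close(double a, double b, double eps)
    {
      return fabs(a - b) <= eps;
    }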
For additional useful advice (and a little bit of background), see
[What Every Programmer Should Know About Floating-Point
Arithmetic](http://floating-point-gui.de/).
A list of notable (and surprising) facts about floating point
arithmetic is at [Floating-point
complexities](https://randomascii.wordpress.com/2012/04/05/floating-point-complexities/).
Most of that [series of posts on floating
point](https://randomascii.wordpress.com/category/floating-point/) is
helpful.
For more detailed (and math-intensive) background, see [What Every
Computer Scientist Should Know About Floating-Point
Arithmetic](https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html).
Other C conventions
-------------------
The `a ? b : c` ternary operator only goes inside other expressions;
don't use it as a replacement for if. (You can ignore this inside macro
definitions when necessary.)
Assignment operators shouldn't nest inside other expressions. (You can
ignore this inside macro definitions when necessary.)
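For example (illustrative only):

    /* OK: a ternary inside a larger expression. */
    int severity = sev_is_high ? LOG_WARN : LOG_INFO;

    /* Not OK: a ternary standing in for an if statement:
     *   ok ? handle_success() : handle_failure();
     * Write an explicit if/else instead. */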
Functions not to write
----------------------
Try to never hand-write new code to parse or generate binary
formats. Instead, use trunnel if at all possible. See
https://gitweb.torproject.org/trunnel.git/tree
for more information about trunnel.
For information on adding new trunnel code to Tor, see src/trunnel/README
Calling and naming conventions
------------------------------
Whenever possible, functions should return -1 on error and 0 on success.
For multi-word identifiers, use lowercase words combined with
underscores. (e.g., `multi_word_identifier`). Use ALL_CAPS for macros and
constants.
Typenames should end with `_t`.
Function names should be prefixed with a module name or object name. (In
general, code to manipulate an object should be a module with the same name
as the object, so it's hard to tell which convention is used.)
Functions that do things should have imperative-verb names
(e.g. `buffer_clear`, `buffer_resize`); functions that return booleans should
have predicate names (e.g. `buffer_is_empty`, `buffer_needs_resizing`).
If you find that you have four or more possible return code values, it's
probably time to create an enum. If you find that you are passing three or
more flags to a function, it's probably time to create a flags argument that
takes a bitfield.
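For example (hypothetical names throughout):

    typedef struct cache_t cache_t;

    typedef enum {
      FETCH_OK = 0,
      FETCH_NOT_FOUND,
      FETCH_IO_ERROR,
      FETCH_BAD_FORMAT,
    } fetch_status_t;

    #define CACHE_FLAG_REFRESH   (1u<<0)
    #define CACHE_FLAG_PREFER_V6 (1u<<1)

    fetch_status_t cache_fetch(cache_t *cache, const char *key,
                               unsigned flags);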
What To Optimize
----------------
Don't optimize anything if it's not in the critical path. Right now, the
critical path seems to be AES, logging, and the network itself. Feel free to
do your own profiling to determine otherwise.
Log conventions
---------------
`https://www.torproject.org/docs/faq#LogLevel`
No error or warning messages should be expected during normal OR or OP
operation.
If a library function is currently called such that failure always means ERR,
then the library function should log WARN and let the caller log ERR.
Every message of severity INFO or higher should either (A) be intelligible
to end-users who don't know the Tor source; or (B) somehow inform the
end-users that they aren't expected to understand the message (perhaps
with a string like "internal error"). Option (A) is to be preferred to
option (B).
Assertions In Tor
-----------------
Assertions should be used for bug-detection only. Don't use assertions to
detect bad user inputs, network errors, resource exhaustion, or similar
issues.
Tor is always built with assertions enabled, so try to only use
`tor_assert()` for cases where you are absolutely sure that crashing is the
least bad option. Many bugs have been caused by use of `tor_assert()` when
another kind of check would have been safer.
If you're writing an assertion to test for a bug that you _can_ recover from,
use `tor_assert_nonfatal()` in place of `tor_assert()`. If you'd like to
write a conditional that incorporates a nonfatal assertion, use the `BUG()`
macro, as in:
if (BUG(ptr == NULL))
return -1;
Allocator conventions
---------------------
By convention, any tor type with a name like `abc_t` should be allocated
by a function named `abc_new()`. This function should never return
NULL.
Also, a type named `abc_t` should be freed by a function named `abc_free_()`.
Don't call this `abc_free_()` function directly -- instead, wrap it in a
macro called `abc_free()`, using the `FREE_AND_NULL` macro:
void abc_free_(abc_t *obj);
#define abc_free(obj) FREE_AND_NULL(abc_t, abc_free_, (obj))
This macro will free the underlying `abc_t` object, and will also set
the object pointer to NULL.
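In use, that looks like (illustrative):

    abc_t *obj = abc_new();
    /* ... */
    abc_free(obj);            /* Frees the object and sets obj to NULL. */
    tor_assert(obj == NULL);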
You should define all `abc_free_()` functions to accept NULL inputs:
void
abc_free_(abc_t *obj)
{
if (!obj)
return;
tor_free(obj->name);
thing_free(obj->thing);
tor_free(obj);
}
If you need a free function that takes a `void *` argument (for example,
to use it as a function callback), define it with a name like
`abc_free_void()`:
static void
abc_free_void(void *obj)
{
abc_free_(obj);
}
Doxygen comment conventions
---------------------------
Say what functions do as a series of one or more imperative sentences, as
though you were telling somebody how to be the function. In other words, DO
NOT say:
/** The strtol function parses a number.
*
* nptr -- the string to parse. It can include whitespace.
* endptr -- a string pointer to hold the first thing that is not part
* of the number, if present.
* base -- the numeric base.
* returns: the resulting number.
*/
long strtol(const char *nptr, char **endptr, int base);
Instead, please DO say:
/** Parse a number in radix <b>base</b> from the string <b>nptr</b>,
* and return the result. Skip all leading whitespace. If
* <b>endptr</b> is not NULL, set *<b>endptr</b> to the first character
* after the number parsed.
**/
long strtol(const char *nptr, char **endptr, int base);
Doxygen comments are the contract in our abstraction-by-contract world: if
the functions that call your function rely on it doing something, then your
function should mention that it does that something in the documentation. If
you rely on a function doing something beyond what is in its documentation,
then you should watch out, or it might do something else later.

View File

@ -1,523 +0,0 @@
Rust Coding Standards
=======================
You MUST follow the standards laid out in `.../doc/HACKING/CodingStandards.md`,
where applicable.
Module/Crate Declarations
---------------------------
Each Tor C module which is being rewritten MUST be in its own crate.
See the structure of `.../src/rust` for examples.
In your crate, you MUST use `lib.rs` ONLY for pulling in external
crates (e.g. `extern crate libc;`) and exporting public objects from
other Rust modules (e.g. `pub use mymodule::foo;`). For example, if
you create a crate in `.../src/rust/yourcrate`, your Rust code should
live in `.../src/rust/yourcrate/yourcode.rs` and the public interface
to it should be exported in `.../src/rust/yourcrate/lib.rs`.
If your code is to be called from Tor C code, you MUST define a safe
`ffi.rs`. See the "Safety" section further down for more details.
For example, in a hypothetical `tor_addition` Rust module:
In `.../src/rust/tor_addition/addition.rs`:
pub fn get_sum(a: i32, b: i32) -> i32 {
a + b
}
In `.../src/rust/tor_addition/lib.rs`:
pub use addition::*;
In `.../src/rust/tor_addition/ffi.rs`:
#[no_mangle]
pub extern "C" fn tor_get_sum(a: c_int, b: c_int) -> c_int {
get_sum(a, b)
}
If your Rust code must call out to parts of Tor's C code, you must
declare the functions you are calling in the `external` crate, located
at `.../src/rust/external`.
<!-- XXX get better examples of how to declare these externs, when/how they -->
<!-- XXX are unsafe, what they are expected to do —isis -->
Modules should strive to be below 500 lines (tests excluded). Single
responsibility and limited dependencies should be a guiding standard.
If you have any external modules as dependencies (e.g. `extern crate
libc;`), you MUST declare them in your crate's `lib.rs` and NOT in any
other module.
Dependencies and versions
---------------------------
In general, we use modules from only the Rust standard library
whenever possible. We will review including external crates on a
case-by-case basis.
If a crate only contains traits meant for compatibility between Rust
crates, such as [the digest crate](https://crates.io/crates/digest) or
[the failure crate](https://crates.io/crates/failure), it is very likely
permissible to add it as a dependency. However, a brief review should
be conducted as to the usefulness of implementing external traits
(i.e. how widespread is the usage, how many other crates either
implement the traits or have trait bounds based upon them), as well as
the stability of the traits (i.e. if the trait is going to change, we'll
potentially have to re-do all our implementations of it).
For large external libraries, especially which implement features which
would be labour-intensive to reproduce/maintain ourselves, such as
cryptographic or mathematical/statistics libraries, only crates which
have stabilised to 1.0.0 should be considered, however, again, we may
make exceptions on a case-by-case basis.
Currently, Tor requires that you use the latest stable Rust version. At
some point in the future, we will freeze on a given stable Rust version,
to ensure backward compatibility with stable distributions that ship it.
Updating/Adding Dependencies
------------------------------
To add/remove/update dependencies, first add your dependencies,
exactly specifying their versions, into the appropriate *crate-level*
`Cargo.toml` in `src/rust/` (i.e. *not* `/src/rust/Cargo.toml`, but
instead the one for your crate). Also, investigate whether your
dependency has any optional dependencies which are unnecessary but are
enabled by default. If so, you'll likely be able to enable/disable
them via some feature, e.g.:
```toml
[dependencies]
foo = { version = "1.0.0", default-features = false }
```
Next, run `/scripts/maint/updateRustDependencies.sh`. Then, go into
`src/ext/rust` and commit the changes to the `tor-rust-dependencies`
repo.
Documentation
---------------
You MUST include `#[deny(missing_docs)]` in your crate.
For function/method comments, you SHOULD include a one-sentence, "first person"
description of function behaviour (see requirements for documentation as
described in `.../src/HACKING/CodingStandards.md`), then an `# Inputs` section
for inputs or initialisation values, a `# Returns` section for return
values/types, a `# Warning` section containing warnings for unsafe behaviours or
panics that could happen. For publicly accessible
types/constants/objects/functions/methods, you SHOULD also include an
`# Examples` section with runnable doctests.
You MUST document your module with _module docstring_ comments,
i.e. `//!` at the beginning of each line.
Style
-------
You SHOULD consider breaking up large literal numbers with `_` when it makes it
more human readable to do so, e.g. `let x: u64 = 100_000_000_000`.
Testing
---------
All code MUST be unittested and integration tested.
Public functions/objects exported from a crate SHOULD include doctests
describing how the function/object is expected to be used.
Integration tests SHOULD go into a `tests/` directory inside your
crate. Unittests SHOULD go into their own module inside the module
they are testing, e.g. in `.../src/rust/tor_addition/addition.rs` you
should put:
#[cfg(test)]
mod test {
use super::*;
#[test]
fn addition_with_zero() {
let sum: i32 = get_sum(5i32, 0i32);
assert_eq!(sum, 5);
}
}
Benchmarking
--------------
The external `test` crate can be used for most benchmarking. However, using
this crate requires nightly Rust. Since we may want to switch to a more
stable Rust compiler eventually, we shouldn't do things which will automatically
break builds for stable compilers. Therefore, you MUST feature-gate your
benchmarks in the following manner.
If you wish to benchmark some of your Rust code, you MUST put the
following in the `[features]` section of your crate's `Cargo.toml`:
[features]
bench = []
Next, in your crate's `lib.rs` you MUST put:
#[cfg(all(test, feature = "bench"))]
extern crate test;
This ensures that the external crate `test`, which contains utilities
for basic benchmarks, is only used when running benchmarks via `cargo
bench --features bench`.
Finally, to write your benchmark code, in
`.../src/rust/tor_addition/addition.rs` you SHOULD put:
#[cfg(all(test, feature = "bench"))]
mod bench {
use test::Bencher;
use super::*;
#[bench]
fn addition_small_integers(b: &mut Bencher) {
b.iter(|| get_sum(5i32, 0i32));
}
}
Fuzzing
---------
If you wish to fuzz parts of your code, please see the
[`cargo fuzz`](https://github.com/rust-fuzz/cargo-fuzz) crate, which uses
[libfuzzer-sys](https://github.com/rust-fuzz/libfuzzer-sys).
Whitespace & Formatting
-------------------------
You MUST run `rustfmt` (https://github.com/rust-lang-nursery/rustfmt)
on your code before your code will be merged. You can install rustfmt
by doing `cargo install rustfmt-nightly` and then run it with `cargo
fmt`.
Safety
--------
You SHOULD read [the nomicon](https://doc.rust-lang.org/nomicon/) before writing
Rust FFI code. It is *highly advised* that you read and write normal Rust code
before attempting to write FFI or any other unsafe code.
Here are some additional bits of advice and rules:
0. Any behaviours which Rust considers to be undefined are forbidden
From https://doc.rust-lang.org/reference/behavior-considered-undefined.html:
> Behavior considered undefined
>
> The following is a list of behavior which is forbidden in all Rust code,
> including within unsafe blocks and unsafe functions. Type checking provides the
> guarantee that these issues are never caused by safe code.
>
> * Data races
> * Dereferencing a null/dangling raw pointer
> * Reads of [undef](http://llvm.org/docs/LangRef.html#undefined-values)
> (uninitialized) memory
> * Breaking the
> [pointer aliasing rules](http://llvm.org/docs/LangRef.html#pointer-aliasing-rules)
> with raw pointers (a subset of the rules used by C)
> * `&mut T` and `&T` follow LLVM's scoped noalias model, except if the `&T`
> contains an `UnsafeCell<U>`. Unsafe code must not violate these aliasing
> guarantees.
> * Mutating non-mutable data (that is, data reached through a shared
> reference or data owned by a `let` binding), unless that data is
> contained within an `UnsafeCell<U>`.
> * Invoking undefined behavior via compiler intrinsics:
> - Indexing outside of the bounds of an object with
> `std::ptr::offset` (`offset` intrinsic), with the exception of
> one byte past the end which is permitted.
> - Using `std::ptr::copy_nonoverlapping_memory` (`memcpy32`/`memcpy64`
> intrinsics) on overlapping buffers
> * Invalid values in primitive types, even in private fields/locals:
> - Dangling/null references or boxes
> - A value other than `false` (0) or `true` (1) in a `bool`
> - A discriminant in an `enum` not included in the type definition
> - A value in a `char` which is a surrogate or above `char::MAX`
> - Non-UTF-8 byte sequences in a `str`
> * Unwinding into Rust from foreign code or unwinding from Rust into foreign
> code. Rust's failure system is not compatible with exception handling in other
> languages. Unwinding must be caught and handled at FFI boundaries.
1. `unwrap()`
If you call `unwrap()`, anywhere, even in a test, you MUST include
an inline comment stating how the unwrap will either 1) never fail,
or 2) should fail (i.e. in a unittest).
You SHOULD NOT use `unwrap()` anywhere in which it is possible to handle the
potential error with either `expect()` or the eel operator, `?`.
For example, consider a function which parses a string into an integer:
fn parse_port_number(config_string: &str) -> u16 {
u16::from_str_radix(config_string, 10).unwrap()
}
There are numerous ways this can fail, and the `unwrap()` will cause the
whole program to byte the dust! Instead, either you SHOULD use `ok()`
(or another equivalent method which will return an `Option` or a `Result`)
and change the return type to be compatible:

fn parse_port_number(config_string: &str) -> Option<u16> {
    u16::from_str_radix(config_string, 10).ok()
}
or you SHOULD use `or()` (or another similar method) and return a `Result`:

fn parse_port_number(config_string: &str) -> Result<u16, &'static str> {
    u16::from_str_radix(config_string, 10).or(Err("Couldn't parse port into a u16"))
}
Using methods like `or()` can be particularly handy when you must do
something afterwards with the data, for example, if we wanted to guarantee
that the port is high. Combining these methods with the eel operator (`?`)
makes this even easier:
fn parse_port_number(config_string: &str) -> Result<u16, &'static str> {
    let port = u16::from_str_radix(config_string, 10).or(Err("Couldn't parse port into a u16"))?;
    if port > 1024 {
        return Ok(port);
    } else {
        return Err("Low ports not allowed");
    }
}
2. `unsafe`
If you use `unsafe`, you MUST describe a contract in your
documentation which describes how and when the unsafe code may
fail, and what expectations are made w.r.t. the interfaces to
unsafe code. This is also REQUIRED for major pieces of FFI between
C and Rust.
When creating an FFI in Rust for C code to call, it is NOT REQUIRED
to declare the entire function `unsafe`. For example, rather than doing:
#[no_mangle]
pub unsafe extern "C" fn increment_and_combine_numbers(mut numbers: [u8; 4]) -> u32 {
for number in &mut numbers {
*number += 1;
}
std::mem::transmute::<[u8; 4], u32>(numbers)
}
You SHOULD instead do:
#[no_mangle]
pub extern "C" fn increment_and_combine_numbers(mut numbers: [u8; 4]) -> u32 {
for index in 0..numbers.len() {
numbers[index] += 1;
}
unsafe {
std::mem::transmute::<[u8; 4], u32>(numbers)
}
}
3. Pass only C-compatible primitive types and bytes over the boundary
Rust's C-compatible primitive types are integers and floats.
These types are declared in the [libc crate](https://doc.rust-lang.org/libc/x86_64-unknown-linux-gnu/libc/index.html#types).
Most Rust objects have different [representations](https://doc.rust-lang.org/libc/x86_64-unknown-linux-gnu/libc/index.html#types)
in C and Rust, so they can't be passed using FFI.
Tor currently uses the following Rust primitive types from libc for FFI:
* defined-size integers: `uint32_t`
* native-sized integers: `c_int`
* native-sized floats: `c_double`
* native-sized raw pointers: `* c_void`, `* c_char`, `** c_char`
TODO: C smartlist to Stringlist conversion using FFI
The only non-primitive type which may cross the FFI boundary is
bytes, e.g. `&[u8]`. This SHOULD be done on the Rust side by
passing a pointer (`*mut libc::c_char`). The length can be passed
explicitly (`libc::size_t`), or the string can be a NUL-terminated C
string.
One might be tempted to do this via doing
`CString::new("blah").unwrap().into_raw()`. This has several problems:
a) If you do `CString::new("bl\x00ah")` then the unwrap() will fail
due to the additional NULL terminator, causing a dangling
pointer to be returned (as well as a potential use-after-free).
b) Returning the raw pointer will cause the CString to run its deallocator,
which causes any C code which tries to access the contents to dereference a
NULL pointer.
c) If we were to do `as_raw()` this would result in a potential double-free
since the Rust deallocator would run and possibly Tor's deallocator.
d) Calling `into_raw()` without later using the same pointer in Rust to call
`from_raw()` and then deallocate in Rust can result in a
[memory leak](https://doc.rust-lang.org/std/ffi/struct.CString.html#method.into_raw).
[It was determined](https://github.com/rust-lang/rust/pull/41074) that this
is safe to do if you use the same allocator in C and Rust and also specify
the memory alignment for CString (except that there is no way to specify
the alignment for CString). It is believed that the alignment is always 1,
which would mean it's safe to dealloc the resulting `*mut c_char` in Tor's
C code. However, the Rust developers are not willing to guarantee the
stability of, or a contract for, this behaviour, citing concerns that this
is potentially extremely and subtly unsafe.
4. Perform an allocation on the other side of the boundary
After crossing the boundary, the other side MUST perform an
allocation to copy the data and is therefore responsible for
freeing that memory later.
5. No touching other language's enums
Rust enums should never be touched from C (nor can they be safely
`#[repr(C)]`) nor vice versa:
> "The chosen size is the default enum size for the target platform's C
> ABI. Note that enum representation in C is implementation defined, so this is
> really a "best guess". In particular, this may be incorrect when the C code
> of interest is compiled with certain flags."
(from https://gankro.github.io/nomicon/other-reprs.html)
6. Type safety
Wherever possible and sensical, you SHOULD create new types in a
manner which prevents type confusion or misuse. For example,
rather than using an untyped mapping between strings and integers
like so:
use std::collections::HashMap;
pub fn get_elements_with_over_9000_points(map: &HashMap<String, usize>) -> Vec<String> {
...
}
It would be safer to define a new type, such that some other usage
of `HashMap<String, usize>` cannot be confused for this type:
pub struct DragonBallZPowers(pub HashMap<String, usize>);
impl DragonBallZPowers {
pub fn over_nine_thousand<'a>(&'a self) -> Vec<&'a String> {
let mut powerful_enough: Vec<&'a String> = Vec::with_capacity(5);
for (character, power) in &self.0 {
if *power > 9000 {
powerful_enough.push(character);
}
}
powerful_enough
}
}
Note the following code, which uses Rust's type aliasing, is valid
but it does NOT meet the desired type safety goals:
pub type Power = usize;
pub fn over_nine_thousand(power: &Power) -> bool {
if *power > 9000 {
return true;
}
false
}
// We can still do the following:
let his_power: usize = 9001;
over_nine_thousand(&his_power);
7. Unsafe mucking around with lifetimes
Because lifetimes are technically, in type theory terms, a kind, i.e. a
family of types, individual lifetimes can be treated as types. For example,
one can arbitrarily extend and shorten lifetime using `std::mem::transmute`:
struct R<'a>(&'a i32);
unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> {
std::mem::transmute::<R<'b>, R<'static>>(r)
}
unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>) -> &'b mut R<'c> {
std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)
}
Calling `extend_lifetime()` would cause an `R` passed into it to live forever
for the life of the program (the `'static` lifetime). Similarly,
`shorten_invariant_lifetime()` could be used to take something meant to live
forever, and cause it to disappear! This is incredibly unsafe. If you're
going to be mucking around with lifetimes like this, first, you better have
an extremely good reason, and second, you may as well be honest and explicit about
it, and for ferris' sake just use a raw pointer.
In short, just because lifetimes can be treated like types doesn't mean you
should do it.
8. Doing excessively unsafe things when there's a safer alternative
Similarly to #7, often there are excessively unsafe ways to do a task and a
simpler, safer way. You MUST choose the safer option where possible.
For example, `std::mem::transmute` can be abused in ways where casting with
`as` would be both simpler and safer:
// Don't do this
let ptr = &0;
let ptr_num_transmute = unsafe { std::mem::transmute::<&i32, usize>(ptr)};
// Use an `as` cast instead
let ptr_num_cast = ptr as *const i32 as usize;
In fact, using `std::mem::transmute` for *any* reason is a code smell and as
such SHOULD be avoided.
9. Casting integers with `as`
This is generally fine to do, but it has some behaviours which you should be
aware of. Casting down chops off the high bits, e.g.:
let x: u32 = 4294967295;
println!("{}", x as u16); // prints 65535
Some cases which you MUST NOT do include:
* Casting an `u128` down to an `f32` or vice versa (e.g.
`u128::MAX as f32` but this isn't only a problem with overflowing
as it is also undefined behaviour for `42.0f32 as u128`),
* Casting between integers and floats when the thing being cast
cannot fit into the type it is being casted into, e.g.:
println!("{}", 42949.0f32 as u8); // prints 197 in debug mode and 0 in release
println!("{}", 1.04E+17 as u8); // prints 0 in both modes
println!("{}", (0.0/0.0) as i64); // prints whatever the heck LLVM wants
Because this behaviour is undefined, it can even produce segfaults in
safe Rust code. For example, the following program built in release
mode segfaults:
#[inline(never)]
pub fn trigger_ub(sl: &[u8; 666]) -> &[u8] {
// Note that the float is out of the range of `usize`, invoking UB when casting.
let idx = 1e99999f64 as usize;
&sl[idx..] // The bound check is elided due to `idx` being of an undefined value.
}
fn main() {
println!("{}", trigger_ub(&[1; 666])[999999]); // ~ out of bound
}
And in debug mode panics with:
thread 'main' panicked at 'slice index starts at 140721821254240 but ends at 666', /checkout/src/libcore/slice/mod.rs:754:4

View File

@ -1,123 +0,0 @@
= Fuzzing Tor
== The simple version (no fuzzing, only tests)
Check out fuzzing-corpora, and set TOR_FUZZ_CORPORA to point to the place
where you checked it out.
To run the fuzzing test cases in a deterministic fashion, use:
make test-fuzz-corpora
This won't actually fuzz Tor! It will just run all the fuzz binaries
on our existing set of testcases for the fuzzer.
== Different kinds of fuzzing
Right now we support three different kinds of fuzzer.
First, there's American Fuzzy Lop (AFL), a fuzzer that works by forking
a target binary and passing it lots of different inputs on stdin. It's the
trickiest one to set up, so I'll be describing it more below.
Second, there's libFuzzer, a llvm-based fuzzer that you link in as a library,
and it runs a target function over and over. To use this one, you'll need to
have a reasonably recent clang and libfuzzer installed. At that point, you
just build with --enable-expensive-hardening and --enable-libfuzzer. That
will produce a set of binaries in src/test/fuzz/lf-fuzz-* . These programs
take as input a series of directories full of fuzzing examples. For more
information on libfuzzer, see http://llvm.org/docs/LibFuzzer.html
Third, there's Google's OSS-Fuzz infrastructure. For more on this, see
https://github.com/google/oss-fuzz and the projects/tor subdirectory. You'll
need to mess around with Docker a bit to test this one out; it's meant to run
on Google's infrastructure.
In all cases, you'll need some starting examples to give the fuzzer when it
starts out. There's a set in the "fuzzing-corpora" git repository. Try
setting TOR_FUZZ_CORPORA to point to a checkout of that repository
== Writing Tor fuzzers
A tor fuzzing harness should have:
* a fuzz_init() function to set up any necessary global state.
* a fuzz_main() function to receive input and pass it to a parser.
* a fuzz_cleanup() function to clear global state.
Most fuzzing frameworks will produce many invalid inputs - a tor fuzzing
harness should reject invalid inputs without crashing or behaving badly.
But the fuzzing harness should crash if tor fails an assertion, triggers a
bug, or accesses memory it shouldn't. This helps fuzzing frameworks detect
"interesting" cases.
== Guided Fuzzing with AFL
There is no HTTPS, hash, or signature for American Fuzzy Lop's source code, so
its integrity can't be verified. That said, you really shouldn't fuzz on a
machine you care about, anyway.
To Build:
Get AFL from http://lcamtuf.coredump.cx/afl/ and unpack it
cd afl
make
cd ../tor
PATH=$PATH:../afl/ CC="../afl/afl-gcc" ./configure --enable-expensive-hardening
AFL_HARDEN=1 make clean fuzzers
To Find The ASAN Memory Limit: (64-bit only)
On 64-bit platforms, afl needs to know how much memory ASAN uses,
because ASAN tends to allocate a ridiculous amount of virtual memory,
and then not actually use it.
Read afl/docs/notes_for_asan.txt for more details.
Download recidivm from http://jwilk.net/software/recidivm
Download the signature
Check the signature
tar xvzf recidivm*.tar.gz
cd recidivm*
make
/path/to/recidivm -v src/test/fuzz/fuzz-http
Use the final "ok" figure as the input to -m when calling afl-fuzz
(Normally, recidivm would output a figure automatically, but in some cases,
the fuzzing harness will hang when the memory limit is too small.)
You could also just say "none" instead of the memory limit below, if you
don't care about memory limits.
To Run:
mkdir -p src/test/fuzz/fuzz_http_findings
../afl/afl-fuzz -i ${TOR_FUZZ_CORPORA}/http -o src/test/fuzz/fuzz_http_findings -m <asan-memory-limit> -- src/test/fuzz/fuzz-http
AFL has a multi-core mode, check the documentation for details.
You might find the included fuzz-multi.sh script useful for this.
macOS (OS X) requires slightly more preparation, including:
* using afl-clang (or afl-clang-fast from the llvm directory)
* disabling external crash reporting (AFL will guide you through this step)
== Triaging Issues
Crashes are usually interesting, particularly if using AFL_HARDEN=1 and --enable-expensive-hardening. Sometimes crashes are due to bugs in the harness code.
Hangs might be interesting, but they might also be spurious machine slowdowns.
Check if a hang is reproducible before reporting it. Sometimes, processing
valid inputs may take a second or so, particularly with the fuzzer and
sanitizers enabled.
To see what fuzz-http is doing with a test case, call it like this:
src/test/fuzz/fuzz-http --debug < /path/to/test.case
(Logging is disabled while fuzzing to increase fuzzing speed.)
== Reporting Issues
Please report any issues discovered using the process in Tor's security issue
policy:
https://trac.torproject.org/projects/tor/wiki/org/meetings/2016SummerDevMeeting/Notes/SecurityIssuePolicy

View File

@ -1,188 +0,0 @@
Getting started in Tor development
==================================
Congratulations! You've found this file, and you're reading it! This
means that you might be interested in getting started in developing Tor.
(This guide is just about Tor itself--the small network program at the
heart of the Tor network--and not about all the other programs in the
whole Tor ecosystem.)
If you are looking for a more bare-bones, less user-friendly dump of
important information, you might like reading the "torguts" documents
linked to below. You should probably read them before you write your
first patch.
Required background
-------------------
First, I'm going to assume that you can build Tor from source, and that
you know enough of the C language to read and write it. (See the README
file that comes with the Tor source for more information on building it,
and any high-quality guide to C for information on programming.)
I'm also going to assume that you know a little bit about how to use
Git, or that you're able to follow one of the several excellent guides
at http://git-scm.org to learn.
Most Tor developers develop using some Unix-based system, such as Linux,
BSD, or OSX. It's okay to develop on Windows if you want, but you're
going to have a more difficult time.
Getting your first patch into Tor
---------------------------------
Once you've reached this point, here's what you need to know.
1. Get the source.
We keep our source under version control in Git. To get the latest
version, run
git clone https://git.torproject.org/git/tor
This will give you a checkout of the master branch. If you're
going to fix a bug that appears in a stable version, check out the
appropriate "maint" branch, as in:
git checkout maint-0.2.7
2. Find your way around the source
Our overall code structure is explained in the "torguts" documents,
currently at
git clone https://git.torproject.org/user/nickm/torguts.git
Find a part of the code that looks interesting to you, and start
looking around it to see how it fits together!
We do some unusual things in our codebase. Our testing-related
practices and kludges are explained in doc/WritingTests.txt.
If you see something that doesn't make sense, we love to get
questions!
3. Find something cool to hack on.
You may already have a good idea of what you'd like to work on, or
you might be looking for a way to contribute.
Many people have gotten started by looking for an area where they
personally felt Tor was underperforming, and investigating ways to
fix it. If you're looking for ideas, you can head to our bug
tracker at trac.torproject.org and look for tickets that have
received the "easy" tag: these are ones that developers think would
be pretty simple for a new person to work on. For a bigger
challenge, you might want to look for tickets with the "lorax"
keyword: these are tickets that the developers think might be a
good idea to build, but which we have no time to work on any time
soon.
Or you might find another open ticket that piques your
interest. It's all fine!
For your first patch, it is probably NOT a good idea to make
something huge or invasive. In particular, you should probably
avoid:
* Major changes spread across many parts of the codebase.
* Major changes to programming practice or coding style.
* Huge new features or protocol changes.
4. Meet the developers!
We discuss stuff on the tor-dev mailing list and on the #tor-dev
IRC channel on OFTC. We're generally friendly and approachable,
and we like to talk about how Tor fits together. If we have ideas
about how something should be implemented, we'll be happy to share
them.
We currently have a patch workshop at least once a week, where
people share patches they've made and discuss how to make them
better. The time might change in the future, but generally,
there's no bad time to talk, and ask us about patch ideas.
5. Do you need to write a design proposal?
If your idea is very large, or it will require a change to Tor's
protocols, there needs to be a written design proposal before it
can be merged. (We use this process to manage changes in the
protocols.) To write one, see the instructions at
https://gitweb.torproject.org/torspec.git/tree/proposals/001-process.txt
. If you'd like help writing a proposal, just ask! We're happy to
help out with good ideas.
You might also like to look around the rest of that directory, to
see more about open and past proposed changes to Tor's behavior.
6. Writing your patch
As you write your code, you'll probably want it to fit in with the
standards of the rest of the Tor codebase so it will be easy for us
to review and merge. You can learn our coding standards in
doc/HACKING.
If your patch is large and/or is divided into multiple logical
components, remember to divide it into a series of Git commits. A
series of small changes is much easier to review than one big lump.
7. Testing your patch
We prefer that all new or modified code have unit tests for it to
ensure that it runs correctly. Also, all code should actually be
_run_ by somebody, to make sure it works.
See doc/WritingTests.txt for more information on how we test things
in Tor. If you'd like any help writing tests, just ask! We're
glad to help out.
8. Submitting your patch
We review patches through tickets on our bugtracker at
trac.torproject.org. You can either upload your patches there, or
put them at a public git repository somewhere we can fetch them
(like github or bitbucket) and then paste a link on the appropriate
trac ticket.
Once your patches are available, write a short explanation of what
you've done on trac, and then change the status of the ticket to
needs_review.
9. Review, Revision, and Merge
With any luck, somebody will review your patch soon! If not, you
can ask on the IRC channel; sometimes we get really busy and take
longer than we should. But don't let us slow you down: you're the
one who's offering help here, and we should respect your time and
contributions.
When your patch is reviewed, one of these things will happen:
* The reviewer will say "looks good to me" and your
patch will get merged right into Tor. [Assuming we're not
in the middle of a code-freeze window. If the codebase is
frozen, your patch will go into the next release series.]
* OR the reviewer will say "looks good, just needs some small
changes!" And then the reviewer will make those changes,
and merge the modified patch into Tor.
* OR the reviewer will say "Here are some questions and
comments," followed by a bunch of stuff that the reviewer
thinks should change in your code, or questions that the
reviewer has.
At this point, you might want to make the requested changes
yourself, and comment on the trac ticket once you have done
so. Or if you disagree with any of the comments, you should
say so! And if you won't have time to make some of the
changes, you should say that too, so that other developers
will be able to pick up the unfinished portion.
Congratulations! You have now written your first patch, and gotten
it integrated into mainline Tor.
@ -1,181 +0,0 @@
Hacking on Rust in Tor
========================
Getting Started
-----------------
Please read or review our documentation on Rust coding standards
(`.../doc/HACKING/CodingStandardsRust.md`) before doing anything.
Please also read
[the Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html). We
aim to follow the good example set by the Rust community and be
excellent to one another. Let's be careful with each other, so we can
be memory-safe together!
Next, please contact us before rewriting anything! Rust in Tor is still
an experiment. It is an experiment that we very much want to see
succeed, so we're going slowly and carefully. For the moment, it's also
a completely volunteer-driven effort: while many, if not most, of us are
paid to work on Tor, we are not yet funded to write Rust code for Tor.
Please be patient with the other people who are working on getting more
Rust code into Tor, because they are graciously donating their free time
to contribute to this effort.
Resources for learning Rust
-----------------------------
**Beginning resources**
The primary resource for learning Rust is
[The Book](https://doc.rust-lang.org/book/). If you'd like to start writing
Rust immediately, without waiting for anything to install, there is
[an interactive browser-based playground](https://play.rust-lang.org/).
**Advanced resources**
If you're interested in playing with various Rust compilers and viewing
a very nicely displayed output of the generated assembly, there is
[the Godbolt compiler explorer](https://rust.godbolt.org/)
For learning how to write unsafe Rust, read
[The Rustonomicon](https://doc.rust-lang.org/nomicon/).
For learning everything you ever wanted to know about Rust macros, there
is
[The Little Book of Rust Macros](https://danielkeep.github.io/tlborm/book/index.html).
For learning more about FFI and Rust, see Jake Goulding's
[Rust FFI Omnibus](http://jakegoulding.com/rust-ffi-omnibus/).
Compiling Tor with Rust enabled
---------------------------------
You will need to run the `configure` script with the `--enable-rust`
flag to explicitly build with Rust. Additionally, you will need to
specify where to fetch Rust dependencies, as we allow for either
fetching dependencies from Cargo or specifying a local directory.
**Fetch dependencies from Cargo**
./configure --enable-rust --enable-cargo-online-mode
**Using a local dependency cache**
You'll need the following Rust dependencies (as of this writing):
libc==0.2.39
We vendor our Rust dependencies in a separate repo using
[cargo-vendor](https://github.com/alexcrichton/cargo-vendor). To use
them, do:
git submodule init
git submodule update
To specify the local directory containing the dependencies, (assuming
you are in the top level of the repository) configure tor with:
TOR_RUST_DEPENDENCIES='path_to_dependencies_directory' ./configure --enable-rust
(Note that TOR_RUST_DEPENDENCIES must be the full path to the directory; it
cannot be relative.)
Assuming you used the above `git submodule` commands and you're in the
topmost directory of the repository, this would be:
TOR_RUST_DEPENDENCIES=`pwd`/src/ext/rust/crates ./configure --enable-rust
Identifying which modules to rewrite
======================================
The places in the Tor codebase that are good candidates for porting to
Rust are:
1. are loosely coupled to other Tor submodules,
2. have high test coverage, and
3. would benefit from being implemented in a memory safe language.
Help in either identifying places such as this, or working to improve
existing areas of the C codebase by adding regression tests and
simplifying dependencies, would be really helpful.
Furthermore, as submodules in C are implemented in Rust, this is a good
opportunity to refactor, add more tests, and split modules into smaller
areas of responsibility.
A good first step is to build a module-level callgraph to understand how
interconnected your target module is.
git clone https://git.torproject.org/user/nickm/calltool.git
cd tor
CFLAGS=0 ./configure
../calltool/src/main.py module_callgraph
The output will tell you each module name, along with a set of every module that
the module calls. Modules which call fewer other modules are better targets.
Writing your Rust module
==========================
Strive to change the C API as little as possible.
We are currently targeting Rust nightly, *for now*. We expect this to
change moving forward, as we understand more about which nightly
features we need. It is on our TODO list to try to cultivate good
standing with various distro maintainers of `rustc` and `cargo`, in
order to ensure that whatever version we solidify on is readily
available.
If parts of your Rust code need to stay in sync with C code (such as
handling enums across the FFI boundary), annotate these places in a
comment structured as follows:
/// C_RUST_COUPLED: <path_to_file> `<name_of_c_object>`
Where <name_of_c_object> can be an enum, struct, constant, etc. Then,
do the same in the C code, to note that the Rust code will need to
change when the C code does.
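For instance, the C side of a coupled enum might carry the mirror
annotation like this (the file path and names below are invented purely
for illustration):

    /* C_RUST_COUPLED: src/rust/example/lib.rs `ExampleStatus` */
    typedef enum {
      EXAMPLE_STATUS_OK = 0,
      EXAMPLE_STATUS_FAIL = 1,
    } example_status_t;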
Adding your Rust module to Tor's build system
-----------------------------------------------
0. Your translation of the C module should live in its own crate(s)
in the `.../tor/src/rust/` directory.
1. Add your crate to `.../tor/src/rust/Cargo.toml`, in the
`[workspace.members]` section.
2. Add your crate's files to src/rust/include.am
If your crate should be available to C (rather than just being included as a
dependency of other Rust modules):
0. Declare the crate as a dependency of tor_rust in
`src/rust/tor_util/Cargo.toml` and include it in
`src/rust/tor_rust/lib.rs`
How to test your Rust code
----------------------------
Everything should be tested, full stop. Even non-public functionality.
Be sure to edit `.../tor/src/test/test_rust.sh` to add the name of your
crate to the `crates` variable! This will ensure that `cargo test` is
run on your crate.
Configure Tor's build system to build with Rust enabled:
./configure --enable-fatal-warnings --enable-rust --enable-cargo-online-mode
Tor's test should be run by doing:
make check
Tor's integration tests should also pass:
make test-stem
Submitting a patch
=====================
Please follow the instructions in `.../doc/HACKING/GettingStarted.md`.
@ -1,365 +0,0 @@
Useful tools
============
These aren't strictly necessary for hacking on Tor, but they can help track
down bugs.
Travis CI
---------
It's CI. Looks like this: https://travis-ci.org/torproject/tor.
Runs automatically on Pull Requests sent to torproject/tor. You can set it up
for your fork to build commits outside of PRs too:
1. sign up for GitHub: https://github.com/join
2. fork https://github.com/torproject/tor:
https://help.github.com/articles/fork-a-repo/
3. follow https://docs.travis-ci.com/user/getting-started/#To-get-started-with-Travis-CI,
skipping the steps involving `.travis.yml` (we already have one).
Builds should show up on the web at travis-ci.com and on IRC at #tor-ci on
OFTC. If they don't, ask #tor-dev (also on OFTC).
Jenkins
-------
https://jenkins.torproject.org
Dmalloc
-------
The dmalloc library will keep track of memory allocation, so you can find out
if we're leaking memory, doing any double-frees, or so on.
dmalloc -l ~/dmalloc.log
(run the commands it tells you)
./configure --with-dmalloc
Valgrind
--------
valgrind --leak-check=yes --error-limit=no --show-reachable=yes src/or/tor
(Note that if you get a zillion openssl warnings, you will also need to
pass `--undef-value-errors=no` to valgrind, or rebuild your openssl
with `-DPURIFY`.)
Coverity
--------
Nick regularly runs the coverity static analyzer on the Tor codebase.
The preprocessor define `__COVERITY__` is used to work around instances
where coverity picks up behavior that we wish to permit.
clang Static Analyzer
---------------------
The clang static analyzer can be run on the Tor codebase using Xcode (WIP)
or a command-line build.
The preprocessor define `__clang_analyzer__` is used to work around instances
where clang picks up behavior that we wish to permit.
clang Runtime Sanitizers
------------------------
To build the Tor codebase with the clang Address and Undefined Behavior
sanitizers, see the file `contrib/clang/sanitize_blacklist.txt`.
Preprocessor workarounds for instances where clang picks up behavior that
we wish to permit are also documented in the blacklist file.
Running lcov for unit test coverage
-----------------------------------
Lcov is a utility that generates pretty HTML reports of test code coverage.
To generate such a report:
./configure --enable-coverage
make
make coverage-html
$BROWSER ./coverage_html/index.html
This will run the tor unit test suite `./src/test/test` and generate the HTML
coverage code report under the directory `./coverage_html/`. To change the
output directory, use `make coverage-html HTML_COVER_DIR=./funky_new_cov_dir`.
Coverage diffs using lcov are not currently implemented, but are being
investigated (as of July 2014).
Running the unit tests
----------------------
To quickly run all the tests distributed with Tor:
make check
To run the fast unit tests only:
make test
To selectively run just some tests (the following can be combined
arbitrarily):
./src/test/test <name_of_test> [<name of test 2>] ...
./src/test/test <prefix_of_name_of_test>.. [<prefix_of_name_of_test2>..] ...
./src/test/test :<name_of_excluded_test> [:<name_of_excluded_test2>]...
To run all tests, including those based on Stem or Chutney:
make test-full
To run all tests, including those based on Stem or Chutney that require a
working connection to the internet:
make test-full-online
Running gcov for unit test coverage
-----------------------------------
./configure --enable-coverage
make
make check
# or: make test-full, or make test-full-online
mkdir coverage-output
./scripts/test/coverage coverage-output
(On OSX, you'll need to start with `--enable-coverage CC=clang`.)
If that doesn't work:
* Try configuring Tor with `--disable-gcc-hardening`
* You might need to run `make clean` after you run `./configure`.
Then, look at the .gcov files in `coverage-output`. '-' before a line means
that the compiler generated no code for that line. '######' means that the
line was never reached. Lines with numbers were executed that number of times.
For more details about how to read gcov output, see the [Invoking
gcov](https://gcc.gnu.org/onlinedocs/gcc/Invoking-Gcov.html) chapter
of the GCC manual.
If you make changes to Tor and want to get another set of coverage results,
you can run `make reset-gcov` to clear the intermediary gcov output.
If you have two different `coverage-output` directories, and you want to see
a meaningful diff between them, you can run:
./scripts/test/cov-diff coverage-output1 coverage-output2 | less
In this diff, any lines that were visited at least once will have coverage "1",
and line numbers are deleted. This lets you inspect what you (probably) really
want to know: which untested lines were changed? Are there any new untested
lines?
If you run ./scripts/test/cov-exclude, it marks excluded unreached
lines with 'x', and excluded reached lines with '!!!'.
Running integration tests
-------------------------
We have the beginnings of a set of scripts to run integration tests using
Chutney. To try them, set CHUTNEY_PATH to your chutney source directory, and
run `make test-network`.
We also have scripts to run integration tests using Stem. To try them, set
`STEM_SOURCE_DIR` to your Stem source directory, and run `make test-stem`.
Profiling Tor
-------------
Ongoing notes about Tor profiling can be found at
https://pad.riseup.net/p/profiling-tor
Profiling Tor with oprofile
---------------------------
The oprofile tool (Linux only!) tells you which functions Tor is
spending its CPU time in, so we can identify performance bottlenecks.
Here are some basic instructions
- Build tor with debugging symbols (you probably already have, unless
you messed with CFLAGS during the build process).
- Build all the libraries you care about with debugging symbols
(probably you only care about libssl, maybe zlib and Libevent).
- Copy this tor to a new directory
- Copy all the libraries it uses to that dir too (`ldd ./tor` will
tell you)
- Set LD_LIBRARY_PATH to include that dir. `ldd ./tor` should now
show you it's using the libs in that dir
- Run that tor
- Reset oprofile's counters and start it
* `opcontrol --reset; opcontrol --start`, if Nick remembers right.
- After a while, have it dump the stats on tor and all the libs
in that dir you created.
* `opcontrol --dump;`
* `opreport -l that_dir/*`
- Profit
Profiling Tor with perf
-----------------------
This works with a running Tor, and requires root.
1. Decide how long you want to profile for. Start with (say) 30 seconds. If that
works, try again with longer times.
2. Find the PID of your running tor process.
3. Run `perf record --call-graph dwarf -p <PID> sleep <SECONDS>`
(You may need to do this as root.)
You might need to add `-e cpu-clock` as an option to the perf record line
above, if you are on an older CPU without access to hardware profiling
events, or in a VM, or something.
4. Now you have a perf.data file. Have a look at it with `perf report
--no-children --sort symbol,dso` or `perf report --no-children --sort
symbol,dso --stdio --header`. How does it look?
5a. Once you have a nice big perf.data file, you can compress it, encrypt it,
and send it to your favorite Tor developers.
5b. Or maybe you'd rather not send a nice big perf.data file. Who knows what's
in that!? It's kinda scary. To generate a less scary file, you can use `perf
report -g > <FILENAME>.out`. Then you can compress that and put it somewhere
public.
Profiling Tor with gperftools aka Google-performance-tools
----------------------------------------------------------
This should work on nearly any unixy system. It doesn't seem to be compatible
with RunAsDaemon though.
Beforehand, install google-perftools.
1. You need to rebuild Tor, hack the linking steps to add `-lprofiler` to the
libs. You can do this by adding `LIBS=-lprofiler` when you call `./configure`.
Now you can run Tor with profiling enabled, and use the pprof utility to look at
performance! See the gperftools manual for more info, but basically:
2. Run `env CPUPROFILE=/tmp/profile src/or/tor -f <path/torrc>`. The profile file
is not written to until Tor finishes execution.
3. Run `pprof src/or/tor /tmp/profile` to start the REPL.
Generating and analyzing a callgraph
------------------------------------
0. Build Tor on linux or mac, ideally with -O0 or -fno-inline.
1. Clone 'https://gitweb.torproject.org/user/nickm/calltool.git/' .
Follow the README in that repository.
Note that currently the callgraph generator can't detect calls that pass
through function pointers.
Getting emacs to edit Tor source properly
-----------------------------------------
Nick likes to put the following snippet in his .emacs file:
(add-hook 'c-mode-hook
(lambda ()
(font-lock-mode 1)
(set-variable 'show-trailing-whitespace t)
(let ((fname (expand-file-name (buffer-file-name))))
(cond
((string-match "^/home/nickm/src/libevent" fname)
(set-variable 'indent-tabs-mode t)
(set-variable 'c-basic-offset 4)
(set-variable 'tab-width 4))
((string-match "^/home/nickm/src/tor" fname)
(set-variable 'indent-tabs-mode nil)
(set-variable 'c-basic-offset 2))
((string-match "^/home/nickm/src/openssl" fname)
(set-variable 'indent-tabs-mode t)
(set-variable 'c-basic-offset 8)
(set-variable 'tab-width 8))
))))
You'll note that it defaults to showing all trailing whitespace. The `cond`
test detects whether the file is one of a few C free software projects that I
often edit, and sets up the indentation level and tab preferences to match
what they want.
If you want to try this out, you'll need to change the filename regex
patterns to match where you keep your Tor files.
If you use emacs for editing Tor and nothing else, you could always just say:
(add-hook 'c-mode-hook
(lambda ()
(font-lock-mode 1)
(set-variable 'show-trailing-whitespace t)
(set-variable 'indent-tabs-mode nil)
(set-variable 'c-basic-offset 2)))
There is probably a better way to do this. No, we are probably not going
to clutter the files with emacs stuff.
Doxygen
-------
We use the 'doxygen' utility to generate documentation from our
source code. Here's how to use it:
1. Begin every file that should be documented with
/**
* \file filename.c
* \brief Short description of the file.
*/
(Doxygen will recognize any comment beginning with /** as special.)
2. Before any function, structure, #define, or variable you want to
document, add a comment of the form:
/** Describe the function's actions in imperative sentences.
*
* Use blank lines for paragraph breaks
* - and
* - hyphens
* - for
* - lists.
*
* Write <b>argument_names</b> in boldface.
*
* \code
* place_example_code();
* between_code_and_endcode_commands();
* \endcode
*/
3. Make sure to escape the characters `<`, `>`, `\`, `%` and `#` as `\<`,
`\>`, `\\`, `\%` and `\#`.
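For example, a made-up doc comment using these escapes might read:

    /** Compute <b>a</b> \% <b>b</b>. Note that \<, \>, \\, \% and \#
     * must be escaped like this inside Doxygen comments. */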
4. To document structure members, you can use two forms:
struct foo {
/** You can put the comment before an element; */
int a;
int b; /**< Or use the less-than symbol to put the comment
* after the element. */
};
5. To generate documentation from the Tor source code, type:
$ doxygen -g
to generate a file called `Doxyfile`. Edit that file and run
`doxygen` to generate the API documentation.
6. See the Doxygen manual for more information; this summary just
scratches the surface.
@ -1,88 +0,0 @@
How to review a patch
=====================
Some folks have said that they'd like to review patches more often, but they
don't know how.
So, here are a bunch of things to check for when reviewing a patch!
Note that if you can't do every one of these, that doesn't mean you can't do
a good review! Just make it clear what you checked for and what you didn't.
Top-level smell-checks
----------------------
(Difficulty: easy)
- Does it compile with `--enable-fatal-warnings`?
- Does `make check-spaces` pass?
- Does `make check-changes` pass?
- Does it have a reasonable amount of tests? Do they pass? Do they leak
memory?
- Do all the new functions, global variables, types, and structure members have
documentation?
- Do all the functions, global variables, types, and structure members with
modified behavior have modified documentation?
- Do all the new torrc options have documentation?
- If this changes Tor's behavior on the wire, is there a design proposal?
- If this changes anything in the code, is there a "changes" file?
Let's look at the code!
-----------------------
- Does the code conform to CodingStandards.txt?
- Does the code leak memory?
- If two or more pointers ever point to the same object, is it clear which
pointer "owns" the object?
- Are all allocated resources freed?
- Are all pointers that should be const, const?
- Are `#defines` used for 'magic' numbers?
- Can you understand what the code is trying to do?
- Can you convince yourself that the code really does that?
- Is there duplicated code that could be turned into a function?
Let's look at the documentation!
--------------------------------
- Does the documentation conform to CodingStandards.txt?
- Does it make sense?
- Can you predict what the function will do from its documentation?
Let's think about security!
---------------------------
- If there are any arrays or buffers, are you 100% sure that they cannot
overflow?
- If there is any integer math, can it overflow or underflow?
- If there are any allocations, are you sure there are corresponding
deallocations?
- Is there a safer pattern that could be used in any case?
- Have they used one of the Forbidden Functions?
(Also see your favorite secure C programming guides.)
@ -1,111 +0,0 @@
# Modules in Tor #
This document describes the build system and coding standards when writing a
module in Tor.
## What is a module? ##
In the context of the tor code base, a module is a subsystem that we can
selectively enable or disable, at `configure` time.
Currently, there is only one module:
- Directory Authority subsystem (dirauth)
It is located in its own directory in `src/or/dirauth/`. To disable it, one
needs to pass `--disable-module-dirauth` at configure time. All modules are
currently enabled by default.
## Build System ##
The changes to the build system are pretty straightforward.
1. Locate this define in the `configure.ac` file: `m4_define(MODULES`. It
contains a whitespace-separated list of the modules in tor. Add yours to
the list.
2. Use the `AC_ARG_ENABLE([module-dirauth]` template for your new module. We
use the "disable module" approach instead of enabling them one by one. So,
by default, tor will build all the modules.
This will define the `HAVE_MODULE_<name>` symbol, which can be used in
the C code to conditionally compile things for your module. The
`BUILD_MODULE_<name>` symbol is also defined for automake files (e.g.,
include.am).
3. In the `src/or/include.am` file, locate the `MODULE_DIRAUTH_SOURCES` value.
You need to create your own `_SOURCES` variable for your module and then
conditionally add it to `LIBTOR_A_SOURCES` when the module should be
built.
It is then **very** important to add your SOURCES variable to
`src_or_libtor_testing_a_SOURCES` so the tests can build it.
4. Do the same for header files: locate `ORHEADERS +=`, which always adds
all headers of all modules so that the symbols for the module entry
points can be found.
Finally, your module will automatically be included in the
`TOR_MODULES_ALL_ENABLED` variable which is used to build the unit tests. They
always build everything in order to test everything.
## Coding ##
As mentioned above, a module must be isolated in its own directory (name of
the module) in `src/or/`.
There are a couple of "rules" you want to follow:
* Minimize the number of entry points into your module as much as you can.
Fewer is always better, but of course that doesn't work out for every use
case. However, it is a good thing to always keep in mind.
* Do **not** use the `HAVE_MODULE_<name>` define outside of the module code
base. Every entry point should have a second definition if the module is
disabled. For instance:
```
#ifdef HAVE_MODULE_DIRAUTH
int sr_init(int save_to_disk);
#else /* HAVE_MODULE_DIRAUTH */
static inline int
sr_init(int save_to_disk)
{
(void) save_to_disk;
return 0;
}
#endif /* HAVE_MODULE_DIRAUTH */
```
The main reason for this approach is to avoid having conditional code
everywhere in the code base. It should be centralized as much as possible
which helps maintainability but also avoids conditional spaghetti code
making the code much more difficult to follow/understand.
* It is possible that you end up with code that needs to be used by the rest
of the code base but is still part of your module. As a good example, if you
look at `src/or/shared_random_client.c`: it contains code needed by the hidden
service subsystem, but related mainly to the shared random subsystem,
which is very specific to the dirauth module.
This is fine but try to keep it as lean as possible and never use the same
filename as the one in the module. For example, this is a bad idea and
should never be done:
- `src/or/shared_random.c`
- `src/or/dirauth/shared_random.c`
* When you include headers from the module, **always** use the full module
path in your statement. Example:
`#include "dirauth/dirvote.h"`
The main reason is that we do **not** add the module include path by default,
so it needs to be specified. It also helps readers see at a glance which
parts come from a module and which do not.
Even **in** the module itself, use the full include path like above.
@ -1,62 +0,0 @@
In this directory
-----------------
This directory has helpful information about what you need to know to
hack on Tor!
First, read `GettingStarted.md` to learn how to get a start in Tor
development.
If you've decided to write a patch, `CodingStandards.txt` will give
you a bunch of information about how we structure our code.
It's important to get code right! Reading `WritingTests.md` will
tell you how to write and run tests in the Tor codebase.
There are a bunch of other programs we use to help maintain and
develop the codebase: `HelpfulTools.md` can tell you how to use them
with Tor.
If it's your job to put out Tor releases, see `ReleasingTor.md` so
that you don't miss any steps!
-----------------------
For full information on how Tor is supposed to work, look at the files in
`https://gitweb.torproject.org/torspec.git/tree`.
For an explanation of how to change Tor's design to work differently, look at
`https://gitweb.torproject.org/torspec.git/blob_plain/HEAD:/proposals/001-process.txt`.
For the latest version of the code, get a copy of git, and
git clone https://git.torproject.org/git/tor
We talk about Tor on the `tor-talk` mailing list. Design proposals and
discussion belong on the `tor-dev` mailing list. We hang around on
irc.oftc.net, with general discussion happening on #tor and development
happening on `#tor-dev`.
The other files in this `HACKING` directory may also be useful as you
get started working with Tor.
Happy hacking!
-----------------------
XXXXX also describe
doc/HACKING/WritingTests.md
torguts.git
torspec.git
The design paper
freehaven.net/anonbib
XXXX describe these and add links.
@ -1,222 +0,0 @@
Putting out a new release
-------------------------
Here are the steps that the maintainer should take when putting out a
new Tor release:
=== 0. Preliminaries
1. Get at least three of weasel/arma/Sebastian/Sina to put the new
version number in their approved versions list. Give them a few
days to do this if you can.
2. If this is going to be an important security release, give the packagers
some advance warning: See this list of packagers in IV.3 below.
3. Given the release date for Tor, ask the TB team about the likely release
date of a TB that contains it. See note below in "commit, upload,
announce".
=== I. Make sure it works
1. Use it for a while, as a client, as a relay, as a hidden service,
and as a directory authority. See if it has any obvious bugs, and
resolve those.
As applicable, merge the `maint-X` branch into the `release-X` branch.
But you've been doing that all along, right?
2. Are all of the jenkins builders happy? See jenkins.torproject.org.
What about the bsd buildbots?
See http://buildbot.pixelminers.net/builders/
What about Coverity Scan?
What about clang scan-build?
Does 'make distcheck' complain?
How about `make test-stem` and `make test-network` and
`make test-network-full`?
- Are all those tests still happy with --enable-expensive-hardening ?
Any memory leaks?
=== II. Write a changelog
1a. (Alpha release variant)
Gather the `changes/*` files into a changelog entry, rewriting many
of them and reordering to focus on what users and funders would find
interesting and understandable.
To do this, first run `./scripts/maint/lintChanges.py changes/*` and
fix as many warnings as you can. Then run `./scripts/maint/sortChanges.py
changes/* > changelog.in` to combine headings and sort the entries.
After that, it's time to hand-edit and fix the issues that lintChanges
can't find:
1. Within each section, sort by "version it's a bugfix on", else by
numerical ticket order.
2. Clean them up:
Make stuff very terse
Make sure each section name ends with a colon
Describe the user-visible problem right away
Mention relevant config options by name. If they're rare or unusual,
remind people what they're for
Avoid starting lines with open-paren
Present and imperative tense: not past.
'Relays', not 'servers' or 'nodes' or 'Tor relays'.
"Stop FOOing", not "Fix a bug where we would FOO".
Try not to let any given section be longer than about a page. Break up
long sections into subsections by some sort of common subtopic. This
guideline is especially important when organizing Release Notes for
new stable releases.
If a given changes stanza showed up in a different release (e.g.
maint-0.2.1), be sure to make the stanzas identical (so people can
distinguish if these are the same change).
3. Clean everything one last time.
4. Run `./scripts/maint/format_changelog.py --inplace` to make it prettier
1b. (old-stable release variant)
For stable releases that backport things from later releases, we try to
make sure that we keep the changelog entries
identical to their original versions, with a 'backport from 0.x.y.z'
note added to each section. So in this case, once you have the items
from the changes files copied together, don't use them to build a new
changelog: instead, look up the corrected versions that were merged
into ChangeLog in the master branch, and use those.
2. Compose a short release blurb to highlight the user-facing
changes. Insert said release blurb into the ChangeLog stanza. If it's
a stable release, add it to the ReleaseNotes file too. If we're adding
to a release-* branch, manually commit the changelogs to the later
git branches too.
3. If there are changes that require or suggest operator intervention
before or during the update, mail operators (either dirauth or relays
list) with a headline that indicates that an action is required or
appreciated.
4. If you're doing the first stable release in a series, you need to
create a ReleaseNotes for the series as a whole. To get started
there, copy all of the Changelog entries from the series into a new
file, and run `./scripts/maint/sortChanges.py` on it. That will
group them by category. Then kill every bugfix entry for fixing
bugs that were introduced within that release series; those aren't
relevant changes since the last series. At that point, it's time
to start sorting and condensing entries. (Generally, we don't edit the
text of existing entries, though.)
=== III. Making the source release.
1. In `maint-0.?.x`, bump the version number in `configure.ac` and run
`perl scripts/maint/updateVersions.pl` to update version numbers in other
places, and commit. Then merge `maint-0.?.x` into `release-0.?.x`.
(NOTE: To bump the version number, edit `configure.ac`, and then run
either `make`, or `perl scripts/maint/updateVersions.pl`, depending on
your version.)
When you merge the maint branch forward to the next maint branch, or into
master, merge it with "-s ours" to avoid a needless version bump.
2. Run `make distcheck`, put the tarball up somewhere (how about your
homedir on people.torproject.org?), and tell `#tor`
about it. Wait a while to see if anybody has problems building it.
(Though jenkins is usually pretty good about catching these things.)
=== IV. Commit, upload, announce
1. Sign the tarball, then sign and push the git tag:
gpg -ba <the_tarball>
git tag -u <keyid> tor-0.3.x.y-status
git push origin tag tor-0.3.x.y-status
(You must do this before you update the website: it relies on finding
the version by tag.)
2. scp the tarball and its sig to the dist website, i.e.
`/srv/dist-master.torproject.org/htdocs/` on dist-master. When you want
it to go live, you run "static-update-component dist.torproject.org"
on dist-master.
In the webwml.git repository, edit `include/versions.wmi` and `Makefile`
to note the new version.
(NOTE: Due to #17805, there can only be one stable version listed at
once. Nonetheless, do not call your version "alpha" if it is stable,
or people will get confused.)
3. Email the packagers (cc'ing tor-team) that a new tarball is up.
The current list of packagers is:
- {weasel,gk,mikeperry} at torproject dot org
- {blueness} at gentoo dot org
- {paul} at invizbox dot io
- {vincent} at invizbox dot com
- {lfleischer} at archlinux dot org
- {Nathan} at freitas dot net
- {mike} at tig dot as
- {tails-rm} at boum dot org
- {simon} at sdeziel.info
- {yuri} at freebsd.org
- {mh+tor} at scrit.ch
Also, email tor-packagers@lists.torproject.org.
4. Add the version number to Trac. To do this, go to Trac, log in,
select "Admin" near the top of the screen, then select "Versions" from
the menu on the left. At the right, there will be an "Add version"
box. By convention, we enter the version in the form "Tor:
0.2.2.23-alpha" (or whatever the version is), and we select the date as
the date in the ChangeLog.
5. Double-check: did the version get recommended in the consensus yet? Is
the website updated? If not, don't announce until they have the
up-to-date versions, or people will get confused.
6. Mail the release blurb and ChangeLog to tor-talk (development release) or
tor-announce (stable).
Post the changelog on the blog as well. You can generate a
blog-formatted version of the changelog with the -B option to
format-changelog.
When you post, include an estimate of when the next TorBrowser
releases will come out that include this Tor release. This will
usually track https://wiki.mozilla.org/RapidRelease/Calendar , but it
can vary.
=== V. Aftermath and cleanup
1. If it's a stable release, bump the version number in the
`maint-x.y.z` branch to "newversion-dev", and do a `merge -s ours`
merge to avoid taking that change into master.
2. Forward-port the ChangeLog (and ReleaseNotes if appropriate).
3. Keep an eye on the blog post, to moderate comments and answer questions.
@ -1,91 +0,0 @@
# Tracing #
This document describes how the event tracing subsystem works in tor so
developers can add events to the code base but also hook them to an event
tracing framework.
## Basics ###
Event tracing is separated in two concepts, trace events and a tracer. The
tracing subsystem can be found in `src/trace`. The `events.h` header file is
the main file that maps the different tracers to trace events.
### Events ###
A trace event is basically a function from which we can pass any data that
we want to collect. In addition, we specify a context for the event such as
a subsystem and an event name.
A trace event in tor has the following standard format:
tor_trace(subsystem, event\_name, args...)
The `subsystem` parameter is the name of the subsystem the trace event is in.
For example, that could be "scheduler" or "vote" or "hs". The idea is to add
some context to the event so when we collect them we know where it's coming
from. The `event_name` is the name of the event, which adds semantic
meaning to the event. Finally, `args` is any number of
arguments we want to collect.
Here is an example of a possible tracepoint in main():
tor_trace(main, init_phase, argc)
The above is a tracepoint in the `main` subsystem with `init_phase` as the
event name and the `int argc` is passed to the event as well.
How `argc` is collected or used has nothing to do with the instrumentation
(adding trace events to the code). It is the work of the tracer, which is why
the trace events and collection framework (tracer) are decoupled. You _can_
have trace events without a tracer.
### Tracer ###
In `src/trace/events.h`, we map the `tor_trace()` function to the right
tracer. Support for a tracer is only enabled at compile time. For instance, the
file `src/trace/debug.h` contains the mapping of the generic tracing function
`tor_trace()` to the `log_debug()` function. More specialized functions can
be mapped depending on the tracepoint.
## Build System ##
This section describes how it is integrated into the build system of tor.
By default, all trace events are disabled in tor, that is, `tor_trace()`
is a NOP.
To enable a tracer, there is a configure option of the form:
--enable-tracing-<tracer>
We have an option that will send every trace event to `log_debug()` (as
mentioned above), which will print the subsystem and name of the event, but
not the arguments, for technical reasons. This is useful if you want to
quickly see whether your trace event is being hit or is well written. To do
so, use this
configure option:
--enable-tracing-debug
## Instrument Tor ##
This is pretty easy. Let's say you want to add a trace event in
`src/or/rendcache.c`; you only have to add this include statement:
#include "trace/events.h"
Once done, you can add as many `tor_trace()` calls as you need.
Please use the right subsystem (here it would be `hs`) and a unique name that
tells what the event is for. For example:
tor_trace(hs, store_desc_as_client, desc, desc_id);
If you look in `src/trace/events.h`, you'll see that if tracing is enabled it
will be mapped to a function called:
tor_trace_hs_store_desc_as_client(desc, desc_id)
The point of all this is for that function to be defined in a new file
that you might add, named `src/trace/hs.{c|h}`, which defines how to
collect the data for the `tor_trace_hs_store_desc_as_client()` function:
for instance, by sending it to `log_debug()`, doing more complex
operations, or using a userspace tracer like LTTng (https://lttng.org).
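For illustration, here is a minimal sketch of such a collection function,
assuming a simple log-based tracer; the argument types and the `LD_REND`
log domain are assumptions made for this example:

    /* Hypothetical src/trace/hs.c: collect the event by logging it. */
    void
    tor_trace_hs_store_desc_as_client(const char *desc, const char *desc_id)
    {
      (void) desc; /* The descriptor body itself is not logged here. */
      log_debug(LD_REND, "Client stored descriptor (id: %s)", desc_id);
    }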
@ -1,499 +0,0 @@
Writing tests for Tor: an incomplete guide
==========================================
Tor uses a variety of testing frameworks and methodologies to try to
keep from introducing bugs. The major ones are:
1. Unit tests written in C and shipped with the Tor distribution.
2. Integration tests written in Python and shipped with the Tor
distribution.
3. Integration tests written in Python and shipped with the Stem
library. Some of these use the Tor controller protocol.
4. System tests written in Python and SH, and shipped with the
Chutney package. These work by running many instances of Tor
locally, and sending traffic through them.
5. The Shadow network simulator.
How to run these tests
----------------------
### The easy version
To run all the tests that come bundled with Tor, run `make check`.
To run the Stem tests as well, fetch stem from the git repository,
set `STEM_SOURCE_DIR` to the checkout, and run `make test-stem`.
To run the Chutney tests as well, fetch chutney from the git repository,
set `CHUTNEY_PATH` to the checkout, and run `make test-network`.
To run all of the above, run `make test-full`.
To run all of the above, plus tests that require a working connection to the
internet, run `make test-full-online`.
### Running particular subtests
The Tor unit tests are divided into separate programs and a couple of
bundled unit test programs.
Separate programs are easy. For example, to run the memwipe tests in
isolation, you just run `./src/test/test-memwipe`.
To run tests within the unit test programs, you can specify the name
of the test. The string ".." can be used as a wildcard at the end of the
test name. For example, to run all the cell format tests, enter
`./src/test/test cellfmt/..`.
Many tests that need to mess with global state run in forked subprocesses in
order to keep from contaminating one another. But when debugging a failing test,
you might want to run it without forking a subprocess. To do so, use the
`--no-fork` option with a single test. (If you specify it along with
multiple tests, they might interfere.)
You can turn on logging in the unit tests by passing one of `--debug`,
`--info`, `--notice`, or `--warn`. By default only errors are displayed.
Unit tests are divided into `./src/test/test` and `./src/test/test-slow`.
The former are those that should finish in a few seconds; the latter tend to
take more time, and may include CPU-intensive operations, deliberate delays,
and stuff like that.
### Finding test coverage
Test coverage is a measurement of which lines your tests actually visit.
When you configure Tor with the `--enable-coverage` option, it should
build with support for coverage in the unit tests, and in a special
`tor-cov` binary.
Then, run the tests you'd like to see coverage from. If you have old
coverage output, you may need to run `make reset-gcov` first.
Now you've got a bunch of files scattered around your build directories
called `*.gcda`. In order to extract the coverage output from them, make a
temporary directory for them and run `./scripts/test/coverage ${TMPDIR}`,
where `${TMPDIR}` is the temporary directory you made. This will create a
`.gcov` file for each source file under tests, containing that file's source
annotated with the number of times the tests hit each line. (You'll need to
have gcov installed.)
You can get a summary of the test coverage for each file by running
`./scripts/test/cov-display ${TMPDIR}/*` . Each line lists the file's name,
the number of uncovered lines, the number of covered lines, and the
coverage percentage.
For a summary of the test coverage for each _function_, run
`./scripts/test/cov-display -f ${TMPDIR}/*`.
For more details on using gcov, including the helper scripts in
scripts/test, see HelpfulTools.md.
### Comparing test coverage
Sometimes it's useful to compare test coverage for a branch you're writing to
coverage from another branch (such as git master, for example). But you
can't run `diff` on the two coverage outputs directly, since the actual
number of times each line is executed aren't so important, and aren't wholly
deterministic.
Instead, follow the instructions above for each branch, creating a separate
temporary directory for each. Then, run `./scripts/test/cov-diff ${D1}
${D2}`, where D1 and D2 are the directories you want to compare. This will
produce a diff of the two directories, with all lines normalized to be either
covered or uncovered.
To count new or modified uncovered lines in D2, you can run:
./scripts/test/cov-diff ${D1} ${D2} | grep '^+ *\#' | wc -l
### Marking lines as unreachable by tests
You can mark a specific line as unreachable by using the special
string LCOV_EXCL_LINE. You can mark a range of lines as unreachable
with LCOV_EXCL_START... LCOV_EXCL_STOP. Note that older versions of
lcov don't understand these lines.
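For instance, usage looks like this (the surrounding code is made up for
the sketch):

    tor_assert(0); /* LCOV_EXCL_LINE */

    /* LCOV_EXCL_START */
    log_err(LD_BUG, "This line should be unreachable.");
    return -1;
    /* LCOV_EXCL_STOP */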
You can post-process .gcov files to make these lines 'unreached' by
running ./scripts/test/cov-exclude on them. It marks excluded
unreached lines with 'x', and excluded reached lines with '!!!'.
Note: you should never do this unless the line is meant to be 100%
unreachable by actual code.
What kinds of test should I write?
----------------------------------
Integration testing and unit testing are complementary: it's probably a
good idea to make sure that your code is hit by both if you can.
If your code is very-low level, and its behavior is easily described in
terms of a relation between inputs and outputs, or a set of state
transitions, then it's a natural fit for unit tests. (If not, please
consider refactoring it until most of it _is_ a good fit for unit
tests!)
If your code adds new externally visible functionality to Tor, it would
be great to have a test for that functionality. That's where
integration tests more usually come in.
Unit and regression tests: Does this function do what it's supposed to?
-----------------------------------------------------------------------
Most of Tor's unit tests are made using the "tinytest" testing framework.
You can see a guide to using it in the tinytest manual at
https://github.com/nmathewson/tinytest/blob/master/tinytest-manual.md
To add a new test of this kind, either edit an existing C file in `src/test/`,
or create a new C file there. Each test is a single function that must
be indexed in the table at the end of the file. We use the label "done:" as
a cleanup point for all test functions.
If you have created a new test file, you will need to:
1. Add the new test file to include.am
2. In `test.h`, include the new test cases (testcase_t)
3. In `test.c`, add the new test cases to testgroup_t testgroups
(Make sure you read `tinytest-manual.md` before proceeding.)
I use the terms "unit test" and "regression test" very sloppily here.
### A simple example
Here's an example of a test function for a simple function in util.c:
static void
test_util_writepid(void *arg)
{
(void) arg;
char *contents = NULL;
const char *fname = get_fname("tmp_pid");
unsigned long pid;
char c;
write_pidfile(fname);
contents = read_file_to_str(fname, 0, NULL);
tt_assert(contents);
int n = sscanf(contents, "%lu\n%c", &pid, &c);
tt_int_op(n, OP_EQ, 1);
tt_int_op(pid, OP_EQ, getpid());
done:
tor_free(contents);
}
This should look pretty familiar to you if you've read the tinytest
manual. One thing to note here is that we use the testing-specific
function `get_fname` to generate a file with respect to a temporary
directory that the tests use. You don't need to delete the file;
it will get removed when the tests are done.
Also note our use of `OP_EQ` instead of `==` in the `tt_int_op()` calls.
We define `OP_*` macros to use instead of the binary comparison
operators so that analysis tools can more easily parse our code.
(Coccinelle really hates to see `==` used as a macro argument.)
Finally, remember that by convention, all `*_free()` functions that
Tor defines are defined to accept NULL harmlessly. Thus, you don't
need to say `if (contents)` in the cleanup block.
### Exposing static functions for testing
Sometimes you need to test a function, but you don't want to expose
it outside its usual module.
To support this, Tor's build system compiles a testing version of
each module, with extra identifiers exposed. If you want to
declare a function as static but available for testing, use the
macro `STATIC` instead of `static`. Then, make sure there's a
macro-protected declaration of the function in the module's header.
For example, `crypto_curve25519.h` contains:
#ifdef CRYPTO_CURVE25519_PRIVATE
STATIC int curve25519_impl(uint8_t *output, const uint8_t *secret,
const uint8_t *basepoint);
#endif
The `crypto_curve25519.c` file and the `test_crypto.c` file both define
`CRYPTO_CURVE25519_PRIVATE`, so they can see this declaration.
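In the test file, the pattern for using such a declaration might look like
this sketch:

    /* In test_crypto.c: request the private declarations before including
     * the header, so the STATIC function is visible to the tests. */
    #define CRYPTO_CURVE25519_PRIVATE
    #include "crypto_curve25519.h"

    /* Now curve25519_impl() can be called directly from test code. */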
### STOP! Does this test really test?
When writing tests, it's not enough to just generate coverage on all the
lines of the code that you're testing: It's important to make sure that
the test _really tests_ the code.
For example, here is a _bad_ test for the unlink() function (which is
supposed to remove a file).
static void
test_unlink_badly(void *arg)
{
  (void) arg;
  int r;
  const char *fname = get_fname("tmpfile");
  /* If the file isn't there, unlink returns -1 and sets ENOENT */
  r = unlink(fname);
  tt_int_op(r, OP_EQ, -1);
  tt_int_op(errno, OP_EQ, ENOENT);
  /* If the file DOES exist, unlink returns 0. */
  write_str_to_file(fname, "hello world", 0);
  r = unlink(fname);
  tt_int_op(r, OP_EQ, 0);
 done:
  ;
}
This test might get very high coverage on unlink(). So why is it a
bad test? Because it doesn't check that unlink() *actually removes the
named file*!
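One fix is to assert, after the successful unlink(), that the file is
really gone. A sketch of the extra assertions (assuming `<fcntl.h>` is
included for `open()`):

    /* After unlink() succeeds, verify the file is actually gone. */
    r = unlink(fname);
    tt_int_op(r, OP_EQ, 0);
    r = open(fname, O_RDONLY);
    tt_int_op(r, OP_EQ, -1);
    tt_int_op(errno, OP_EQ, ENOENT);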
Remember, the purpose of a test is to succeed if the code does what
it's supposed to do, and fail otherwise. Try to design your tests so
that they check for the code's intended and documented functionality
as much as possible.
### Mock functions for testing in isolation
Often we want to test that a function works right, but the function to
be tested depends on other functions whose behavior is hard to observe,
or which require a working Tor network, or something like that.
To write tests for this case, you can replace the underlying functions
with testing stubs while your unit test is running. You need to declare
the underlying function as 'mockable', as follows:
MOCK_DECL(returntype, functionname, (argument list));
and then later implement it as:
MOCK_IMPL(returntype, functionname, (argument list))
{
/* implementation here */
}
For example, if you had a 'connect to remote server' function, you could
declare it as:
MOCK_DECL(int, connect_to_remote, (const char *name, status_t *status));
When you declare a function this way, it will be declared as normal in
regular builds, but when the module is built for testing, it is declared
as a function pointer initialized to the actual implementation.
In your tests, if you want to override the function with a temporary
replacement, you say:
MOCK(functionname, replacement_function_name);
And later, you can restore the original function with:
UNMOCK(functionname);
For more information, see the definitions of this mocking logic in
`testsupport.h`.
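As an illustration, a test that wants the hypothetical `connect_to_remote()`
declared above to fail unconditionally might use these macros as in this
sketch (`code_under_test()` is likewise made up for the example):

    /* A stand-in that pretends every connection attempt fails. */
    static int
    connect_to_remote_fails(const char *name, status_t *status)
    {
      (void) name;
      (void) status;
      return -1;
    }

    static void
    test_handles_connection_failure(void *arg)
    {
      (void) arg;
      MOCK(connect_to_remote, connect_to_remote_fails);
      /* The code under test should now see every connection fail. */
      tt_int_op(code_under_test(), OP_EQ, -1);
     done:
      UNMOCK(connect_to_remote);
    }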
### Okay but what should my tests actually do?
We talk above about "test coverage" -- making sure that your tests visit
every line of code, or every branch of code. But visiting the code isn't
enough: we want to verify that it's correct.
So when writing tests, try to make tests that should pass with any correct
implementation of the code, and that should fail if the code doesn't do what
it's supposed to do.
You can write "black-box" tests or "glass-box" tests. A black-box test is
one that you write without looking at the structure of the function. A
glass-box one is one you implement while looking at how the function is
implemented.
In either case, make sure to consider common cases *and* edge cases; success
cases and failure cases.
For example, consider testing this function:
/** Remove all elements E from sl such that E==element. Preserve
* the order of any elements before E, but elements after E can be
* rearranged.
*/
void smartlist_remove(smartlist_t *sl, const void *element);
In order to test it well, you should write tests for at least all of the
following cases. (These would be black-box tests, since we're only looking
at the declared behavior of the function.)
* Remove an element that is in the smartlist.
* Remove an element that is not in the smartlist.
* Remove an element that appears in the smartlist more than once.
And your tests should verify that it behaves correctly. At minimum, you should
test:
* That other elements before E are in the same order after you call the
function.
* That the target element is really removed.
* That _only_ the target element is removed.
When you consider edge cases, you might try:
* Remove an element from an empty list.
* Remove an element from a singleton list containing that element.
* Remove an element from a list containing several instances of that
element, and nothing else.
Now let's look at the implementation:
void
smartlist_remove(smartlist_t *sl, const void *element)
{
int i;
if (element == NULL)
return;
for (i=0; i < sl->num_used; i++)
if (sl->list[i] == element) {
sl->list[i] = sl->list[--sl->num_used]; /* swap with the end */
i--; /* so we process the new i'th element */
sl->list[sl->num_used] = NULL;
}
}
Based on the implementation, we now see three more edge cases to test:
* Removing NULL from the list.
* Removing an element from the end of the list
* Removing an element from a position other than the end of the list.
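As an illustration, a black-box test covering a few of the cases above
might look like this sketch (it assumes the usual smartlist API, such as
`smartlist_contains()`):

    static void
    test_smartlist_remove(void *arg)
    {
      (void) arg;
      smartlist_t *sl = smartlist_new();
      int a = 1, b = 2, c = 3;
      /* Remove an element that is in the smartlist. */
      smartlist_add(sl, &a);
      smartlist_add(sl, &b);
      smartlist_add(sl, &c);
      smartlist_remove(sl, &b);
      tt_int_op(smartlist_len(sl), OP_EQ, 2);
      tt_assert(! smartlist_contains(sl, &b));
      /* Elements before the removed one keep their order. */
      tt_ptr_op(smartlist_get(sl, 0), OP_EQ, &a);
      /* Remove an element that is not in the smartlist. */
      smartlist_remove(sl, &b);
      tt_int_op(smartlist_len(sl), OP_EQ, 2);
     done:
      smartlist_free(sl);
    }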
### What should my tests NOT do?
Tests shouldn't require a network connection.
Whenever possible, tests shouldn't take more than a second. Put the test
into test/slow if it genuinely needs to be run.
Tests should not alter global state unless they run with `TT_FORK`: Tests
should not require other tests to be run before or after them.
Tests should not leak memory or other resources. To find out if your tests
are leaking memory, run them under valgrind (see HelpfulTools.txt for more
information on how to do that).
When possible, tests should not be over-fit to the implementation. That is,
the test should verify that the documented behavior is implemented, but
should not break if other permissible behavior is later implemented.
### Advanced techniques: Namespaces
Sometimes, when you're doing a lot of mocking at once, it's convenient to
isolate your identifiers within a single namespace. If this were C++, we'd
already have namespaces, but for C, we do the best we can with macros and
token-pasting.
We have some macros defined for this purpose in `src/test/test.h`. To use
them, you define `NS_MODULE` to a prefix to be used for your identifiers, and
then use other macros in place of identifier names. See `src/test/test.h` for
more documentation.
Integration tests: Calling Tor from the outside
-----------------------------------------------
Some tests need to invoke Tor from the outside, and shouldn't run from the
same process as the Tor test program. Reasons for doing this might include:
* Testing the actual behavior of Tor when run from the command line
* Testing that a crash-handler correctly logs a stack trace
* Verifying that violating a sandbox or capability requirement will
actually crash the program.
* Needing to run as root in order to test capability inheritance or
user switching.
To add one of these, you generally want a new C program in `src/test`. Add it
to `TESTS` and `noinst_PROGRAMS` if it can run on its own and return success or
failure. If it needs to be invoked multiple times, or it needs to be
wrapped, add a new shell script to `TESTS`, and the new program to
`noinst_PROGRAMS`. If you need access to any environment variable from the
makefile (eg `${PYTHON}` for a python interpreter), then make sure that the
makefile exports them.
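For instance, a minimal standalone program of this kind might look like the
following sketch; it reports success or failure through its exit status,
which is all the `TESTS` harness needs:

    /* Hypothetical src/test/test-example-standalone.c */
    #include <stdio.h>

    int
    main(void)
    {
      /* Replace this check with a real check of externally visible
       * behavior. */
      if (1 + 1 != 2) {
        fprintf(stderr, "FAILED\n");
        return 1;
      }
      printf("OK\n");
      return 0;
    }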
Writing integration tests with Stem
-----------------------------------
The 'stem' library includes extensive tests for the Tor controller protocol.
You can run stem tests from tor with `make test-stem`, or see
`https://stem.torproject.org/faq.html#how-do-i-run-the-tests`.
To see what tests are available, have a look around the `test/*` directory in
stem. The first thing you'll notice is that there are both `unit` and `integ`
tests. The former are for tests of the facilities provided by stem itself that
can be tested on their own, without the need to hook up a tor process. These
are less relevant, unless you want to develop a new stem feature. The latter,
however, are a very useful tool to write tests for controller features. They
provide a default environment with a connected tor instance that can be
modified and queried. Adding more integration tests is a great way to increase
the test coverage inside Tor, especially for controller features.
Let's assume you actually want to write a test for a previously untested
controller feature. I'm picking the `exit-policy/*` GETINFO queries. Since
these are a controller feature that we want to write an integration test for,
the right file to modify is
`https://gitweb.torproject.org/stem.git/tree/test/integ/control/controller.py`.
First off we notice that there is an integration test called
`test_get_exit_policy()` that's already written. This exercises the interaction
of stem's `Controller.get_exit_policy()` method, and is not relevant for our
test, since there are no stem methods that make use of all the `exit-policy/*`
queries (if there were, they'd likely be tested already; perhaps you'd like
to write such a stem feature, but I chose to just add tests).
Our test requires a tor controller connection, so we'll use the
`@require_controller` annotation for our `test_exit_policy()` method. We need a
controller instance, which we get from
`test.runner.get_runner().get_tor_controller()`. The attached Tor instance is
configured as a client, but the exit-policy GETINFO queries need a relay to
work, so we have to change the config (using `controller.set_options()`). This
is OK for us to do; we just have to remember to set DisableNetwork, so we don't
actually start an exit relay, and to undo the changes we made by calling
`controller.reset_conf()` at the end of our test. Additionally, we have to
configure a static Address for Tor to use, because it refuses to build a
descriptor when it can't guess a suitable IP address. Unfortunately, these
kinds of tripwires are everywhere. Don't forget to file appropriate tickets if
you notice any strange behaviour that seems totally unreasonable.
Check out the `test_exit_policy()` function in the file mentioned above to see
the final implementation for this test.
System testing with Chutney
---------------------------
The 'chutney' program configures and launches a set of Tor relays,
authorities, and clients on your local host. It has a `test network`
feature that sends traffic through them and verifies that the traffic
arrives correctly.
You can write new test networks by adding them to the `networks` directory. To add
them to Tor's tests, add them to the `test-network` or `test-network-all`
targets in `Makefile.am`.
(Adding new kinds of program to chutney will still require hacking the
code.)

View File

@ -1,98 +0,0 @@
# Using `simpleperf` to collect CPU profiling on Android
This document describes how you can use Android's `simpleperf`
command-line tool to get CPU profiling information from Tor via the
Orbot application. The tool is particularly useful for Tor development
because it is able to profile native applications on the platform, whereas
a lot of the normal tooling for the Android platform can only collect
information from Java-based applications.
## Prerequisites
Before using `simpleperf`, there are a couple of steps that must be
followed. You should make sure you have both a recent installation of
the Android Software Development Kit (SDK) and Native Development Kit
(NDK) installed. These can be found on the Android Developers website.
1. Follow the build instructions from the `BUILD` file in the Orbot
repository and build an Orbot APK (Android Package) file with
debugging enabled. When you build the native parts of the Orbot
application, make sure you run the `make -C external` command with
`DEBUG=1` as an additional parameter, so that the Orbot build process
does not strip the debug symbols from the Tor binary.
2. (Optional) Uninstall and clean up your old Orbot installation, which
was most likely downloaded from Google's Play Store or via F-Droid:
$ adb shell pm clear org.torproject.android
$ adb uninstall org.torproject.android
3. Install the Android Package you generated in step 1:
$ adb install /path/to/your/app-fullperm-debug.apk
4. Check on your device that the newly installed Orbot actually works
and behaves in the way you expect it to.
## Profiling using `simpleperf`
The `simpleperf` tool can be found in the `simpleperf/` subdirectory of
your Android NDK installation. In this
directory there is a set of Python files that will help you deploy the
tool to a device and collect the measurement data such that you can
analyze the results on your computer rather than on your phone.
1. Change directory to the location of the `simpleperf` directory.
2. Open the `app_profiler.config` file and change
`app_package_name` to `org.torproject.android` and `apk_file_path` to
the path of your Orbot Android Package (APK file).
3. Optionally change the duration parameter in the `record_options`
variable in `app_profiler.config` to the number of seconds for which
you would like to collect samples.
4. Run the app profiler using `python app_profiler.py`. This helper
script will push the `simpleperf` tool to your device, start the
profiler, and, once it has completed, copy the generated `perf.data`
file with the results over to your computer.
### Analyzing the results
You can inspect your resulting `perf.data` file via a simple GUI
program (`python report.py`) or via the command-line tool (`simpleperf
report`). I've found the GUI tool easier to navigate than the
command-line tool.
Passing the `-g` option to the command-line `simpleperf report` tool
allows you to see the call graph of functions and how much time was
spent on each call.
## Tips & Tricks
- When you have installed Orbot for the first time, you will notice
that if you get a shell on the Android device, there is no Tor binary
available. This is because Orbot unpacks the Tor binary the first time
it is executed and places it under the `app_bin/` directory on the
device.
To access binaries, `torrc` files, and other useful information on
the device, do the following:
$ adb shell
(device):/ $ run-as org.torproject.android
(device):/data/data/org.torproject.android $ ls
app_bin app_data cache databases files lib shared_prefs
Descriptors, control authentication cookie, state, and other files can be
found in the `app_data` directory. The `torrc` can be found in the `app_bin/`
directory.
- You can enable logging in Tor via the syslog (or Android) log
mechanism with:
$ adb shell
(device):/ $ run-as org.torproject.android
(device):/data/data/org.torproject.android $ echo -e "\nLog info syslog" >> app_bin/torrc
Start Tor the normal way via Orbot and collect the logs on your computer using
$ adb logcat

doc/WritingTests.txt Normal file
View File

@ -0,0 +1,273 @@
Writing tests for Tor: an incomplete guide
==========================================
Tor uses a variety of testing frameworks and methodologies to try to
keep from introducing bugs. The major ones are:
1. Unit tests written in C and shipped with the Tor distribution.
2. Integration tests written in Python and shipped with the Tor
distribution.
3. Integration tests written in Python and shipped with the Stem
library. Some of these use the Tor controller protocol.
4. System tests written in Python and SH, and shipped with the
Chutney package. These work by running many instances of Tor
locally, and sending traffic through them.
5. The Shadow network simulator.
How to run these tests
----------------------
=== The easy version
To run all the tests that come bundled with Tor, run "make check".
To run the Stem tests as well, fetch stem from the git repository,
set STEM_SOURCE_DIR to the checkout, and run "make test-stem".
To run the Chutney tests as well, fetch chutney from the git repository,
set CHUTNEY_PATH to the checkout, and run "make test-network".
To run all of the above, run "make test-full".
To run all of the above, plus tests that require a working connection to the
internet, run "make test-full-online".
=== Running particular subtests
The Tor unit tests are divided into separate programs and a couple of
bundled unit test programs.
Separate programs are easy. For example, to run the memwipe tests in
isolation, you just run ./src/test/test-memwipe.
To run tests within the unit test programs, you can specify the name
of the test. The string ".." can be used as a wildcard at the end of the
test name. For example, to run all the cell format tests, enter
"./src/test/test cellfmt/..".
Many tests that need to mess with global state run in forked subprocesses in
order to keep from contaminating one another. But when debugging a failing test,
you might want to run it without forking a subprocess. To do so, use the
"--no-fork" option with a single test. (If you specify it along with
multiple tests, they might interfere.)
You can turn on logging in the unit tests by passing one of "--debug",
"--info", "--notice", or "--warn". By default only errors are displayed.
Unit tests are divided into "./src/test/test" and "./src/test/test-slow".
The former are those that should finish in a few seconds; the latter tend to
take more time, and may include CPU-intensive operations, deliberate delays,
and stuff like that.
=== Finding test coverage
When you configure Tor with the --enable-coverage option, it should
build with support for coverage in the unit tests, and in a special
"tor-cov" binary.
Then, run the tests you'd like to see coverage from. If you have old
coverage output, you may need to run "reset-gcov" first.
Now you've got a bunch of files scattered around your build directories
called "*.gcda". In order to extract the coverage output from them, make a
temporary directory for them and run "./scripts/test/coverage ${TMPDIR}",
where ${TMPDIR} is the temporary directory you made. This will create a
".gcov" file for each source file under test, containing that file's source
annotated with the number of times the tests hit each line. (You'll need to
have gcov installed.)
You can get a summary of the test coverage for each file by running
"./scripts/test/cov-display ${TMPDIR}/*" . Each line lists the file's name,
the number of uncovered lines, the total number of lines, and the
coverage percentage.
For a summary of the test coverage for each _function_, run
"./scripts/test/cov-display -f ${TMPDIR}/*" .
=== Comparing test coverage
Sometimes it's useful to compare test coverage for a branch you're writing to
coverage from another branch (such as git master, for example). But you
can't run "diff" on the two coverage outputs directly, since the actual
number of times each line is executed isn't so important, and isn't wholly
deterministic.
Instead, follow the instructions above for each branch, creating a separate
temporary directory for each. Then, run "./scripts/test/cov-diff ${D1}
${D2}", where D1 and D2 are the directories you want to compare. This will
produce a diff of the two directories, with all lines normalized to be either
covered or uncovered.
To count new or modified uncovered lines in D2, you can run:
    ./scripts/test/cov-diff ${D1} ${D2} | grep '^+ *#' | wc -l
What kinds of test should I write?
----------------------------------
Integration testing and unit testing are complementary: it's probably a
good idea to make sure that your code is hit by both if you can.
If your code is very low-level, and its behavior is easily described in
terms of a relation between inputs and outputs, or a set of state
transitions, then it's a natural fit for unit tests. (If not, please
consider refactoring it until most of it _is_ a good fit for unit
tests!)
If your code adds new externally visible functionality to Tor, it would
be great to have a test for that functionality. That's where
integration tests more usually come in.
Unit and regression tests: Does this function do what it's supposed to?
-----------------------------------------------------------------------
Most of Tor's unit tests are made using the "tinytest" testing framework.
You can see a guide to using it in the tinytest manual at
https://github.com/nmathewson/tinytest/blob/master/tinytest-manual.md
To add a new test of this kind, either edit an existing C file in src/test/,
or create a new C file there. Each test is a single function that must
be indexed in the table at the end of the file. We use the label "done:" as
a cleanup point for all test functions.
(Make sure you read tinytest-manual.md before proceeding.)
I use the terms "unit test" and "regression test" very sloppily here.
=== A simple example
Here's an example of a test function for a simple function in util.c:
    static void
    test_util_writepid(void *arg)
    {
      (void) arg;
      char *contents = NULL;
      const char *fname = get_fname("tmp_pid");
      unsigned long pid;
      char c;

      write_pidfile(fname);
      contents = read_file_to_str(fname, 0, NULL);
      tt_assert(contents);

      int n = sscanf(contents, "%lu\n%c", &pid, &c);
      tt_int_op(n, OP_EQ, 1);
      tt_int_op(pid, OP_EQ, getpid());

     done:
      tor_free(contents);
    }
This should look pretty familiar to you if you've read the tinytest
manual. One thing to note here is that we use the testing-specific
function "get_fname" to generate a file with respect to a temporary
directory that the tests use. You don't need to delete the file;
it will get removed when the tests are done.
Also note our use of OP_EQ instead of == in the tt_int_op() calls.
We define OP_* macros to use instead of the binary comparison
operators so that analysis tools can more easily parse our code.
(Coccinelle really hates to see == used as a macro argument.)
Finally, remember that by convention, all *_free() functions that
Tor defines are defined to accept NULL harmlessly. Thus, you don't
need to say "if (contents)" in the cleanup block.
=== Exposing static functions for testing
Sometimes you need to test a function, but you don't want to expose
it outside its usual module.
To support this, Tor's build system compiles a testing version of
each module, with extra identifiers exposed. If you want to
declare a function as static but available for testing, use the
macro "STATIC" instead of "static". Then, make sure there's a
macro-protected declaration of the function in the module's header.
For example, crypto_curve25519.h contains:
    #ifdef CRYPTO_CURVE25519_PRIVATE
    STATIC int curve25519_impl(uint8_t *output, const uint8_t *secret,
                               const uint8_t *basepoint);
    #endif
The crypto_curve25519.c file and the test_crypto.c file both define
CRYPTO_CURVE25519_PRIVATE, so they can see this declaration.
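A test file then gets at the static function roughly like this (a sketch
of the mechanism described above):

    #define CRYPTO_CURVE25519_PRIVATE
    #include "crypto_curve25519.h"

    /* Now the tests here can call curve25519_impl() directly. */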
=== Mock functions for testing in isolation
Often we want to test that a function works right, but the function to
be tested depends on other functions whose behavior is hard to observe,
or which require a working Tor network, or something like that.
To write tests for this case, you can replace the underlying functions
with testing stubs while your unit test is running. You need to declare
the underlying function as 'mockable', as follows:
MOCK_DECL(returntype, functionname, (argument list));
and then later implement it as:
MOCK_IMPL(returntype, functionname, (argument list))
{
/* implementation here */
}
For example, if you had a 'connect to remote server' function, you could
declare it as:
MOCK_DECL(int, connect_to_remote, (const char *name, status_t *status));
When you declare a function this way, it will be declared as normal in
regular builds, but when the module is built for testing, it is declared
as a function pointer initialized to the actual implementation.
In your tests, if you want to override the function with a temporary
replacement, you say:
MOCK(functionname, replacement_function_name);
And later, you can restore the original function with:
UNMOCK(functionname);
For more information, see the definitions of this mocking logic in
testsupport.h.
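Putting it together, a test can swap in a canned replacement like this
(a sketch; connect_to_remote() and status_t are the hypothetical examples
from above, and STATUS_OK is an invented value):

    static int
    mock_connect_ok(const char *name, status_t *status)
    {
      (void) name;
      *status = STATUS_OK; /* hypothetical "it worked" value */
      return 0;
    }

    static void
    test_connections(void *arg)
    {
      (void) arg;
      MOCK(connect_to_remote, mock_connect_ok);
      /* ... exercise code that calls connect_to_remote() ... */
      tt_assert(1);
     done:
      UNMOCK(connect_to_remote);
    }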
=== Advanced techniques: Namespaces
XXXX write this. danah boyd made us some really awesome stuff here.
Integration tests: Calling Tor from the outside
-----------------------------------------------
XXXX WRITEME
Writing integration tests with Stem
-----------------------------------
XXXX WRITEME
System testing with Chutney
---------------------------
XXXX WRITEME
Who knows what evil lurks in the timings of networks? The Shadow knows!
-----------------------------------------------------------------------
XXXX WRITEME

View File

@ -19,7 +19,7 @@ if [ "$1" = "html" ]; then
base=${output%%.html.in}
if [ "$2" != none ]; then
TZ=UTC "$2" -d manpage -o $output $input;
"$2" -d manpage -o $output $input;
else
echo "==================================";
echo;

View File

@ -42,7 +42,7 @@ Here's a workaround:
Before even building the source RPM, install fedora-packager and instruct
the build system to use rpmbuild-md5 like this:
dnf install fedora-packager
yum install fedora-packager
export RPMBUILD=rpmbuild-md5
Then proceed as usual to create the source RPM and binary RPMs:

View File

@ -12,11 +12,17 @@
# part of the source distribution, so that people without asciidoc can
# just use the .1 and .html files.
all_mans = doc/tor doc/tor-gencert doc/tor-resolve doc/torify
base_mans = doc/tor doc/tor-gencert doc/tor-resolve doc/torify
all_mans = $(base_mans)
if USE_FW_HELPER
install_mans = $(all_mans)
else
install_mans = $(base_mans)
endif
if USE_ASCIIDOC
nodist_man1_MANS = $(all_mans:=.1)
doc_DATA = $(all_mans:=.html)
nodist_man1_MANS = $(install_mans:=.1)
doc_DATA = $(install_mans:=.html)
html_in = $(all_mans:=.html.in)
man_in = $(all_mans:=.1.in)
txt_in = $(all_mans:=.1.txt)
@ -28,23 +34,12 @@ nodist_man1_MANS =
doc_DATA =
endif
EXTRA_DIST+= doc/asciidoc-helper.sh \
EXTRA_DIST+= doc/HACKING doc/asciidoc-helper.sh \
$(html_in) $(man_in) $(txt_in) \
doc/state-contents.txt \
doc/torrc_format.txt \
doc/TUNING \
doc/HACKING/README.1st.md \
doc/HACKING/CodingStandards.md \
doc/HACKING/CodingStandardsRust.md \
doc/HACKING/Fuzzing.md \
doc/HACKING/GettingStarted.md \
doc/HACKING/GettingStartedRust.md \
doc/HACKING/HelpfulTools.md \
doc/HACKING/HowToReview.md \
doc/HACKING/Module.md \
doc/HACKING/ReleasingTor.md \
doc/HACKING/Tracing.md \
doc/HACKING/WritingTests.md
doc/WritingTests.txt
docdir = @docdir@
@ -89,5 +84,5 @@ doc/tor-gencert.1: doc/tor-gencert.1.in
doc/tor-resolve.1: doc/tor-resolve.1.in
doc/torify.1: doc/torify.1.in
CLEANFILES+= $(asciidoc_product)
CLEANFILES+= $(asciidoc_product) config.log
DISTCLEANFILES+= $(html_in) $(man_in)

View File

@ -68,7 +68,7 @@ OPTIONS
Number of months that the certificate should be valid. Default: 12.
**--passphrase-fd** __FILEDES__::
Filedescriptor to read the passphrase from. Ends at the first NUL or
Filedescriptor to read the file descriptor from. Ends at the first NUL or
newline. Default: read from the terminal.
**-a** __address__:__port__::

File diff suppressed because it is too large

View File

@ -17,23 +17,25 @@ SYNOPSIS
DESCRIPTION
-----------
**torify** is a simple wrapper that calls torsocks with a tor-specific
configuration file.
**torify** is a simple wrapper that attempts to find the best underlying Tor
wrapper available on a system. It calls torsocks with a tor specific
configuration file. +
It is provided for backward compatibility; instead you should use torsocks.
torsocks is an improved wrapper that explicitly rejects UDP, safely resolves DNS
lookups and properly socksifies your TCP connections. +
Please note that since both method use LD_PRELOAD, torify cannot be applied to
suid binaries.
WARNING
-------
When used with torsocks, torify should not leak DNS requests or UDP data.
When used with torsocks, torify should not leak DNS requests or UDP data. +
torify can leak ICMP data.
torify will not ensure that different requests are processed on
different circuits.
Both will leak ICMP data.
SEE ALSO
--------
**tor**(1), **torsocks**(1)
**tor**(1), **tor-resolve**(1), **torsocks**(1)
AUTHORS
-------

View File

@ -18,10 +18,9 @@ does, not what it should do.
; specified in RFC5234.
; A file is interpreted as every Entry in the file, in order.
TorrcFile = *Line [ UnterminatedLine ]
TorrcFile = *Line
Line = BlankLine LF / Entry LF
UnterminatedLine = BlankLine / Entry
Line = BlankLine / Entry
BlankLine = *WSP OptComment LF
BlankLine =/ *WSP LF
@ -70,12 +69,6 @@ does, not what it should do.
; Anything besides NUL and LF
NonLF = %x01-%x09 / %x0b - %xff
; Note that on windows, we open our configuration files in "text" mode,
; which causes CRLF pairs to be interpreted as LF. So, on windows:
; LF = [ %x0d ] %x0a
; but everywhere else,
LF = %0x0a
OCTDIG = '0' - '7'
KC = Any character except an isspace() character or '#' or NUL
@ -182,7 +175,7 @@ and\
friends
# Backslashes in the middle of a line are included as-is. The key of
# this one is "Too" and the value is "Many\\Backsl\ashes \here" (with
# this one is "Too" and the value is "Many\\Backsl\ashes here" (with
# backslashes in that last string as-is)
Too \
Many\\\
@ -192,7 +185,7 @@ here
# And here's the really yucky part. If a comment appears in a multi-line
# entry, the entry is still able to continue on the next line, as in the
# following, where the key is "This" and the value is
# "entry and some are silly"
# "entry and some are silly"
This entry \
# has comments \
and some \

View File

@ -41,8 +41,8 @@ AU_ALIAS([VL_CHECK_SIGN], [AX_CHECK_SIGN])
AC_DEFUN([AX_CHECK_SIGN], [
typename=`echo $1 | sed "s/@<:@^a-zA-Z0-9_@:>@/_/g"`
AC_CACHE_CHECK([whether $1 is signed], ax_cv_decl_${typename}_signed, [
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[$4]],
[[ int foo @<:@ 1 - 2 * !((($1) -1) < 0) @:>@ ]])],
AC_TRY_COMPILE([$4],
[ int foo @<:@ 1 - 2 * !((($1) -1) < 0) @:>@ ],
[ eval "ax_cv_decl_${typename}_signed=\"yes\"" ],
[ eval "ax_cv_decl_${typename}_signed=\"no\"" ])])
symbolname=`echo $1 | sed "s/@<:@^a-zA-Z0-9_@:>@/_/g" | tr "a-z" "A-Z"`

View File

@ -79,29 +79,32 @@ AC_DEFUN([AC_PC_FROM_UCONTEXT],
if ! $pc_field_found; then
# Prefer sys/ucontext.h to ucontext.h, for OS X's sake.
if test "x$ac_cv_header_cygwin_signal_h" = xyes; then
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <cygwin/signal.h>]],
[[ucontext_t u; return u.$pc_field == 0;]])],
AC_TRY_COMPILE([#define _GNU_SOURCE 1
#include <cygwin/signal.h>],
[ucontext_t u; return u.$pc_field == 0;],
AC_DEFINE_UNQUOTED(PC_FROM_UCONTEXT, $pc_field,
How to access the PC from a struct ucontext)
AC_MSG_RESULT([$pc_field])
pc_field_found=true)
elif test "x$ac_cv_header_sys_ucontext_h" = xyes; then
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/ucontext.h>]],
[[ucontext_t u; return u.$pc_field == 0;]])],
AC_TRY_COMPILE([#define _GNU_SOURCE 1
#include <sys/ucontext.h>],
[ucontext_t u; return u.$pc_field == 0;],
AC_DEFINE_UNQUOTED(PC_FROM_UCONTEXT, $pc_field,
How to access the PC from a struct ucontext)
AC_MSG_RESULT([$pc_field])
pc_field_found=true)
elif test "x$ac_cv_header_ucontext_h" = xyes; then
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <ucontext.h>]],
[[ucontext_t u; return u.$pc_field == 0;]])],
AC_TRY_COMPILE([#define _GNU_SOURCE 1
#include <ucontext.h>],
[ucontext_t u; return u.$pc_field == 0;],
AC_DEFINE_UNQUOTED(PC_FROM_UCONTEXT, $pc_field,
How to access the PC from a struct ucontext)
AC_MSG_RESULT([$pc_field])
pc_field_found=true)
else # hope some standard header gives it to us
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]],
[[ucontext_t u; return u.$pc_field == 0;]])],
AC_TRY_COMPILE([],
[ucontext_t u; return u.$pc_field == 0;],
AC_DEFINE_UNQUOTED(PC_FROM_UCONTEXT, $pc_field,
How to access the PC from a struct ucontext)
AC_MSG_RESULT([$pc_field])
@ -114,8 +117,8 @@ AC_DEFUN([AC_PC_FROM_UCONTEXT],
pc_fields="$pc_fields sc_rip" # OpenBSD (x86_64)
for pc_field in $pc_fields; do
if ! $pc_field_found; then
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <signal.h>]],
[[ucontext_t u; return u.$pc_field == 0;]])],
AC_TRY_COMPILE([#include <signal.h>],
[ucontext_t u; return u.$pc_field == 0;],
AC_DEFINE_UNQUOTED(PC_FROM_UCONTEXT, $pc_field,
How to access the PC from a struct ucontext)
AC_MSG_RESULT([$pc_field])

View File

@ -1,6 +0,0 @@
@@
expression n, d;
@@
- (((n) + (d) - 1) / (d))
+ CEIL_DIV(n, d)

View File

@ -1,11 +0,0 @@
#!/usr/bin/perl -w -p -i
next if m#^ */\*# or m#^ *\* #;
s/<([,)])/OP_LT$1/;
s/(?<=[\s,])>([,)])/OP_GT$1/;
#s/>([,)])/OP_GT$1/;
s/==([,)])/OP_EQ$1/;
s/>=([,)])/OP_GE$1/;
s/<=([,)])/OP_LE$1/;
s/!=([,)])/OP_NE$1/;

View File

@ -1,49 +0,0 @@
@@
int e;
constant c;
@@
(
- tt_assert(e == c)
+ tt_int_op(e, OP_EQ, c)
|
- tt_assert(e != c)
+ tt_int_op(e, OP_NE, c)
|
- tt_assert(e < c)
+ tt_int_op(e, OP_LT, c)
|
- tt_assert(e <= c)
+ tt_int_op(e, OP_LE, c)
|
- tt_assert(e > c)
+ tt_int_op(e, OP_GT, c)
|
- tt_assert(e >= c)
+ tt_int_op(e, OP_GE, c)
)
@@
unsigned int e;
constant c;
@@
(
- tt_assert(e == c)
+ tt_uint_op(e, OP_EQ, c)
|
- tt_assert(e != c)
+ tt_uint_op(e, OP_NE, c)
|
- tt_assert(e < c)
+ tt_uint_op(e, OP_LT, c)
|
- tt_assert(e <= c)
+ tt_uint_op(e, OP_LE, c)
|
- tt_assert(e > c)
+ tt_uint_op(e, OP_GT, c)
|
- tt_assert(e >= c)
+ tt_uint_op(e, OP_GE, c)
)

View File

@ -1,11 +0,0 @@
@@
expression * e;
@@
(
- tt_assert(e != NULL)
+ tt_ptr_op(e, OP_NE, NULL)
|
- tt_assert(e == NULL)
+ tt_ptr_op(e, OP_EQ, NULL)
)

View File

@ -1,5 +0,0 @@
@@
@@
- tt_assert(0)
+ tt_abort()

View File

@ -1,160 +0,0 @@
#!/usr/bin/python
FUZZERS = """
consensus
descriptor
diff
diff-apply
extrainfo
hsdescv2
hsdescv3
http
http-connect
iptsv2
microdesc
vrs
"""
PREAMBLE = r"""
FUZZING_CPPFLAGS = \
$(src_test_AM_CPPFLAGS) $(TEST_CPPFLAGS)
FUZZING_CFLAGS = \
$(AM_CFLAGS) $(TEST_CFLAGS)
FUZZING_LDFLAG = \
@TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ @TOR_LDFLAGS_libevent@
FUZZING_LIBS = \
src/or/libtor-testing.a \
src/common/libor-crypto-testing.a \
$(LIBKECCAK_TINY) \
$(LIBDONNA) \
src/common/libor-testing.a \
src/common/libor-ctime-testing.a \
src/common/libor-event-testing.a \
src/trunnel/libor-trunnel-testing.a \
$(rust_ldadd) \
@TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ \
@TOR_LIBEVENT_LIBS@ \
@TOR_OPENSSL_LIBS@ @TOR_LIB_WS32@ @TOR_LIB_GDI@ @TOR_LIB_USERENV@ \
@CURVE25519_LIBS@ \
@TOR_SYSTEMD_LIBS@ \
@TOR_LZMA_LIBS@ \
@TOR_ZSTD_LIBS@
oss-fuzz-prereqs: \
src/or/libtor-testing.a \
src/common/libor-crypto-testing.a \
$(LIBKECCAK_TINY) \
$(LIBDONNA) \
src/common/libor-testing.a \
src/common/libor-ctime-testing.a \
src/common/libor-event-testing.a \
src/trunnel/libor-trunnel-testing.a
noinst_HEADERS += \
src/test/fuzz/fuzzing.h
LIBFUZZER = -lFuzzer
LIBFUZZER_CPPFLAGS = $(FUZZING_CPPFLAGS) -DLLVM_FUZZ
LIBFUZZER_CFLAGS = $(FUZZING_CFLAGS)
LIBFUZZER_LDFLAG = $(FUZZING_LDFLAG)
LIBFUZZER_LIBS = $(FUZZING_LIBS) $(LIBFUZZER) -lstdc++
LIBOSS_FUZZ_CPPFLAGS = $(FUZZING_CPPFLAGS) -DLLVM_FUZZ
LIBOSS_FUZZ_CFLAGS = $(FUZZING_CFLAGS)
"""
POSTAMBLE = r"""
noinst_PROGRAMS += $(FUZZERS) $(LIBFUZZER_FUZZERS)
noinst_LIBRARIES += $(OSS_FUZZ_FUZZERS)
oss-fuzz-fuzzers: oss-fuzz-prereqs $(OSS_FUZZ_FUZZERS)
fuzzers: $(FUZZERS) $(LIBFUZZER_FUZZERS)
test-fuzz-corpora: $(FUZZERS)
$(top_srcdir)/src/test/fuzz_static_testcases.sh
"""
########### No user serviceable parts will follow.
PREAMBLE = PREAMBLE.strip()
POSTAMBLE = POSTAMBLE.strip() # If I use it, it's a word!
FUZZERS = FUZZERS.split()
FUZZERS.sort()
WARNING = """
# This file was generated by fuzzing_include_am.py; do not hand-edit unless
# you enjoy having your changes erased.
""".strip()
print(WARNING)
print(PREAMBLE)
print("\n# ===== AFL fuzzers")
def get_id_name(s):
return s.replace("-", "_")
for fuzzer in FUZZERS:
idname = get_id_name(fuzzer)
print("""\
src_test_fuzz_fuzz_{name}_SOURCES = \\
src/test/fuzz/fuzzing_common.c \\
src/test/fuzz/fuzz_{name}.c
src_test_fuzz_fuzz_{name}_CPPFLAGS = $(FUZZING_CPPFLAGS)
src_test_fuzz_fuzz_{name}_CFLAGS = $(FUZZING_CFLAGS)
src_test_fuzz_fuzz_{name}_LDFLAGS = $(FUZZING_LDFLAG)
src_test_fuzz_fuzz_{name}_LDADD = $(FUZZING_LIBS)
""".format(name=idname))
print("FUZZERS = \\")
print(" \\\n".join("\tsrc/test/fuzz/fuzz-{name}".format(name=fuzzer)
for fuzzer in FUZZERS))
print("\n# ===== libfuzzer")
print("\nif LIBFUZZER_ENABLED")
for fuzzer in FUZZERS:
idname = get_id_name(fuzzer)
print("""\
src_test_fuzz_lf_fuzz_{name}_SOURCES = \\
$(src_test_fuzz_fuzz_{name}_SOURCES)
src_test_fuzz_lf_fuzz_{name}_CPPFLAGS = $(LIBFUZZER_CPPFLAGS)
src_test_fuzz_lf_fuzz_{name}_CFLAGS = $(LIBFUZZER_CFLAGS)
src_test_fuzz_lf_fuzz_{name}_LDFLAGS = $(LIBFUZZER_LDFLAG)
src_test_fuzz_lf_fuzz_{name}_LDADD = $(LIBFUZZER_LIBS)
""".format(name=idname))
print("LIBFUZZER_FUZZERS = \\")
print(" \\\n".join("\tsrc/test/fuzz/lf-fuzz-{name}".format(name=fuzzer)
for fuzzer in FUZZERS))
print("""
else
LIBFUZZER_FUZZERS =
endif""")
print("\n# ===== oss-fuzz\n")
print("if OSS_FUZZ_ENABLED")
for fuzzer in FUZZERS:
idname = get_id_name(fuzzer)
print("""\
src_test_fuzz_liboss_fuzz_{name}_a_SOURCES = \\
$(src_test_fuzz_fuzz_{name}_SOURCES)
src_test_fuzz_liboss_fuzz_{name}_a_CPPFLAGS = $(LIBOSS_FUZZ_CPPFLAGS)
src_test_fuzz_liboss_fuzz_{name}_a_CFLAGS = $(LIBOSS_FUZZ_CFLAGS)
""".format(name=idname))
print("OSS_FUZZ_FUZZERS = \\")
print(" \\\n".join("\tsrc/test/fuzz/liboss-fuzz-{name}.a".format(name=fuzzer)
for fuzzer in FUZZERS))
print("""
else
OSS_FUZZ_FUZZERS =
endif""")
print("")
print(POSTAMBLE)

View File

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright 2014-2017, The Tor Project, Inc
# Copyright 2014-2015, The Tor Project, Inc
# See LICENSE for licensing information
# This script parses openssl headers to find ciphersuite names, determines
@ -13,13 +13,13 @@ import sys
EPHEMERAL_INDICATORS = [ "_EDH_", "_DHE_", "_ECDHE_" ]
BAD_STUFF = [ "_DES_40_", "MD5", "_RC4_", "_DES_64_",
"_SEED_", "_CAMELLIA_", "_NULL",
"_CCM_8", "_DES_", ]
"_SEED_", "_CAMELLIA_", "_NULL" ]
# these never get #ifdeffed.
MANDATORY = [
"TLS1_TXT_DHE_RSA_WITH_AES_256_SHA",
"TLS1_TXT_DHE_RSA_WITH_AES_128_SHA",
"SSL3_TXT_EDH_RSA_DES_192_CBC3_SHA",
]
def find_ciphers(filename):
@ -48,23 +48,15 @@ def usable_cipher(ciph):
# All fields we sort on, in order of priority.
FIELDS = [ 'cipher', 'fwsec', 'mode', 'digest', 'bitlength' ]
# Map from sorted fields to recognized value in descending order of goodness
FIELD_VALS = { 'cipher' : [ 'AES', 'CHACHA20' ],
FIELD_VALS = { 'cipher' : [ 'AES', 'DES'],
'fwsec' : [ 'ECDHE', 'DHE' ],
'mode' : [ 'POLY1305', 'GCM', 'CCM', 'CBC', ],
'digest' : [ 'n/a', 'SHA384', 'SHA256', 'SHA', ],
'mode' : [ 'GCM', 'CBC' ],
'digest' : [ 'SHA384', 'SHA256', 'SHA' ],
'bitlength' : [ '256', '128', '192' ],
}
class Ciphersuite(object):
def __init__(self, name, fwsec, cipher, bitlength, mode, digest):
if fwsec == 'EDH':
fwsec = 'DHE'
if mode in [ '_CBC3', '_CBC', '' ]:
mode = 'CBC'
elif mode == '_GCM':
mode = 'GCM'
self.name = name
self.fwsec = fwsec
self.cipher = cipher
@ -82,50 +74,42 @@ class Ciphersuite(object):
def parse_cipher(ciph):
m = re.match('(?:TLS1|SSL3)_TXT_(EDH|DHE|ECDHE)_RSA(?:_WITH)?_(AES|DES)_(256|128|192)(|_CBC|_CBC3|_GCM)_(SHA|SHA256|SHA384)$', ciph)
if m:
fwsec, cipher, bits, mode, digest = m.groups()
return Ciphersuite(ciph, fwsec, cipher, bits, mode, digest)
if not m:
print "/* Couldn't parse %s ! */"%ciph
return None
m = re.match('(?:TLS1|SSL3)_TXT_(EDH|DHE|ECDHE)_RSA(?:_WITH)?_(AES|DES)_(256|128|192)_CCM', ciph)
if m:
fwsec, cipher, bits = m.groups()
return Ciphersuite(ciph, fwsec, cipher, bits, "CCM", "n/a")
fwsec, cipher, bits, mode, digest = m.groups()
if fwsec == 'EDH':
fwsec = 'DHE'
m = re.match('(?:TLS1|SSL3)_TXT_(EDH|DHE|ECDHE)_RSA(?:_WITH)?_CHACHA20_POLY1305', ciph)
if m:
fwsec, = m.groups()
return Ciphersuite(ciph, fwsec, "CHACHA20", "256", "POLY1305", "n/a")
print "/* Couldn't parse %s ! */"%ciph
return None
if mode in [ '_CBC3', '_CBC', '' ]:
mode = 'CBC'
elif mode == '_GCM':
mode = 'GCM'
return Ciphersuite(ciph, fwsec, cipher, bits, mode, digest)
ALL_CIPHERS = []
for fname in sys.argv[1:]:
for c in find_ciphers(fname):
if usable_cipher(c):
parsed = parse_cipher(c)
if parsed != None:
ALL_CIPHERS.append(parsed)
ALL_CIPHERS += (parse_cipher(c)
for c in find_ciphers(fname)
if usable_cipher(c) )
ALL_CIPHERS.sort(key=Ciphersuite.sort_key)
indent = " "*7
for c in ALL_CIPHERS:
if c is ALL_CIPHERS[-1]:
colon = ''
colon = ';'
else:
colon = ' ":"'
if c.name in MANDATORY:
print "%s/* Required */"%indent
print '%s%s%s'%(indent,c.name,colon)
print " /* Required */"
print ' %s%s'%(c.name,colon)
else:
print "#ifdef %s"%c.name
print '%s%s%s'%(indent,c.name,colon)
print ' %s%s'%(c.name,colon)
print "#endif"
print '%s;'%indent

scripts/codegen/get_mozilla_ciphers.py Executable file → Normal file
View File

@ -1,6 +1,6 @@
#!/usr/bin/python
# coding=utf-8
# Copyright 2011-2017, The Tor Project, Inc
# Copyright 2011-2015, The Tor Project, Inc
# original version by Arturo Filastò
# See LICENSE for licensing information
@ -29,7 +29,7 @@ def ossl(s):
#####
# Read the cpp file to understand what Ciphers map to what name :
# Make "ciphers" a map from name used in the javascript to a cipher macro name
fileA = open(ff('security/manager/ssl/nsNSSComponent.cpp'),'r')
fileA = open(ff('security/manager/ssl/src/nsNSSComponent.cpp'),'r')
# The input format is a file containing exactly one section of the form:
# static CipherPref CipherPrefs[] = {
@ -71,7 +71,7 @@ for line in cipherLines:
assert not key_pending
key_pending = m.group(1)
continue
m = re.search(r'^\s*(\S+)(?:,\s*(true|false))+\s*}', line)
m = re.search(r'^\s*(\S+)(?:,\s*(true|false))?\s*}', line)
if m:
assert key_pending
key = key_pending
@ -107,7 +107,7 @@ fileC.close()
# Build a map enabled_ciphers from javascript name to "true" or "false",
# and an (unordered!) list of the macro names for those ciphers that are
# enabled.
fileB = open(ff('netwerk/base/security-prefs.js'), 'r')
fileB = open(ff('netwerk/base/public/security-prefs.js'), 'r')
enabled_ciphers = {}
for line in fileB:
@ -127,9 +127,9 @@ for k, v in enabled_ciphers.items():
#oSSLinclude = ('/usr/include/openssl/ssl3.h', '/usr/include/openssl/ssl.h',
# '/usr/include/openssl/ssl2.h', '/usr/include/openssl/ssl23.h',
# '/usr/include/openssl/tls1.h')
oSSLinclude = ['ssl3.h', 'ssl.h'
'ssl2.h', 'ssl23.h',
'tls1.h']
oSSLinclude = ('ssl/ssl3.h', 'ssl/ssl.h',
'ssl/ssl2.h', 'ssl/ssl23.h',
'ssl/tls1.h')
#####
# This reads the hex code for the ciphers that are used by firefox.
@ -155,12 +155,9 @@ for x in used_ciphers:
openssl_macro_by_hex = {}
all_openssl_macros = {}
for fl in oSSLinclude:
fname = ossl("include/openssl/"+fl)
if not os.path.exists(fname):
continue
fp = open(fname, 'r')
fp = open(ossl(fl), 'r')
for line in fp.readlines():
m = re.match('# *define\s+(\S+)\s+(\S+)', line)
m = re.match('#define\s+(\S+)\s+(\S+)', line)
if m:
value,key = m.groups()
if key.startswith('0x') and "_CK_" in value:

View File

@ -1,5 +1,5 @@
#!/usr/bin/python
# Copyright 2014-2017, The Tor Project, Inc.
# Copyright 2014-2015, The Tor Project, Inc.
# See LICENSE for license information
# This is a kludgey python script that uses ctypes and openssl to sign

View File

@ -5,13 +5,7 @@ if test "x$TRUNNEL_PATH" != "x"; then
export PYTHONPATH
fi
OPTIONS="--require-version=1.5.1"
python -m trunnel --require-version=1.4 ./src/trunnel/*.trunnel
# Get all .trunnel files recursively from that directory so we can support
# multiple sub-directories.
for file in `find ./src/trunnel/ -name '*.trunnel'`; do
python -m trunnel ${OPTIONS} $file
done
python -m trunnel ${OPTIONS} --write-c-files --target-dir=./src/ext/trunnel/
python -m trunnel --require-version=1.4 --write-c-files --target-dir=./src/ext/trunnel/

View File

@ -0,0 +1,219 @@
#!/usr/bin/python
import re
import sys
import copy
import cPickle
import os
class Parser:
def __init__(self):
self.calls = {}
def enter_func(self, name):
if self.infunc and not self.extern:
self.calls.setdefault(self.infunc, set()).update( self.calledfns )
self.calledfns = set()
self.infunc = name
self.extern = False
def parse_callgraph_file(self, inp):
self.infunc = None
self.extern = False
self.calledfns = set()
for line in inp:
m = re.match(r"Call graph node for function: '([^']+)'", line)
if m:
self.enter_func(m.group(1))
continue
m = re.match(r" CS<[^>]+> calls external node", line)
if m:
self.extern = True
m = re.match(r" CS<[^>]+> calls function '([^']+)'", line)
if m:
self.calledfns.add(m.group(1))
self.enter_func(None)
def extract_callgraph(self):
c = self.calls
self.calls = {}
return c
def transitive_closure(g):
passno = 0
changed = True
g = copy.deepcopy(g)
import random
while changed:
passno += 1
changed = False
keys = g.keys()
idx = 0
for k in keys:
idx += 1
print "Pass %d/?: %d/%d\r" %(passno, idx, len(keys)),
sys.stdout.flush()
newset = g[k].copy()
for fn in g[k]:
newset.update(g.get(fn, set()))
if len(newset) != len(g[k]):
g[k].update( newset )
changed = True
print
return g
def strongly_connected_components(g):
# From https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm, done stupidly.
index_of = {}
index = [ 0 ]
lowlink = {}
S = []
onStack = set()
all_sccs = []
def strongconnect(fn):
index_of[fn] = index[0]
lowlink[fn] = index[0]
index[0] += 1
S.append(fn)
onStack.add(fn)
for w in g.get(fn, []):
if w not in index_of:
strongconnect(w)
lowlink[fn] = min(lowlink[fn], lowlink[w])
elif w in onStack:
lowlink[fn] = min(lowlink[fn], index_of[w])
if lowlink[fn] == index_of[fn]:
this_scc = []
all_sccs.append(this_scc)
while True:
w = S.pop()
onStack.remove(w)
this_scc.append(w)
if w == fn:
break
for v in g.keys():
if v not in index_of:
strongconnect(v)
return all_sccs
def biggest_component(sccs):
return max(len(c) for c in sccs)
def connection_bottlenecks(callgraph):
callers = {}
for fn in callgraph:
for fn2 in callgraph[fn]:
callers.setdefault(fn2, set()).add(fn)
components = strongly_connected_components(callgraph)
components.sort(key=len)
big_component_fns = components[-1]
size = len(big_component_fns)
function_bottlenecks = fn_results = []
total = len(big_component_fns)
idx = 0
for fn in big_component_fns:
idx += 1
print "Pass 1/3: %d/%d\r"%(idx, total),
sys.stdout.flush()
cg2 = copy.deepcopy(callgraph)
del cg2[fn]
fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), fn) )
print
bcf_set = set(big_component_fns)
call_bottlenecks = fn_results = []
result_set = set()
total = len(big_component_fns)
idx = 0
for fn in big_component_fns:
fn_callers = callers[fn].intersection(bcf_set)
idx += 1
if len(fn_callers) != 1:
continue
print "Pass 2/3: %d/%d\r"%(idx, total),
sys.stdout.flush()
caller = fn_callers.pop()
assert len(fn_callers) == 0
cg2 = copy.deepcopy(callgraph)
cg2[caller].remove(fn)
fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), fn, "called by", caller) )
result_set.add( (caller, fn) )
print
total = len(big_component_fns)
idx = 0
for fn in big_component_fns:
fn_calls = callgraph[fn].intersection(bcf_set)
idx += 1
if len(fn_calls) != 1:
continue
print "Pass 3/3: %d/%d\r"%(idx, total),
sys.stdout.flush()
callee = fn_calls.pop()
if (fn, callee) in result_set:
continue
assert len(fn_calls) == 0
cg2 = copy.deepcopy(callgraph)
cg2[fn].remove(callee)
fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), callee, "called by", fn) )
print
return (function_bottlenecks, call_bottlenecks)
if __name__ == '__main__':
p = Parser()
for fname in sys.argv[1:]:
with open(fname, 'r') as f:
p.parse_callgraph_file(f)
sys.stdout.flush
print "Building callgraph"
callgraph = p.extract_callgraph()
print "Finding strongly connected components"
sccs = strongly_connected_components(callgraph)
print "Finding the transitive closure of the callgraph.."
closure = transitive_closure(callgraph)
print "Finding bottlenecks..."
bottlenecks = connection_bottlenecks(callgraph)
data = {
'callgraph' : callgraph,
'sccs' : sccs,
'closure' : closure,
'bottlenecks' : bottlenecks }
with open('callgraph.pkl', 'w') as f:
cPickle.dump(data, f)

Some files were not shown because too many files have changed in this diff