forked from cwtch.im/cwtch
Compare commits
289 Commits
Author | SHA1 | Date |
---|---|---|
Sarah Jamie Lewis | 262df80cda | |
Dan Ballard | bd75e44555 | |
Dan Ballard | 97fae65aa1 | |
Dan Ballard | dfcf7f8777 | |
Dan Ballard | af1d3a83a3 | |
Dan Ballard | 4d3f96b4b1 | |
Dan Ballard | 5a88f51dda | |
Sarah Jamie Lewis | 53c045f1ce | |
Dan Ballard | ea6fc6e3a1 | |
Sarah Jamie Lewis | 01ec46a97c | |
Dan Ballard | 2aee92aa64 | |
Sarah Jamie Lewis | 3c67c47bb0 | |
Sarah Jamie Lewis | 371fe89c9b | |
Dan Ballard | 695a622963 | |
Dan Ballard | e4fbefe496 | |
Sarah Jamie Lewis | 4a08331675 | |
Dan Ballard | 5aaa228691 | |
Sarah Jamie Lewis | e7f6dc3fa1 | |
Sarah Jamie Lewis | 2d72272127 | |
Sarah Jamie Lewis | f64d7ab1ed | |
Dan Ballard | 4ee0adf625 | |
Sarah Jamie Lewis | f08af1289f | |
Dan Ballard | ae2e717f76 | |
Sarah Jamie Lewis | e2ee27cc4d | |
Sarah Jamie Lewis | c4fb1f0d91 | |
Dan Ballard | f2e69f48d1 | |
Sarah Jamie Lewis | 799ac6aa7f | |
Dan Ballard | bb6edbac47 | |
Dan Ballard | b5d238f06b | |
Sarah Jamie Lewis | 821e64c360 | |
Dan Ballard | c43ec5497b | |
Sarah Jamie Lewis | 29c5214552 | |
Dan Ballard | 51d2c49c71 | |
Sarah Jamie Lewis | 37b9c72abb | |
Sarah Jamie Lewis | 815ec2565b | |
Sarah Jamie Lewis | 101cce532f | |
Sarah Jamie Lewis | 29852508d9 | |
Sarah Jamie Lewis | 71dd298a91 | |
Sarah Jamie Lewis | bc226173e4 | |
Sarah Jamie Lewis | 4c16ec379f | |
Sarah Jamie Lewis | d8ce3bee4e | |
Sarah Jamie Lewis | 6e64f65962 | |
Sarah Jamie Lewis | b959bfa3d9 | |
Dan Ballard | 8c16210407 | |
Sarah Jamie Lewis | 24855ca604 | |
Dan Ballard | 0465973a78 | |
Dan Ballard | 7cdb73d04e | |
Sarah Jamie Lewis | b7577d9fe3 | |
Sarah Jamie Lewis | ae3c3ace4b | |
Dan Ballard | 2246c6b3bc | |
Dan Ballard | b32cb0268c | |
Sarah Jamie Lewis | 1267f56a69 | |
Dan Ballard | 91195b7641 | |
Dan Ballard | 9f52e5de7b | |
Dan Ballard | 67678e14e4 | |
Sarah Jamie Lewis | 03b9b80268 | |
Dan Ballard | e1d6dd7253 | |
Sarah Jamie Lewis | 44993d00fd | |
Dan Ballard | 04dd8fa89c | |
Sarah Jamie Lewis | 5429cc6deb | |
Dan Ballard | 0c4bbe9ad1 | |
Sarah Jamie Lewis | 8be10930e7 | |
Dan Ballard | 1e1cbe6cd8 | |
Sarah Jamie Lewis | 475073f9f6 | |
Dan Ballard | 2b2dcb9f6b | |
erinn | db781042a9 | |
Dan Ballard | f519da5cde | |
Dan Ballard | f3fb1a42dd | |
erinn | 8ea11f8afd | |
Sarah Jamie Lewis | 45b1a10fff | |
Dan Ballard | 7640fc1c0d | |
Dan Ballard | 3671d91287 | |
erinn | 5ece75b3a8 | |
erinn | 94664d5604 | |
Sarah Jamie Lewis | 6697c73222 | |
Dan Ballard | fca4fe17eb | |
Sarah Jamie Lewis | 711e46ce10 | |
Dan Ballard | 600e1b31e6 | |
Sarah Jamie Lewis | 83faab35a3 | |
Sarah Jamie Lewis | 4af7773bec | |
Dan Ballard | f66f0273a6 | |
Dan Ballard | 558df278d7 | |
Sarah Jamie Lewis | a210986140 | |
Dan Ballard | 1b9fe4a1ce | |
Sarah Jamie Lewis | f056407cd0 | |
Sarah Jamie Lewis | eedfd872e5 | |
Dan Ballard | 30722415d5 | |
erinn | d15585eda7 | |
Sarah Jamie Lewis | 46bab264b4 | |
Dan Ballard | 862476293c | |
Sarah Jamie Lewis | 27e42afbbf | |
erinn | 2ed05dc8cd | |
Sarah Jamie Lewis | b607293c2d | |
erinn | 6ebf8ebc88 | |
Sarah Jamie Lewis | 3f6623bf42 | |
Sarah Jamie Lewis | de32784286 | |
Dan Ballard | 8959382449 | |
Sarah Jamie Lewis | 1620621d89 | |
erinn | 1f5a8685c4 | |
erinn | cadf00621b | |
Sarah Jamie Lewis | 8f3b607053 | |
Dan Ballard | ef480985a2 | |
Sarah Jamie Lewis | 84a10a9f5e | |
Sarah Jamie Lewis | 58535b0eb6 | |
Dan Ballard | c8541a5e36 | |
Dan Ballard | b845673d10 | |
Sarah Jamie Lewis | c4d55aee59 | |
Dan Ballard | 4d11547393 | |
Sarah Jamie Lewis | eb2a770085 | |
Dan Ballard | 742d36734b | |
Sarah Jamie Lewis | ffa2144b9f | |
Dan Ballard | ef2b95c54b | |
Sarah Jamie Lewis | 5d0c950319 | |
Dan Ballard | 0218159114 | |
Sarah Jamie Lewis | 5399a31a6f | |
Dan Ballard | f52144a67c | |
Sarah Jamie Lewis | 33a8922e43 | |
erinn | b93de92fdd | |
Sarah Jamie Lewis | 2b47c50d0d | |
Sarah Jamie Lewis | b9f7ab3757 | |
erinn | ac077521be | |
Sarah Jamie Lewis | 34e0a8f925 | |
Dan Ballard | a0dab022ad | |
Dan Ballard | 44173c9f52 | |
erinn | 4d7b925581 | |
Sarah Jamie Lewis | fea46b5aaf | |
erinn | 232a554e88 | |
Sarah Jamie Lewis | 2239463512 | |
erinn | ad7cddaacf | |
Dan Ballard | 2db7385ce0 | |
Sarah Jamie Lewis | 9e055f1ee0 | |
Sarah Jamie Lewis | 2fb5724332 | |
erinn | dcc65f0ffe | |
erinn | 17577f7fb5 | |
Sarah Jamie Lewis | ebe8db6c31 | |
erinn | 428b6d78f1 | |
Sarah Jamie Lewis | 0d026cda45 | |
Sarah Jamie Lewis | dc6cd56098 | |
Dan Ballard | 84e31f02fe | |
erinn | 9054006899 | |
Sarah Jamie Lewis | 16c8095e5f | |
Dan Ballard | ffc4254f18 | |
Sarah Jamie Lewis | d95b8aa9ed | |
erinn | f28cf6b781 | |
erinn | 157d4d740b | |
Sarah Jamie Lewis | 7b63fe79de | |
Sarah Jamie Lewis | 9bf6679572 | |
erinn | 4f39aec94b | |
Dan Ballard | 9a0c87f747 | |
Sarah Jamie Lewis | 1028f04ae7 | |
Sarah Jamie Lewis | e6d9e36220 | |
Dan Ballard | 591a09e25d | |
Sarah Jamie Lewis | 849deb14dc | |
Sarah Jamie Lewis | c3d797b2e1 | |
Sarah Jamie Lewis | 39d174ca26 | |
Dan Ballard | 6547cc0e5f | |
Sarah Jamie Lewis | 01a28ef3b0 | |
Sarah Jamie Lewis | 4a4c153103 | |
Dan Ballard | 3367f1a083 | |
Dan Ballard | d56a00842e | |
Sarah Jamie Lewis | a12f78cc9e | |
Sarah Jamie Lewis | 1e2ecb8e88 | |
Dan Ballard | d32bc34eb3 | |
Sarah Jamie Lewis | 746c5397fd | |
Dan Ballard | 3cfbb88a58 | |
Sarah Jamie Lewis | 55c5343b2b | |
Sarah Jamie Lewis | 1b8700c677 | |
erinn | 30d37da4e9 | |
Dan Ballard | 563597907c | |
erinn | e3eddb0d93 | |
erinn | 9734540aca | |
Sarah Jamie Lewis | b418eab92f | |
Sarah Jamie Lewis | 319c724c05 | |
Dan Ballard | 6f6fa90946 | |
Dan Ballard | 8a0d9c54fd | |
Sarah Jamie Lewis | 9421e471f6 | |
Dan Ballard | 4814e0b409 | |
Sarah Jamie Lewis | 60fb12054a | |
erinn | ddbf96e668 | |
erinn | 12206ce23c | |
erinn | d06b68d001 | |
erinn | 7ae9954c7d | |
erinn | 3b932bedc0 | |
Dan Ballard | b5992d9f85 | |
Dan Ballard | ff54059111 | |
Dan Ballard | b087d3667e | |
Sarah Jamie Lewis | dc489398ea | |
Sarah Jamie Lewis | fd70341dff | |
Dan Ballard | 655afb1aa0 | |
Sarah Jamie Lewis | 8083e6fa5c | |
Sarah Jamie Lewis | 8ca6589de0 | |
Dan Ballard | 85a2c44891 | |
Dan Ballard | 07ffd780b0 | |
Dan Ballard | 09fdb05168 | |
erinn | 6cce18120d | |
Sarah Jamie Lewis | ae84893d78 | |
Dan Ballard | b145420f21 | |
erinn | 3b9058c4ed | |
Dan Ballard | 9be69bda99 | |
Dan Ballard | 78fe3ca96e | |
Dan Ballard | bcc94c46e7 | |
Gareth @ BrassHornComms | 04b2191238 | |
Sarah Jamie Lewis | a94ed9eaff | |
erinn | 8cf26956e6 | |
erinn | 0baaba348b | |
Sarah Jamie Lewis | 1531885bcd | |
Sarah Jamie Lewis | f4c45e863e | |
Dan Ballard | 971f780881 | |
erinn | ce7d57a1a5 | |
erinn | 912894b55f | |
Sarah Jamie Lewis | 22c8941282 | |
Dan Ballard | d368ee1609 | |
erinn | 6fda5d223a | |
erinn | 9eeefae88d | |
Gareth @ BrassHornComms | 011866c1bc | |
Gareth @ BrassHornComms | 24a2fcc44d | |
Gareth @ BrassHornComms | 77bac24202 | |
Gareth @ BrassHornComms | 48fa6f386c | |
Gareth @ BrassHornComms | d641c83735 | |
Gareth @ BrassHornComms | 0ee32540f8 | |
Gareth @ BrassHornComms | 1001dbb3fe | |
Sarah Jamie Lewis | b58512693d | |
erinn | 2df386f6f6 | |
erinn | b06c54bb36 | |
erinn | 66459b3d37 | |
erinn | d3d8cd165f | |
Dan Ballard | 1569effa5b | |
erinn | f271638422 | |
Sarah Jamie Lewis | 51b6b9a0df | |
erinn | 6b98b32156 | |
erinn | c7f19fc677 | |
erinn | 5e580df127 | |
erinn | 66880628a4 | |
erinn | 475fea02d2 | |
erinn | ebf846e003 | |
erinn | 80339483f0 | |
Sarah Jamie Lewis | 007c72c43c | |
Dan Ballard | 4544535ad5 | |
Dan Ballard | e2b5e5db91 | |
Dan Ballard | 9a62468915 | |
Dan Ballard | be290d021d | |
Sarah Jamie Lewis | 1e04b1161e | |
Sarah Jamie Lewis | 6818dd5602 | |
Sarah Jamie Lewis | 8ab4752b44 | |
Sarah Jamie Lewis | 6620875b61 | |
Dan Ballard | dba22f3989 | |
Sarah Jamie Lewis | b8a3904c37 | |
Sarah Jamie Lewis | 85885f183b | |
Dan Ballard | a5ad2a6644 | |
Sarah Jamie Lewis | aef41ed4fa | |
Dan Ballard | 5f046e6d53 | |
Dan Ballard | 0351de1ff1 | |
Dan Ballard | beb9bc559c | |
Sarah Jamie Lewis | d37447f320 | |
Sarah Jamie Lewis | 2197134758 | |
Sarah Jamie Lewis | 291f717e7e | |
Sarah Jamie Lewis | c1474cbb94 | |
Dan Ballard | 68556bb4fc | |
Dan Ballard | 4e95427f44 | |
Dan Ballard | f217533044 | |
Dan Ballard | 298a996b7d | |
Sarah Jamie Lewis | 7ae8e8d8ee | |
Sarah Jamie Lewis | a459758070 | |
Dan Ballard | da37046658 | |
Dan Ballard | 195278325b | |
Sarah Jamie Lewis | 437c12db75 | |
Sarah Jamie Lewis | 8764417b0e | |
Dan Ballard | 0f6c0edc46 | |
Sarah Jamie Lewis | 16d21f582e | |
Dan Ballard | cded8755ca | |
Sarah Jamie Lewis | 90ff4bcba6 | |
Sarah Jamie Lewis | 8b64ba19fd | |
Sarah Jamie Lewis | 96b6cf6bb8 | |
Dan Ballard | c289052aae | |
Sarah Jamie Lewis | bcaa85fd5b | |
Dan Ballard | 6103287542 | |
Sarah Jamie Lewis | 7974bf4a4d | |
gpestana | bea9ac01f4 | |
Sarah Jamie Lewis | 3497dbe8e1 | |
Sarah Jamie Lewis | a8dbadccf0 | |
Dan Ballard | 5c3bce50d7 | |
Dan Ballard | 6592b37eda | |
Dan Ballard | fece08e87c | |
Sarah Jamie Lewis | e5bce02c7c | |
Dan Ballard | 57f9d68747 | |
Dan Ballard | 5a64ae8e27 | |
Sarah Jamie Lewis | 014c09fa0d | |
Dan Ballard | 0c0760f6f0 | |
Sarah Jamie Lewis | f1405680f6 |
40
.drone.yml
40
.drone.yml
|
@ -3,11 +3,43 @@ workspace:
|
|||
path: src/cwtch.im/cwtch
|
||||
|
||||
pipeline:
|
||||
build:
|
||||
fetch:
|
||||
image: golang
|
||||
commands:
|
||||
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/master/tor/tor
|
||||
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/master/tor/torrc
|
||||
- chmod a+x tor
|
||||
- export GO111MODULE=on
|
||||
- go mod vendor
|
||||
- go get -u golang.org/x/lint/golint
|
||||
quality:
|
||||
image: golang
|
||||
commands:
|
||||
- go list ./... | xargs go get
|
||||
- go get -u github.com/golang/lint/golint
|
||||
- go list ./... | xargs go vet
|
||||
- go list ./... | xargs golint -set_exit_status
|
||||
- sh testing/tests.sh
|
||||
units-tests:
|
||||
image: golang
|
||||
commands:
|
||||
- export PATH=$PATH:/go/src/cwtch.im/cwtch
|
||||
- sh testing/tests.sh
|
||||
integ-test:
|
||||
image: golang
|
||||
commands:
|
||||
- ./tor -f ./torrc
|
||||
- sleep 15
|
||||
- go test -v cwtch.im/cwtch/testing
|
||||
notify-email:
|
||||
image: drillster/drone-email
|
||||
host: build.openprivacy.ca
|
||||
port: 25
|
||||
skip_verify: true
|
||||
from: drone@openprivacy.ca
|
||||
when:
|
||||
status: [ failure ]
|
||||
notify-gogs:
|
||||
image: openpriv/drone-gogs
|
||||
when:
|
||||
event: pull_request
|
||||
status: [ success, changed, failure ]
|
||||
secrets: [gogs_account_token]
|
||||
gogs_url: https://git.openprivacy.ca
|
||||
|
|
|
@ -3,5 +3,11 @@
|
|||
*private_key*
|
||||
*.messages
|
||||
*.test
|
||||
*/*test_*
|
||||
*/*_test*
|
||||
*.json
|
||||
*/messages/*
|
||||
server/app/messages
|
||||
.reviewboardrc
|
||||
/vendor/
|
||||
/testing/tor/
|
||||
/storage/testing/
|
||||
/testing/storage/
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
image: golang:latest
|
||||
|
||||
#before_script:
|
||||
|
||||
stages:
|
||||
- test
|
||||
- docker-push
|
||||
- deploy-staging
|
||||
|
||||
test-server:
|
||||
stage: test
|
||||
script:
|
||||
- mkdir /go/src/cwtch.im
|
||||
- ln -s /builds/${CI_PROJECT_NAMESPACE}/cwtch /go/src/cwtch.im/cwtch
|
||||
- cd /go/src/cwtch.im/cwtch/server/app/
|
||||
- go get
|
||||
- go tool vet -composites=false -shadow=true *.go
|
||||
- go test
|
||||
|
||||
test-client:
|
||||
stage: test
|
||||
script:
|
||||
- mkdir /go/src/cwtch.im
|
||||
- ln -s /builds/${CI_PROJECT_NAMESPACE}/cwtch /go/src/cwtch.im/cwtch
|
||||
- cd /go/src/cwtch.im/cwtch/app/cli/
|
||||
- go get
|
||||
- go tool vet -composites=false -shadow=true *.go
|
||||
- go test
|
||||
# We don't really care about the client here but it's useful to know what's
|
||||
# happening on t'other side of the coin
|
||||
allow_failure: true
|
||||
|
||||
|
||||
gitlab-registry:
|
||||
stage: docker-push
|
||||
image: docker:latest
|
||||
services:
|
||||
- docker:dind
|
||||
tags:
|
||||
script:
|
||||
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN ${CI_REGISTRY}
|
||||
- docker build -t ${CI_REGISTRY_IMAGE}:latest -t ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA:0:8} .
|
||||
- docker push ${CI_REGISTRY_IMAGE}:latest
|
||||
- docker push ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA:0:8}
|
||||
dependencies:
|
||||
- test-server
|
||||
|
||||
docker-hub:
|
||||
stage: docker-push
|
||||
image: docker:latest
|
||||
services:
|
||||
- docker:dind
|
||||
tags:
|
||||
script:
|
||||
- docker login -u ${DOCKER_HUB_ID} -p ${DOCKER_HUB_PASSWORD} registry.hub.docker.com
|
||||
- docker build -t registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:latest -t registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:${CI_COMMIT_SHA:0:8} .
|
||||
- docker push registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:latest
|
||||
- docker push registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:${CI_COMMIT_SHA:0:8}
|
||||
dependencies:
|
||||
- test-server
|
|
@ -1,3 +1,7 @@
|
|||
# Contributing
|
||||
|
||||
## Getting Started
|
||||
|
||||
Sign up to the [Open Privacy Gogs instance](https://git.openprivacy.ca/)
|
||||
|
||||
Get the code
|
||||
|
@ -10,6 +14,8 @@ Make a development branch to do your work
|
|||
|
||||
If you are using Goland as an IDE, now would be a good time to enable automatic gofmt on save of files with the File Watches plugin [https://stackoverflow.com/questions/33774950/execute-gofmt-on-file-save-in-intellij](StackOverflow)
|
||||
|
||||
## Pull Requests
|
||||
|
||||
When you are done, rebase squash any multiple commits you have into one
|
||||
|
||||
git rebase -i master
|
||||
|
@ -19,7 +25,7 @@ Test the code and check it has not quality issues
|
|||
./testing/tests.sh
|
||||
./testing/quality.sh
|
||||
|
||||
Ideally run the ingegration tests (~5 minutes)
|
||||
Ideally run the integration tests (~5 minutes)
|
||||
|
||||
cd testing
|
||||
go test
|
||||
|
@ -30,7 +36,27 @@ push your branch (-f for *force* in the case you've rebased and squashed)
|
|||
|
||||
create a [pull request](https://git.openprivacy.ca/cwtch.im/cwtch/pulls)
|
||||
|
||||
If you have fixes, you can ammend them to the current commit rather than a new one with
|
||||
If you have fixes, you can amend them to the current commit rather than a new one with
|
||||
|
||||
git commit --ammend
|
||||
git commit --amend
|
||||
git push -f
|
||||
|
||||
## Review Board
|
||||
|
||||
For very large and complicated Pull Requests we have created a Review Board instance to facilitate more in depth review and discussion at https://review.openprivacy.ca
|
||||
|
||||
First acquire the client, RBTools, on Ubuntu:
|
||||
|
||||
apt install rbtools
|
||||
|
||||
Then hookup your git repo to review board with:
|
||||
|
||||
rbt setup-repo
|
||||
|
||||
Using the repo `cwtch`
|
||||
|
||||
Finally you will be able to create commits with
|
||||
|
||||
rbt post --parent master
|
||||
|
||||
It possibly will need the arguments `--tracking-branch=cwtch/master --branch=YOUR-BRANCH -d` (-d for debug if you are having trouble)
|
||||
|
|
|
@ -0,0 +1,78 @@
|
|||
FROM golang as server-build-stage
|
||||
ENV CGO_ENABLED=0 GOOS=linux
|
||||
|
||||
WORKDIR /go/src/cwtch.im/cwtch
|
||||
COPY . .
|
||||
|
||||
RUN go get -d -v ./...
|
||||
#RUN go install -v ./...
|
||||
WORKDIR /go/src/cwtch.im/cwtch/server/app/
|
||||
RUN go build -ldflags "-extldflags '-static'"
|
||||
|
||||
|
||||
|
||||
#----------------------------------------------
|
||||
FROM alpine:latest as tor-build-stage
|
||||
|
||||
# Install prerequisites
|
||||
RUN apk --no-cache add --update \
|
||||
gnupg \
|
||||
build-base \
|
||||
libevent \
|
||||
libevent-dev \
|
||||
libressl \
|
||||
libressl-dev \
|
||||
xz-libs \
|
||||
xz-dev \
|
||||
zlib \
|
||||
zlib-dev \
|
||||
zstd \
|
||||
zstd-dev \
|
||||
&& wget -q https://www.torproject.org/dist/tor-0.3.5.3-alpha.tar.gz \
|
||||
&& tar xf tor-0.3.5.3-alpha.tar.gz \
|
||||
&& cd tor-0.3.5.3-alpha \
|
||||
&& ./configure \
|
||||
&& make install \
|
||||
&& ls -R /usr/local/
|
||||
|
||||
FROM alpine:latest
|
||||
MAINTAINER Ablative Hosting <support@ablative.hosting>
|
||||
|
||||
#BSD habits die hard
|
||||
ENV TOR_USER=_tor
|
||||
|
||||
# Installing dependencies of Tor and pwgen
|
||||
RUN apk --no-cache add --update \
|
||||
libevent \
|
||||
libressl \
|
||||
xz-libs \
|
||||
zlib \
|
||||
zstd \
|
||||
zstd-dev \
|
||||
pwgen
|
||||
|
||||
# Copy Tor
|
||||
COPY --from=tor-build-stage /usr/local/ /usr/local/
|
||||
|
||||
# Create an unprivileged tor user
|
||||
RUN addgroup -S $TOR_USER && adduser -G $TOR_USER -S $TOR_USER && adduser -G _tor -S cwtchd && mkdir /run/tor
|
||||
|
||||
# Copy Tor configuration file
|
||||
COPY ./server/docker/torrc /etc/tor/torrc
|
||||
|
||||
# Copy docker-entrypoint
|
||||
COPY ./server/docker/docker-entrypoint /usr/local/bin/
|
||||
|
||||
# Copy across cwtch
|
||||
COPY --from=server-build-stage /go/src/cwtch.im/cwtch/server/app/app /usr/local/bin/cwtch_server
|
||||
|
||||
# Persist data
|
||||
VOLUME /etc/tor /var/lib/tor /etc/cwtch
|
||||
|
||||
ENTRYPOINT ["docker-entrypoint"]
|
||||
|
||||
#cwtchd is in the _tor group so can access the socket but that's it
|
||||
#USER cwtchd
|
||||
|
||||
#Launches the cwtchd daemon
|
||||
CMD ["/usr/local/bin/cwtch_server"]
|
28
README.md
28
README.md
|
@ -27,4 +27,32 @@ More Information: [https://cwtch.im](https://cwtch.im)
|
|||
|
||||
Development and Contributing information in [CONTRIBUTING.md](https://git.openprivacy.ca/cwtch.im/cwtch/src/master/CONTRIBUTING.md)
|
||||
|
||||
## Running Cwtch
|
||||
### Server
|
||||
#### Docker
|
||||
This repository contains a `Dockerfile` allowing you to build and run the server as a [docker](https://www.docker.com/) container.
|
||||
|
||||
To get started issue `docker build -t openpriv/cwtch-server:latest`, this will create 2 temporary docker containers, one to build the Tor daemon and one to build Cwtch. The compiled binaries will then be bundled into a new image and tagged as `openpriv/cwtch-server:latest`.
|
||||
|
||||
To run Cwtch in the foreground execute `docker run openpriv/cwtch-server:latest`, you will see a small amount of output from Tor and then Cwtch will output your server address. When you `Ctrl + C` the container will terminate. To run Cwtch in the background execute `docker run --name my-cwtch-server -d openpriv/cwtch-server:latest`. To get your Cwtch server address issue `docker logs my-cwtch-server`.
|
||||
|
||||
The image creates 3 volumes, for /etc/cwtch, /etc/tor, /var/lib/tor
|
||||
|
||||
##### Upgrading
|
||||
|
||||
To upgrade with continuity
|
||||
|
||||
```
|
||||
# Stop current container/service
|
||||
docker stop my-cwtch-server
|
||||
|
||||
docker pull openpriv/cwtch-server
|
||||
|
||||
# Create a new container and copy the volumes (cwtch/onion keys, message store)
|
||||
docker create --name my-cwtch-server-2 --volumes-from my-cwtch-server openpriv/cwtch-server:latest
|
||||
|
||||
# Resume service with the new container
|
||||
docker start my-cwtch-server-2
|
||||
```
|
||||
|
||||
![](https://git.openprivacy.ca/avatars/5?s=140)
|
||||
|
|
261
app/app.go
261
app/app.go
|
@ -1,98 +1,223 @@
|
|||
package app
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/connectivity/tor"
|
||||
"cwtch.im/cwtch/app/plugins"
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/peer"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"cwtch.im/cwtch/storage"
|
||||
"cwtch.im/tapir/primitives"
|
||||
"fmt"
|
||||
"log"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/user"
|
||||
"path"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Application is a facade over a cwtchPeer that provides some wrapping logic.
|
||||
type Application struct {
|
||||
Peer peer.CwtchPeerInterface
|
||||
TorManager *tor.Manager
|
||||
type applicationCore struct {
|
||||
eventBuses map[string]event.Manager
|
||||
|
||||
directory string
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// NewProfile creates a new cwtchPeer with a given name.
|
||||
func (app *Application) NewProfile(name string, filename string, password string) error {
|
||||
profile := peer.NewCwtchPeer(name, password)
|
||||
app.Peer = profile
|
||||
err := profile.Save(filename)
|
||||
if err == nil {
|
||||
|
||||
err := app.startTor()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
err := app.Peer.Listen()
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return err
|
||||
type application struct {
|
||||
applicationCore
|
||||
appletPeers
|
||||
appletACN
|
||||
appletPlugins
|
||||
storage map[string]storage.ProfileStore
|
||||
engines map[string]connections.Engine
|
||||
appBus event.Manager
|
||||
}
|
||||
|
||||
func (app *Application) startTor() error {
|
||||
// Application is a full cwtch peer application. It allows management, usage and storage of multiple peers
|
||||
type Application interface {
|
||||
LoadProfiles(password string)
|
||||
CreatePeer(name string, password string)
|
||||
AddPeerPlugin(onion string, pluginID plugins.PluginID)
|
||||
LaunchPeers()
|
||||
|
||||
// Creating a local cwtch tor server config for the user
|
||||
usr, err := user.Current()
|
||||
if err != nil {
|
||||
return err
|
||||
GetPrimaryBus() event.Manager
|
||||
GetEventBus(onion string) event.Manager
|
||||
|
||||
ShutdownPeer(string)
|
||||
Shutdown()
|
||||
|
||||
GetPeer(onion string) peer.CwtchPeer
|
||||
ListPeers() map[string]string
|
||||
}
|
||||
|
||||
// LoadProfileFn is the function signature for a function in an app that loads a profile
|
||||
type LoadProfileFn func(profile *model.Profile, store storage.ProfileStore)
|
||||
|
||||
func newAppCore(appDirectory string) *applicationCore {
|
||||
appCore := &applicationCore{eventBuses: make(map[string]event.Manager), directory: appDirectory}
|
||||
os.MkdirAll(path.Join(appCore.directory, "profiles"), 0700)
|
||||
return appCore
|
||||
}
|
||||
|
||||
// NewApp creates a new app with some environment awareness and initializes a Tor Manager
|
||||
func NewApp(acn connectivity.ACN, appDirectory string) Application {
|
||||
log.Debugf("NewApp(%v)\n", appDirectory)
|
||||
app := &application{storage: make(map[string]storage.ProfileStore), engines: make(map[string]connections.Engine), applicationCore: *newAppCore(appDirectory), appBus: event.NewEventManager()}
|
||||
app.appletPeers.init()
|
||||
|
||||
fn := func(progress int, status string) {
|
||||
progStr := strconv.Itoa(progress)
|
||||
app.appBus.Publish(event.NewEventList(event.ACNStatus, event.Progreess, progStr, event.Status, status))
|
||||
}
|
||||
app.appletACN.init(acn, fn)
|
||||
return app
|
||||
}
|
||||
|
||||
// CreatePeer creates a new Peer with a given name and core required accessories (eventbus)
|
||||
func (ac *applicationCore) CreatePeer(name string, password string) (*model.Profile, error) {
|
||||
log.Debugf("CreatePeer(%v)\n", name)
|
||||
|
||||
profile := storage.NewProfile(name)
|
||||
|
||||
ac.mutex.Lock()
|
||||
defer ac.mutex.Unlock()
|
||||
|
||||
_, exists := ac.eventBuses[profile.Onion]
|
||||
if exists {
|
||||
return nil, fmt.Errorf("Error: profile for onion %v already exists", profile.Onion)
|
||||
}
|
||||
|
||||
// creating /home/<usr>/.cwtch/torrc file
|
||||
// SOCKSPort socksPort
|
||||
// ControlPort controlPort
|
||||
torrc := path.Join(usr.HomeDir, ".cwtch", "torrc")
|
||||
if _, err := os.Stat(torrc); os.IsNotExist(err) {
|
||||
eventBus := event.NewEventManager()
|
||||
ac.eventBuses[profile.Onion] = eventBus
|
||||
|
||||
os.MkdirAll(path.Join(usr.HomeDir, ".cwtch"), 0700)
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
file, err := os.Create(torrc)
|
||||
// CreatePeer creates a new Peer with the given name and required accessories (eventbus, storage, protocol engine)
|
||||
func (app *application) CreatePeer(name string, password string) {
|
||||
profile, err := app.applicationCore.CreatePeer(name, password)
|
||||
if err != nil {
|
||||
app.appBus.Publish(event.NewEventList(event.PeerError, event.Error, err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
profileStore := storage.NewProfileWriterStore(app.eventBuses[profile.Onion], path.Join(app.directory, "profiles", profile.LocalID), password, profile)
|
||||
app.storage[profile.Onion] = profileStore
|
||||
|
||||
pc := app.storage[profile.Onion].GetProfileCopy(true)
|
||||
peer := peer.FromProfile(pc)
|
||||
peer.Init(app.eventBuses[profile.Onion])
|
||||
|
||||
blockedPeers := profile.BlockedPeers()
|
||||
// TODO: Would be nice if ProtocolEngine did not need to explicitly be given the Private Key.
|
||||
identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
|
||||
engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, app.acn, app.eventBuses[profile.Onion], blockedPeers)
|
||||
|
||||
app.peers[profile.Onion] = peer
|
||||
app.engines[profile.Onion] = engine
|
||||
|
||||
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion}))
|
||||
}
|
||||
|
||||
func (app *application) AddPeerPlugin(onion string, pluginID plugins.PluginID) {
|
||||
app.AddPlugin(onion, pluginID, app.eventBuses[onion])
|
||||
}
|
||||
|
||||
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
|
||||
func (ac *applicationCore) LoadProfiles(password string, timeline bool, loadProfileFn LoadProfileFn) error {
|
||||
files, err := ioutil.ReadDir(path.Join(ac.directory, "profiles"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error: cannot read profiles directory: %v", err)
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
eventBus := event.NewEventManager()
|
||||
profileStore := storage.NewProfileWriterStore(eventBus, path.Join(ac.directory, "profiles", file.Name()), password, nil)
|
||||
err = profileStore.Load()
|
||||
if err != nil {
|
||||
return err
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(file, "SOCKSPort %d\nControlPort %d\n", 9050, 9051)
|
||||
file.Close()
|
||||
}
|
||||
|
||||
tm, err := tor.NewTorManager(9050, 9051, torrc)
|
||||
if err != nil {
|
||||
return err
|
||||
profile := profileStore.GetProfileCopy(timeline)
|
||||
|
||||
_, exists := ac.eventBuses[profile.Onion]
|
||||
if exists {
|
||||
profileStore.Shutdown()
|
||||
eventBus.Shutdown()
|
||||
log.Errorf("profile for onion %v already exists", profile.Onion)
|
||||
continue
|
||||
}
|
||||
|
||||
ac.mutex.Lock()
|
||||
ac.eventBuses[profile.Onion] = eventBus
|
||||
ac.mutex.Unlock()
|
||||
|
||||
loadProfileFn(profile, profileStore)
|
||||
}
|
||||
app.TorManager = tm
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetProfile loads an existing profile from the given filename.
|
||||
func (app *Application) SetProfile(filename string, password string) error {
|
||||
profile, err := peer.LoadCwtchPeer(filename, password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
app.Peer = profile
|
||||
if err == nil {
|
||||
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
|
||||
func (app *application) LoadProfiles(password string) {
|
||||
count := 0
|
||||
app.applicationCore.LoadProfiles(password, true, func(profile *model.Profile, profileStore storage.ProfileStore) {
|
||||
peer := peer.FromProfile(profile)
|
||||
peer.Init(app.eventBuses[profile.Onion])
|
||||
|
||||
err := app.startTor()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
err := app.Peer.Listen()
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
}()
|
||||
blockedPeers := profile.BlockedPeers()
|
||||
identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
|
||||
engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, app.acn, app.eventBuses[profile.Onion], blockedPeers)
|
||||
app.mutex.Lock()
|
||||
app.peers[profile.Onion] = peer
|
||||
app.storage[profile.Onion] = profileStore
|
||||
app.engines[profile.Onion] = engine
|
||||
app.mutex.Unlock()
|
||||
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion}))
|
||||
count++
|
||||
})
|
||||
if count == 0 {
|
||||
message := event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0)
|
||||
app.appBus.Publish(message)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// PeerRequest attempts to setup peer relationship with the given onion address.`
|
||||
func (app *Application) PeerRequest(onion string) {
|
||||
app.Peer.PeerWithOnion(onion)
|
||||
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
|
||||
func (app *application) GetPrimaryBus() event.Manager {
|
||||
return app.appBus
|
||||
}
|
||||
|
||||
// GetEventBus returns a cwtchPeer's event bus
|
||||
func (ac *applicationCore) GetEventBus(onion string) event.Manager {
|
||||
if manager, ok := ac.eventBuses[onion]; ok {
|
||||
return manager
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShutdownPeer shuts down a peer and removes it from the app's management
|
||||
func (app *application) ShutdownPeer(onion string) {
|
||||
app.mutex.Lock()
|
||||
defer app.mutex.Unlock()
|
||||
app.eventBuses[onion].Shutdown()
|
||||
delete(app.eventBuses, onion)
|
||||
app.peers[onion].Shutdown()
|
||||
delete(app.peers, onion)
|
||||
app.engines[onion].Shutdown()
|
||||
delete(app.engines, onion)
|
||||
app.storage[onion].Shutdown()
|
||||
delete(app.storage, onion)
|
||||
}
|
||||
|
||||
// Shutdown shutsdown all peers of an app and then the tormanager
|
||||
func (app *application) Shutdown() {
|
||||
for id, peer := range app.peers {
|
||||
peer.Shutdown()
|
||||
app.engines[id].Shutdown()
|
||||
app.storage[id].Shutdown()
|
||||
app.appletPlugins.Shutdown()
|
||||
app.eventBuses[id].Shutdown()
|
||||
}
|
||||
app.appBus.Shutdown()
|
||||
}
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
package app
|
||||
|
||||
import "cwtch.im/cwtch/event"
|
||||
import "git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
|
||||
const (
|
||||
// DestApp should be used as a destination for IPC messages that are for the application itself an not a peer
|
||||
DestApp = "app"
|
||||
)
|
||||
|
||||
type applicationBridge struct {
|
||||
applicationCore
|
||||
|
||||
bridge event.IPCBridge
|
||||
handle func(*event.Event)
|
||||
}
|
||||
|
||||
func (ab *applicationBridge) listen() {
|
||||
log.Infoln("ab.listen()")
|
||||
for {
|
||||
ipcMessage, ok := ab.bridge.Read()
|
||||
log.Debugf("listen() got %v for %v\n", ipcMessage.Message.EventType, ipcMessage.Dest)
|
||||
if !ok {
|
||||
log.Debugln("exiting appBridge.listen()")
|
||||
return
|
||||
}
|
||||
|
||||
if ipcMessage.Dest == DestApp {
|
||||
ab.handle(&ipcMessage.Message)
|
||||
} else {
|
||||
if eventBus, exists := ab.eventBuses[ipcMessage.Dest]; exists {
|
||||
eventBus.PublishLocal(ipcMessage.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ab *applicationBridge) Shutdown() {
|
||||
}
|
|
@ -0,0 +1,129 @@
|
|||
package app
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/app/plugins"
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/peer"
|
||||
"cwtch.im/cwtch/storage"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"path"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type applicationClient struct {
|
||||
applicationBridge
|
||||
appletPeers
|
||||
|
||||
appBus event.Manager
|
||||
}
|
||||
|
||||
// NewAppClient returns an Application that acts as a client to a AppService, connected by the IPCBridge supplied
|
||||
func NewAppClient(appDirectory string, bridge event.IPCBridge) Application {
|
||||
appClient := &applicationClient{appletPeers: appletPeers{peers: make(map[string]peer.CwtchPeer)}, applicationBridge: applicationBridge{applicationCore: *newAppCore(appDirectory), bridge: bridge}, appBus: event.NewEventManager()}
|
||||
appClient.handle = appClient.handleEvent
|
||||
|
||||
go appClient.listen()
|
||||
|
||||
appClient.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadClient)})
|
||||
|
||||
log.Infoln("Created new App Client")
|
||||
return appClient
|
||||
}
|
||||
|
||||
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
func (ac *applicationClient) GetPrimaryBus() event.Manager {
	return ac.appBus
}
|
||||
|
||||
func (ac *applicationClient) handleEvent(ev *event.Event) {
|
||||
switch ev.EventType {
|
||||
case event.NewPeer:
|
||||
localID := ev.Data[event.Identity]
|
||||
password := ev.Data[event.Password]
|
||||
reload := ev.Data[event.Status] == "running"
|
||||
ac.newPeer(localID, password, reload)
|
||||
case event.PeerError:
|
||||
ac.appBus.Publish(*ev)
|
||||
case event.AppError:
|
||||
ac.appBus.Publish(*ev)
|
||||
case event.ACNStatus:
|
||||
ac.appBus.Publish(*ev)
|
||||
case event.ReloadDone:
|
||||
ac.appBus.Publish(*ev)
|
||||
}
|
||||
}
|
||||
|
||||
// newPeer loads a stored profile in response to a NewPeer event from the
// service, wires it to an IPC-backed event bus and registers it locally.
// reload indicates the peer was already running on the service side; in
// that case the service is asked to replay the peer's status messages.
func (ac *applicationClient) newPeer(localID, password string, reload bool) {
	profile, err := storage.ReadProfile(path.Join(ac.directory, "profiles", localID), password)
	if err != nil {
		log.Errorf("Could not read profile for NewPeer event: %v\n", err)
		ac.appBus.Publish(event.NewEventList(event.PeerError, event.Error, fmt.Sprintf("Could not read profile for NewPeer event: %v\n", err)))
		return
	}

	// Ignore duplicate NewPeer events for an onion we already manage.
	_, exists := ac.peers[profile.Onion]
	if exists {
		log.Errorf("profile for onion %v already exists", profile.Onion)
		ac.appBus.Publish(event.NewEventList(event.PeerError, event.Error, fmt.Sprintf("profile for onion %v already exists", profile.Onion)))
		return
	}

	// Events published on this bus are mirrored over the IPC bridge.
	eventBus := event.NewIPCEventManager(ac.bridge, profile.Onion)
	peer := peer.FromProfile(profile)
	peer.Init(eventBus)

	ac.mutex.Lock()
	defer ac.mutex.Unlock()
	ac.peers[profile.Onion] = peer
	ac.eventBuses[profile.Onion] = eventBus
	npEvent := event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion})
	if reload {
		npEvent.Data[event.Status] = "running"
	}
	ac.appBus.Publish(npEvent)

	if reload {
		// Ask the service side to resend this peer's status messages.
		ac.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadPeer, event.Identity, profile.Onion)})
	}

}
|
||||
|
||||
// CreatePeer messages the service to create a new Peer with the given name
|
||||
func (ac *applicationClient) CreatePeer(name string, password string) {
|
||||
log.Infof("appClient CreatePeer %v\n", name)
|
||||
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.CreatePeer, map[event.Field]string{event.ProfileName: name, event.Password: password})}
|
||||
ac.bridge.Write(&message)
|
||||
}
|
||||
|
||||
func (ac *applicationClient) AddPeerPlugin(onion string, pluginID plugins.PluginID) {
|
||||
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.AddPeerPlugin, map[event.Field]string{event.Identity: onion, event.Data: strconv.Itoa(int(pluginID))})}
|
||||
ac.bridge.Write(&message)
|
||||
}
|
||||
|
||||
// LoadProfiles messages the service to load any profiles for the given password
|
||||
func (ac *applicationClient) LoadProfiles(password string) {
|
||||
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.LoadProfiles, map[event.Field]string{event.Password: password})}
|
||||
ac.bridge.Write(&message)
|
||||
}
|
||||
|
||||
// ShutdownPeer shuts down a peer and removes it from the app's management
|
||||
func (ac *applicationClient) ShutdownPeer(onion string) {
|
||||
ac.mutex.Lock()
|
||||
defer ac.mutex.Unlock()
|
||||
ac.eventBuses[onion].Shutdown()
|
||||
delete(ac.eventBuses, onion)
|
||||
ac.peers[onion].Shutdown()
|
||||
delete(ac.peers, onion)
|
||||
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.ShutdownPeer, map[event.Field]string{event.Identity: onion})}
|
||||
ac.bridge.Write(&message)
|
||||
}
|
||||
|
||||
// Shutdown shuts down the application client and all front end peer components
func (ac *applicationClient) Shutdown() {
	// ShutdownPeer also notifies the service side for each peer.
	for id := range ac.peers {
		ac.ShutdownPeer(id)
	}
	ac.applicationBridge.Shutdown()
	ac.appBus.Shutdown()
}
|
|
@ -0,0 +1,145 @@
|
|||
package app
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/app/plugins"
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"cwtch.im/cwtch/storage"
|
||||
"cwtch.im/tapir/primitives"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"path"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// applicationService is the back-end half of a split application: it owns
// the ACN, plugins, profile storage and protocol engines for each profile,
// and talks to an applicationClient over the embedded IPC bridge.
type applicationService struct {
	applicationBridge
	appletACN
	appletPlugins

	// storage maps a profile's onion address to its storage back end.
	storage map[string]storage.ProfileStore
	// engines maps a profile's onion address to its protocol engine.
	engines map[string]connections.Engine
}
|
||||
|
||||
// ApplicationService is the back end of an application that manages engines and writing storage and communicates to an ApplicationClient by an IPCBridge
type ApplicationService interface {
	// Shutdown tears down all peer-related back-end components.
	Shutdown()
}
|
||||
|
||||
// NewAppService returns an ApplicationService that runs the backend of an app and communicates with a client by the supplied IPCBridge
|
||||
func NewAppService(acn connectivity.ACN, appDirectory string, bridge event.IPCBridge) ApplicationService {
|
||||
appService := &applicationService{storage: make(map[string]storage.ProfileStore), engines: make(map[string]connections.Engine), applicationBridge: applicationBridge{applicationCore: *newAppCore(appDirectory), bridge: bridge}}
|
||||
|
||||
fn := func(progress int, status string) {
|
||||
progStr := strconv.Itoa(progress)
|
||||
appService.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ACNStatus, event.Progreess, progStr, event.Status, status)})
|
||||
}
|
||||
appService.appletACN.init(acn, fn)
|
||||
appService.handle = appService.handleEvent
|
||||
|
||||
go appService.listen()
|
||||
|
||||
log.Infoln("Created new App Service")
|
||||
return appService
|
||||
}
|
||||
|
||||
// handleEvent dispatches IPC events addressed to the service side of the
// app: profile creation/loading, plugin attachment, client reload replay,
// and peer shutdown.
func (as *applicationService) handleEvent(ev *event.Event) {
	log.Infof("app Service handleEvent %v\n", ev.EventType)
	switch ev.EventType {
	case event.CreatePeer:
		profileName := ev.Data[event.ProfileName]
		password := ev.Data[event.Password]
		as.createPeer(profileName, password)
	case event.AddPeerPlugin:
		onion := ev.Data[event.Identity]
		pluginID, _ := strconv.Atoi(ev.Data[event.Data])
		as.AddPlugin(onion, plugins.PluginID(pluginID), as.eventBuses[onion])
	case event.LoadProfiles:
		password := ev.Data[event.Password]
		as.loadProfiles(password)
	case event.ReloadClient:
		// Replay a NewPeer message for every stored profile so a
		// (re)connecting client can rebuild its peer list.
		// NOTE: the loop variable shadows the storage package here.
		for _, storage := range as.storage {
			message := event.IPCMessage{Dest: DestApp, Message: *storage.GetNewPeerMessage()}
			as.bridge.Write(&message)
		}

		message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadDone)}
		as.bridge.Write(&message)
	case event.ReloadPeer:
		onion := ev.Data[event.Identity]
		events := as.storage[onion].GetStatusMessages()

		// NOTE: the loop variable shadows the outer ev parameter.
		for _, ev := range events {
			message := event.IPCMessage{Dest: onion, Message: *ev}
			as.bridge.Write(&message)
		}
	case event.ShutdownPeer:
		onion := ev.Data[event.Identity]
		as.ShutdownPeer(onion)
	}
}
|
||||
|
||||
func (as *applicationService) createPeer(name, password string) {
|
||||
log.Infof("app Service create peer %v %v\n", name, password)
|
||||
profile, err := as.applicationCore.CreatePeer(name, password)
|
||||
as.eventBuses[profile.Onion] = event.IPCEventManagerFrom(as.bridge, profile.Onion, as.eventBuses[profile.Onion])
|
||||
if err != nil {
|
||||
log.Errorf("Could not create Peer: %v\n", err)
|
||||
message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.PeerError, event.Error, err.Error())}
|
||||
as.bridge.Write(&message)
|
||||
return
|
||||
}
|
||||
|
||||
profileStore := storage.NewProfileWriterStore(as.eventBuses[profile.Onion], path.Join(as.directory, "profiles", profile.LocalID), password, profile)
|
||||
|
||||
blockedPeers := profile.BlockedPeers()
|
||||
// TODO: Would be nice if ProtocolEngine did not need to explicitly be given the Private Key.
|
||||
identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
|
||||
engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, as.acn, as.eventBuses[profile.Onion], blockedPeers)
|
||||
|
||||
as.storage[profile.Onion] = profileStore
|
||||
as.engines[profile.Onion] = engine
|
||||
|
||||
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.LocalID, event.Password: password})}
|
||||
as.bridge.Write(&message)
|
||||
}
|
||||
|
||||
// loadProfiles decrypts and loads every stored profile matching the given
// password, creating an event bus, protocol engine and storage entry for
// each, and notifying the client per profile. If no profile matched, an
// AppError event carrying AppErrLoaded0 is sent instead.
func (as *applicationService) loadProfiles(password string) {
	count := 0
	as.applicationCore.LoadProfiles(password, false, func(profile *model.Profile, profileStore storage.ProfileStore) {
		// Wrap (or create) the profile's event bus so events are mirrored over IPC.
		as.eventBuses[profile.Onion] = event.IPCEventManagerFrom(as.bridge, profile.Onion, as.eventBuses[profile.Onion])

		blockedPeers := profile.BlockedPeers()
		identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
		engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, as.acn, as.eventBuses[profile.Onion], blockedPeers)
		as.mutex.Lock()
		as.storage[profile.Onion] = profileStore
		as.engines[profile.Onion] = engine
		as.mutex.Unlock()
		// The client reads the profile back from disk using LocalID + password.
		message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.LocalID, event.Password: password})}
		as.bridge.Write(&message)
		count++
	})
	if count == 0 {
		message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0)}
		as.bridge.Write(&message)
	}
}
|
||||
|
||||
func (as *applicationService) ShutdownPeer(onion string) {
|
||||
as.engines[onion].Shutdown()
|
||||
delete(as.engines, onion)
|
||||
as.storage[onion].Shutdown()
|
||||
delete(as.storage, onion)
|
||||
as.eventBuses[onion].Shutdown()
|
||||
delete(as.eventBuses, onion)
|
||||
}
|
||||
|
||||
// Shutdown shuts down the application Service and all peer related backend parts
|
||||
func (as *applicationService) Shutdown() {
|
||||
for id := range as.engines {
|
||||
as.appletPlugins.Shutdown()
|
||||
as.ShutdownPeer(id)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,101 @@
|
|||
package app
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"sync"
|
||||
|
||||
"cwtch.im/cwtch/app/plugins"
|
||||
"cwtch.im/cwtch/peer"
|
||||
)
|
||||
|
||||
// appletPeers manages the set of in-memory CwtchPeers for an applet,
// keyed by onion address.
type appletPeers struct {
	peers    map[string]peer.CwtchPeer
	launched bool // bit hacky, place holder while we transition to full multi peer support and a better api
}
|
||||
|
||||
// appletACN wraps the anonymous communication network connection
// (e.g. Tor) shared by an applet.
type appletACN struct {
	acn connectivity.ACN
}
|
||||
|
||||
// appletPlugins tracks the plugins started for each peer.
type appletPlugins struct {
	plugins sync.Map //map[string] []plugins.Plugin
}
|
||||
|
||||
// ***** applet ACN
|
||||
|
||||
func (a *appletACN) init(acn connectivity.ACN, publish func(int, string)) {
|
||||
a.acn = acn
|
||||
acn.SetStatusCallback(publish)
|
||||
prog, status := acn.GetBootstrapStatus()
|
||||
publish(prog, status)
|
||||
}
|
||||
|
||||
// Shutdown closes the underlying ACN connection.
func (a *appletACN) Shutdown() {
	a.acn.Close()
}
|
||||
|
||||
// ***** appletPeers
|
||||
|
||||
// init prepares the (empty) peer map; peers are not started until
// LaunchPeers is called.
func (ap *appletPeers) init() {
	ap.peers = make(map[string]peer.CwtchPeer)
	ap.launched = false
}
|
||||
|
||||
// LaunchPeers starts each peer Listening and connecting to peers and groups
|
||||
func (ap *appletPeers) LaunchPeers() {
|
||||
log.Debugf("appletPeers LaunchPeers\n")
|
||||
if ap.launched {
|
||||
return
|
||||
}
|
||||
for _, p := range ap.peers {
|
||||
p.Listen()
|
||||
p.StartPeersConnections()
|
||||
p.StartGroupConnections()
|
||||
}
|
||||
ap.launched = true
|
||||
}
|
||||
|
||||
// ListPeers returns a map of onions to their profile's Name
|
||||
func (ap *appletPeers) ListPeers() map[string]string {
|
||||
keys := map[string]string{}
|
||||
for k, p := range ap.peers {
|
||||
keys[k] = p.GetProfile().Name
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// GetPeer returns a cwtchPeer for a given onion address
|
||||
func (ap *appletPeers) GetPeer(onion string) peer.CwtchPeer {
|
||||
if peer, ok := ap.peers[onion]; ok {
|
||||
return peer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ***** applet Plugins
|
||||
|
||||
func (ap *appletPlugins) Shutdown() {
|
||||
ap.plugins.Range(func(k, v interface{}) bool {
|
||||
plugins := v.([]plugins.Plugin)
|
||||
for _, plugin := range plugins {
|
||||
plugin.Shutdown()
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (ap *appletPlugins) AddPlugin(peerid string, id plugins.PluginID, bus event.Manager) {
|
||||
if _, exists := ap.plugins.Load(peerid); !exists {
|
||||
ap.plugins.Store(peerid, []plugins.Plugin{})
|
||||
}
|
||||
|
||||
pluginsinf, _ := ap.plugins.Load(peerid)
|
||||
peerPlugins := pluginsinf.([]plugins.Plugin)
|
||||
|
||||
newp := plugins.Get(id, bus)
|
||||
newp.Start()
|
||||
peerPlugins = append(peerPlugins, newp)
|
||||
ap.plugins.Store(peerid, peerPlugins)
|
||||
}
|
|
@ -0,0 +1,86 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
app2 "cwtch.im/cwtch/app"
|
||||
"cwtch.im/cwtch/app/utils"
|
||||
"cwtch.im/cwtch/peer"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"errors"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
func waitForPeerGroupConnection(peer peer.CwtchPeer, groupID string) error {
|
||||
for {
|
||||
_, ok := peer.GetProfile().Groups[groupID]
|
||||
if ok {
|
||||
state := peer.GetGroupState(groupID)
|
||||
if state == connections.FAILED {
|
||||
return errors.New("Connection to group " + groupID + " failed!")
|
||||
}
|
||||
if state != connections.AUTHENTICATED {
|
||||
fmt.Printf("peer %v waiting to authenticate with group %v 's server, current state: %v\n", peer.GetProfile().Onion, groupID, connections.ConnectionStateName[state])
|
||||
time.Sleep(time.Second * 10)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
return errors.New("peer server connections should have entry for server but do not")
|
||||
}
|
||||
break
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// main connects to the Cwtch server named on the command line, creates a
// monitoring peer and a group on that server, then sends messages at an
// exponentially increasing interval (capped at 2 minutes) to probe server
// availability. Exits non-zero on the first failure.
func main() {
	if len(os.Args) != 2 {
		fmt.Printf("Usage: ./servermon SERVER_ADDRESS\n")
		os.Exit(1)
	}

	serverAddr := os.Args[1]

	acn, err := connectivity.StartTor(".", "")
	if err != nil {
		fmt.Printf("Could not start tor: %v\n", err)
		os.Exit(1)
	}

	app := app2.NewApp(acn, ".")

	app.CreatePeer("servermon", "be gay, do crimes")

	// Block until the profile created above becomes available.
	botPeer := utils.WaitGetPeer(app, "servermon")

	fmt.Printf("Connecting to %v...\n", serverAddr)
	botPeer.JoinServer(serverAddr)
	groupID, _, err := botPeer.StartGroup(serverAddr)
	if err != nil {
		fmt.Printf("Error creating group on server %v: %v\n", serverAddr, err)
		os.Exit(1)
	}

	err = waitForPeerGroupConnection(botPeer, groupID)
	if err != nil {
		fmt.Printf("Could not connect to server %v: %v\n", serverAddr, err)
		os.Exit(1)
	}

	// Probe loop: intervals double each round (1s, 2s, 4s, ...) until
	// capped at 2 minutes; the message body is the interval itself.
	timeout := 1 * time.Second
	timeElapsed := 0 * time.Second
	for {
		err := botPeer.SendMessageToGroup(groupID, timeout.String())
		if err != nil {
			fmt.Printf("Sent to group on server %v failed at interval %v of total %v with: %v\n", serverAddr, timeout, timeElapsed, err)
			os.Exit(1)
		} else {
			fmt.Printf("Successfully sent message to %v at interval %v of total %v\n", serverAddr, timeout, timeElapsed)
		}
		time.Sleep(timeout)
		timeElapsed += timeout
		if timeout < 2*time.Minute {
			timeout = timeout * 2
		}
	}
}
|
585
app/cli/main.go
585
app/cli/main.go
|
@ -2,58 +2,125 @@ package main
|
|||
|
||||
import (
|
||||
app2 "cwtch.im/cwtch/app"
|
||||
"fmt"
|
||||
|
||||
"github.com/c-bata/go-prompt"
|
||||
"strings"
|
||||
"time"
|
||||
"cwtch.im/cwtch/event"
|
||||
peer2 "cwtch.im/cwtch/peer"
|
||||
|
||||
"bytes"
|
||||
"cwtch.im/cwtch/model"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"github.com/c-bata/go-prompt"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"os"
|
||||
"os/user"
|
||||
"path"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var app app2.Application
|
||||
var peer peer2.CwtchPeer
|
||||
var group *model.Group
|
||||
var groupFollowBreakChan chan bool
|
||||
var prmpt string
|
||||
|
||||
var suggestions = []prompt.Suggest{
|
||||
{Text: "new-profile", Description: "create a new profile"},
|
||||
{Text: "load-profile", Description: "load a new profile"},
|
||||
{Text: "quit", Description: "quit cwtch"},
|
||||
{Text: "info", Description: "show user info"},
|
||||
{Text: "servers", Description: "retrieve a list of servers and their connection status"},
|
||||
{Text: "peers", Description: "retrieve a list of peers and their connection status"},
|
||||
{Text: "contacts", Description: "retrieve a list of contacts"},
|
||||
{Text: "groups", Description: "retrieve a list of groups"},
|
||||
{Text: "send", Description: "send a message to a group"},
|
||||
{Text: "timeline", Description: "read the timeline of a given group"},
|
||||
{Text: "accept-invite", Description: "accept the invite of a group"},
|
||||
{Text: "invite", Description: "invite a new contact"},
|
||||
{Text: "invite-to-group", Description: "invite an existing contact to join an existing group"},
|
||||
{Text: "new-group", Description: "create a new group"},
|
||||
{Text: "help", Description: "print list of commands"},
|
||||
{Text: "trust", Description: "trust a peer"},
|
||||
{Text: "block", Description: "block a peer - you will no longer see messages or connect to this peer"},
|
||||
var suggestionsBase = []prompt.Suggest{
|
||||
{Text: "/new-profile", Description: "create a new profile"},
|
||||
{Text: "/load-profiles", Description: "loads profiles with a password"},
|
||||
{Text: "/list-profiles", Description: "list active profiles"},
|
||||
{Text: "/select-profile", Description: "selects an active profile to use"},
|
||||
{Text: "/help", Description: "print list of commands"},
|
||||
{Text: "/quit", Description: "quit cwtch"},
|
||||
}
|
||||
|
||||
var suggestionsSelectedProfile = []prompt.Suggest{
|
||||
{Text: "/info", Description: "show user info"},
|
||||
{Text: "/list-contacts", Description: "retrieve a list of contacts"},
|
||||
{Text: "/list-groups", Description: "retrieve a list of groups"},
|
||||
{Text: "/new-group", Description: "create a new group on a server"},
|
||||
{Text: "/select-group", Description: "selects a group to follow"},
|
||||
{Text: "/unselect-group", Description: "stop following the current group"},
|
||||
{Text: "/invite", Description: "invite a new contact"},
|
||||
{Text: "/invite-to-group", Description: "invite an existing contact to join an existing group"},
|
||||
{Text: "/accept-invite", Description: "accept the invite of a group"},
|
||||
/*{Text: "/list-servers", Description: "retrieve a list of servers and their connection status"},
|
||||
{Text: "/list-peers", Description: "retrieve a list of peers and their connection status"},*/
|
||||
{Text: "/export-group", Description: "export a group invite: prints as a string"},
|
||||
{Text: "/trust", Description: "trust a peer"},
|
||||
{Text: "/block", Description: "block a peer - you will no longer see messages or connect to this peer"},
|
||||
}
|
||||
|
||||
var suggestions = suggestionsBase
|
||||
|
||||
var usages = map[string]string{
|
||||
"new-profile": "new-profile [name] [filename]",
|
||||
"load-profile": "load-profile [filename]",
|
||||
"quit": "",
|
||||
"servers": "",
|
||||
"peers": "",
|
||||
"contacts": "",
|
||||
"groups": "",
|
||||
"info": "",
|
||||
"send": "send [groupid] [message]",
|
||||
"timeline": "timeline [groupid]",
|
||||
"accept-invite": "accept-invite [groupid]",
|
||||
"invite": "invite [peerid]",
|
||||
"invite-to-group": "invite-to-group [peerid] [groupid]",
|
||||
"new-group": "new-group [server]",
|
||||
"help": "",
|
||||
"trust": "trust [peerid]",
|
||||
"block": "block [peerid]",
|
||||
"/new-profile": "/new-profile [name]",
|
||||
"/load-profiles": "/load-profiles",
|
||||
"/list-profiles": "",
|
||||
"/select-profile": "/select-profile [onion]",
|
||||
"/quit": "",
|
||||
/* "/list-servers": "",
|
||||
"/list-peers": "",*/
|
||||
"/list-contacts": "",
|
||||
"/list-groups": "",
|
||||
"/select-group": "/select-group [groupid]",
|
||||
"/unselect-group": "",
|
||||
"/export-group": "/export-group [groupid]",
|
||||
"/info": "",
|
||||
"/send": "/send [groupid] [message]",
|
||||
"/timeline": "/timeline [groupid]",
|
||||
"/accept-invite": "/accept-invite [groupid]",
|
||||
"/invite": "/invite [peerid]",
|
||||
"/invite-to-group": "/invite-to-group [groupid] [peerid]",
|
||||
"/new-group": "/new-group [server]",
|
||||
"/help": "",
|
||||
"/trust": "/trust [peerid]",
|
||||
"/block": "/block [peerid]",
|
||||
}
|
||||
|
||||
func printMessage(m model.Message) {
|
||||
p := peer.GetContact(m.PeerID)
|
||||
name := "unknown"
|
||||
if p != nil {
|
||||
name = p.Name
|
||||
} else if peer.GetProfile().Onion == m.PeerID {
|
||||
name = peer.GetProfile().Name
|
||||
}
|
||||
|
||||
fmt.Printf("%v %v (%v): %v\n", m.Timestamp, name, m.PeerID, m.Message)
|
||||
}
|
||||
|
||||
func startGroupFollow() {
|
||||
groupFollowBreakChan = make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
l := len(group.Timeline.GetMessages())
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
if group == nil {
|
||||
return
|
||||
}
|
||||
gms := group.Timeline.GetMessages()
|
||||
if len(gms) != l {
|
||||
fmt.Printf("\n")
|
||||
for ; l < len(gms); l++ {
|
||||
printMessage(gms[l])
|
||||
}
|
||||
fmt.Printf(prmpt)
|
||||
}
|
||||
case <-groupFollowBreakChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func stopGroupFollow() {
|
||||
if group != nil {
|
||||
groupFollowBreakChan <- true
|
||||
group = nil
|
||||
}
|
||||
}
|
||||
|
||||
func completer(d prompt.Document) []prompt.Suggest {
|
||||
|
@ -65,53 +132,37 @@ func completer(d prompt.Document) []prompt.Suggest {
|
|||
}
|
||||
|
||||
w := d.CurrentLine()
|
||||
if strings.HasPrefix(w, "send") || strings.HasPrefix(w, "timeline") {
|
||||
|
||||
// Suggest a profile id
|
||||
if strings.HasPrefix(w, "/select-profile") {
|
||||
s = []prompt.Suggest{}
|
||||
groups := app.Peer.GetGroups()
|
||||
peerlist := app.ListPeers()
|
||||
for onion, peername := range peerlist {
|
||||
s = append(s, prompt.Suggest{Text: onion, Description: peername})
|
||||
}
|
||||
}
|
||||
|
||||
if peer == nil {
|
||||
return s
|
||||
}
|
||||
|
||||
// Suggest groupid
|
||||
if /*strings.HasPrefix(w, "send") || strings.HasPrefix(w, "timeline") ||*/ strings.HasPrefix(w, "/export-group") || strings.HasPrefix(w, "/select-group") {
|
||||
s = []prompt.Suggest{}
|
||||
groups := peer.GetGroups()
|
||||
for _, groupID := range groups {
|
||||
group := app.Peer.GetGroup(groupID)
|
||||
group := peer.GetGroup(groupID)
|
||||
s = append(s, prompt.Suggest{Text: group.GroupID, Description: "Group owned by " + group.Owner + " on " + group.GroupServer})
|
||||
}
|
||||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(w, "block") || strings.HasPrefix(w, "trust") {
|
||||
// Suggest unaccepted group
|
||||
if strings.HasPrefix(w, "/accept-invite") {
|
||||
s = []prompt.Suggest{}
|
||||
contacts := app.Peer.GetContacts()
|
||||
for _, onion := range contacts {
|
||||
contact := app.Peer.GetContact(onion)
|
||||
s = append(s, prompt.Suggest{Text: contact.Onion, Description: contact.Name})
|
||||
}
|
||||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(w, "invite-to-group") {
|
||||
|
||||
if d.FindStartOfPreviousWordWithSpace() == 0 {
|
||||
s = []prompt.Suggest{}
|
||||
contacts := app.Peer.GetContacts()
|
||||
for _, onion := range contacts {
|
||||
contact := app.Peer.GetContact(onion)
|
||||
s = append(s, prompt.Suggest{Text: contact.Onion, Description: contact.Name})
|
||||
}
|
||||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
s = []prompt.Suggest{}
|
||||
groups := app.Peer.GetGroups()
|
||||
groups := peer.GetGroups()
|
||||
for _, groupID := range groups {
|
||||
group := app.Peer.GetGroup(groupID)
|
||||
if group.Owner == "self" {
|
||||
s = append(s, prompt.Suggest{Text: group.GroupID, Description: "Group owned by " + group.Owner + " on " + group.GroupServer})
|
||||
}
|
||||
}
|
||||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(w, "accept-invite") {
|
||||
s = []prompt.Suggest{}
|
||||
groups := app.Peer.GetGroups()
|
||||
for _, groupID := range groups {
|
||||
group := app.Peer.GetGroup(groupID)
|
||||
group := peer.GetGroup(groupID)
|
||||
if group.Accepted == false {
|
||||
s = append(s, prompt.Suggest{Text: group.GroupID, Description: "Group owned by " + group.Owner + " on " + group.GroupServer})
|
||||
}
|
||||
|
@ -119,9 +170,70 @@ func completer(d prompt.Document) []prompt.Suggest {
|
|||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
|
||||
// suggest groupid AND peerid
|
||||
if strings.HasPrefix(w, "/invite-to-group") {
|
||||
|
||||
if d.FindStartOfPreviousWordWithSpace() == 0 {
|
||||
s = []prompt.Suggest{}
|
||||
groups := peer.GetGroups()
|
||||
for _, groupID := range groups {
|
||||
group := peer.GetGroup(groupID)
|
||||
if group.Owner == "self" {
|
||||
s = append(s, prompt.Suggest{Text: group.GroupID, Description: "Group owned by " + group.Owner + " on " + group.GroupServer})
|
||||
}
|
||||
}
|
||||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
|
||||
s = []prompt.Suggest{}
|
||||
contacts := peer.GetContacts()
|
||||
for _, onion := range contacts {
|
||||
contact := peer.GetContact(onion)
|
||||
s = append(s, prompt.Suggest{Text: contact.Onion, Description: contact.Name})
|
||||
}
|
||||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
|
||||
// Suggest contact onion / peerid
|
||||
if strings.HasPrefix(w, "/block") || strings.HasPrefix(w, "/trust") || strings.HasPrefix(w, "/invite") {
|
||||
s = []prompt.Suggest{}
|
||||
contacts := peer.GetContacts()
|
||||
for _, onion := range contacts {
|
||||
contact := peer.GetContact(onion)
|
||||
s = append(s, prompt.Suggest{Text: contact.Onion, Description: contact.Name})
|
||||
}
|
||||
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// handleAppEvents consumes application-level events (profile creation
// results and errors) from the app's primary bus and reports them on the
// console. Runs forever; intended to be launched as a goroutine.
func handleAppEvents(em event.Manager) {
	queue := event.NewQueue()
	em.Subscribe(event.NewPeer, queue)
	em.Subscribe(event.PeerError, queue)

	for {
		ev := queue.Next()
		switch ev.EventType {
		case event.NewPeer:
			onion := ev.Data[event.Identity]
			p := app.GetPeer(onion)
			app.LaunchPeers()

			fmt.Printf("\nLoaded profile %v (%v)\n", p.GetProfile().Name, p.GetProfile().Onion)
			// Unlock the profile-specific commands now that a profile exists.
			suggestions = append(suggestionsBase, suggestionsSelectedProfile...)

			profiles := app.ListPeers()
			fmt.Printf("\n%v profiles active now\n", len(profiles))
			fmt.Printf("You should run `select-profile` to use a profile or `list-profiles` to view loaded profiles\n")
		case event.PeerError:
			err := ev.Data[event.Error]
			fmt.Printf("\nError creating profile: %v\n", err)
		}
	}
}
||||
|
||||
func main() {
|
||||
|
||||
cwtch :=
|
||||
|
@ -161,30 +273,77 @@ func main() {
|
|||
@+''+@ '++@ ;++@ '#''@ ##'''@: +++, +++, :@ @@@@ @@@' @@@ '@@@
|
||||
:' ' '`
|
||||
fmt.Printf("%v\n\n", cwtch)
|
||||
|
||||
quit := false
|
||||
app = app2.Application{}
|
||||
profilefile := ""
|
||||
|
||||
usr, err := user.Current()
|
||||
if err != nil {
|
||||
log.Errorf("\nError: could not load current user: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
acn, err := connectivity.StartTor(path.Join(usr.HomeDir, ".cwtch"), "")
|
||||
if err != nil {
|
||||
log.Errorf("\nError connecting to Tor: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
app = app2.NewApp(acn, path.Join(usr.HomeDir, ".cwtch"))
|
||||
go handleAppEvents(app.GetPrimaryBus())
|
||||
if err != nil {
|
||||
log.Errorf("Error initializing application: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
log.SetLevel(log.LevelDebug)
|
||||
fmt.Printf("\nWelcome to Cwtch!\n")
|
||||
fmt.Printf("If this if your first time you should create a profile by running `/new-profile`\n")
|
||||
fmt.Printf("`/load-profiles` will prompt you for a password and load profiles from storage\n")
|
||||
fmt.Printf("`/help` will show you other available commands\n")
|
||||
fmt.Printf("There is full [TAB] completion support\n\n")
|
||||
|
||||
var history []string
|
||||
for !quit {
|
||||
profile := "unset"
|
||||
if app.Peer != nil {
|
||||
profile = app.Peer.GetProfile().Name
|
||||
|
||||
prmpt = "cwtch> "
|
||||
if group != nil {
|
||||
prmpt = fmt.Sprintf("cwtch %v (%v) [%v] say> ", peer.GetProfile().Name, peer.GetProfile().Onion, group.GroupID)
|
||||
} else if peer != nil {
|
||||
prmpt = fmt.Sprintf("cwtch %v (%v)> ", peer.GetProfile().Name, peer.GetProfile().Onion)
|
||||
}
|
||||
prmpt := fmt.Sprintf("cwtch [%v]> ", profile)
|
||||
|
||||
text := prompt.Input(prmpt, completer, prompt.OptionSuggestionBGColor(prompt.Purple),
|
||||
prompt.OptionDescriptionBGColor(prompt.White),
|
||||
prompt.OptionPrefixTextColor(prompt.White),
|
||||
prompt.OptionInputTextColor(prompt.Purple),
|
||||
prompt.OptionHistory(history))
|
||||
|
||||
commands := strings.Split(text[0:], " ")
|
||||
history = append(history, text)
|
||||
|
||||
if peer == nil {
|
||||
if commands[0] != "/help" && commands[0] != "/quit" && commands[0] != "/new-profile" && commands[0] != "/load-profiles" && commands[0] != "/select-profile" && commands[0] != "/list-profiles" {
|
||||
fmt.Printf("Profile needs to be set\n")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Send
|
||||
if group != nil && !strings.HasPrefix(commands[0], "/") {
|
||||
err := peer.SendMessageToGroup(group.GroupID, text)
|
||||
if err != nil {
|
||||
fmt.Printf("Error sending message: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
switch commands[0] {
|
||||
case "quit":
|
||||
app.Peer.Save(profilefile)
|
||||
case "/quit":
|
||||
quit = true
|
||||
case "new-profile":
|
||||
if len(commands) == 3 {
|
||||
case "/new-profile":
|
||||
if len(commands) == 2 {
|
||||
name := strings.Trim(commands[1], " ")
|
||||
if name == "" {
|
||||
fmt.Printf("Error creating profile, usage: %v\n", usages[commands[0]])
|
||||
break
|
||||
}
|
||||
|
||||
fmt.Print("** WARNING: PASSWORDS CANNOT BE RECOVERED! **\n")
|
||||
|
||||
password := ""
|
||||
|
@ -207,191 +366,221 @@ func main() {
|
|||
}
|
||||
|
||||
if failcount >= 3 {
|
||||
fmt.Printf("Error creating profile for %v: Your password entries must match!\n", commands[1])
|
||||
fmt.Printf("Error creating profile for %v: Your password entries must match!\n", name)
|
||||
} else {
|
||||
err := app.NewProfile(commands[1], commands[2], password)
|
||||
profilefile = commands[2]
|
||||
if err == nil {
|
||||
fmt.Printf("\nNew profile created for %v\n", commands[1])
|
||||
} else {
|
||||
fmt.Printf("\nError creating profile for %v: %v\n", commands[1], err)
|
||||
}
|
||||
app.CreatePeer(name, password)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error creating NewProfile, usage: %s\n", usages["new-profile"])
|
||||
fmt.Printf("Error creating New Profile, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "load-profile":
|
||||
if len(commands) == 2 {
|
||||
fmt.Print("Enter a password to decrypt the profile: ")
|
||||
bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
err = app.SetProfile(commands[1], string(bytePassword))
|
||||
if err == nil {
|
||||
fmt.Printf("\nLoaded profile for %v\n", commands[1])
|
||||
profilefile = commands[1]
|
||||
} else {
|
||||
fmt.Printf("Error loading profile for %v: %v\n", commands[1], err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error Loading profile, usage: %s\n", usages["load-profile"])
|
||||
case "/load-profiles":
|
||||
fmt.Print("Enter a password to decrypt the profile: ")
|
||||
bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("\nError loading profiles: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
case "info":
|
||||
if app.Peer != nil {
|
||||
fmt.Printf("Address cwtch:%v\n", app.Peer.GetProfile().Onion)
|
||||
app.LoadProfiles(string(bytePassword))
|
||||
|
||||
if err == nil {
|
||||
|
||||
} else {
|
||||
fmt.Printf("\nError loading profiles: %v\n", err)
|
||||
}
|
||||
|
||||
case "/list-profiles":
|
||||
peerlist := app.ListPeers()
|
||||
for onion, peername := range peerlist {
|
||||
fmt.Printf(" %v\t%v\n", onion, peername)
|
||||
}
|
||||
case "/select-profile":
|
||||
if len(commands) == 2 {
|
||||
p := app.GetPeer(commands[1])
|
||||
if p == nil {
|
||||
fmt.Printf("Error: profile '%v' does not exist\n", commands[1])
|
||||
} else {
|
||||
stopGroupFollow()
|
||||
peer = p
|
||||
suggestions = append(suggestionsBase, suggestionsSelectedProfile...)
|
||||
}
|
||||
|
||||
// Auto cwtchPeer / Join Server
|
||||
// TODO There are some privacy implications with this that we should
|
||||
// think over.
|
||||
for _, name := range p.GetProfile().GetContacts() {
|
||||
profile := p.GetContact(name)
|
||||
if profile.Trusted && !profile.Blocked {
|
||||
p.PeerWithOnion(profile.Onion)
|
||||
}
|
||||
}
|
||||
|
||||
for _, groupid := range p.GetGroups() {
|
||||
group := p.GetGroup(groupid)
|
||||
if group.Accepted || group.Owner == "self" {
|
||||
p.JoinServer(group.GroupServer)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
fmt.Printf("Error selecting profile, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "/info":
|
||||
if peer != nil {
|
||||
fmt.Printf("Address cwtch:%v\n", peer.GetProfile().Onion)
|
||||
} else {
|
||||
fmt.Printf("Profile needs to be set\n")
|
||||
}
|
||||
case "invite":
|
||||
case "/invite":
|
||||
if len(commands) == 2 {
|
||||
fmt.Printf("Inviting cwtch:%v\n", commands[1])
|
||||
app.PeerRequest(commands[1])
|
||||
peer.PeerWithOnion(commands[1])
|
||||
} else {
|
||||
fmt.Printf("Error inviting peer, usage: %s\n", usages["invite"])
|
||||
fmt.Printf("Error inviting peer, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "peers":
|
||||
peers := app.Peer.GetPeers()
|
||||
/*case "/list-peers":
|
||||
peers := peer.GetPeers()
|
||||
for p, s := range peers {
|
||||
fmt.Printf("Name: %v Status: %v\n", p, s)
|
||||
fmt.Printf("Name: %v Status: %v\n", p, connections.ConnectionStateName[s])
|
||||
}
|
||||
case "servers":
|
||||
servers := app.Peer.GetServers()
|
||||
case "/list-servers":
|
||||
servers := peer.GetServers()
|
||||
for s, st := range servers {
|
||||
fmt.Printf("Name: %v Status: %v\n", s, st)
|
||||
}
|
||||
case "contacts":
|
||||
contacts := app.Peer.GetContacts()
|
||||
fmt.Printf("Name: %v Status: %v\n", s, connections.ConnectionStateName[st])
|
||||
}*/
|
||||
case "/list-contacts":
|
||||
contacts := peer.GetContacts()
|
||||
for _, onion := range contacts {
|
||||
c := app.Peer.GetContact(onion)
|
||||
c := peer.GetContact(onion)
|
||||
fmt.Printf("Name: %v Onion: %v Trusted: %v\n", c.Name, c.Onion, c.Trusted)
|
||||
}
|
||||
case "groups":
|
||||
for _, gid := range app.Peer.GetGroups() {
|
||||
g := app.Peer.GetGroup(gid)
|
||||
case "/list-groups":
|
||||
for _, gid := range peer.GetGroups() {
|
||||
g := peer.GetGroup(gid)
|
||||
fmt.Printf("Group Id: %v Owner: %v Accepted:%v\n", gid, g.Owner, g.Accepted)
|
||||
}
|
||||
case "trust":
|
||||
case "/trust":
|
||||
if len(commands) == 2 {
|
||||
app.Peer.TrustPeer(commands[1])
|
||||
peer.TrustPeer(commands[1])
|
||||
} else {
|
||||
fmt.Printf("Error trusting peer, usage: %s\n", usages["trust"])
|
||||
fmt.Printf("Error trusting peer, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "block":
|
||||
case "/block":
|
||||
if len(commands) == 2 {
|
||||
app.Peer.BlockPeer(commands[1])
|
||||
peer.BlockPeer(commands[1])
|
||||
} else {
|
||||
fmt.Printf("Error blocking peer, usage: %s\n", usages["trust"])
|
||||
fmt.Printf("Error blocking peer, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "accept-invite":
|
||||
case "/accept-invite":
|
||||
if len(commands) == 2 {
|
||||
groupID := commands[1]
|
||||
err := app.Peer.AcceptInvite(groupID)
|
||||
err := peer.AcceptInvite(groupID)
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
} else {
|
||||
app.Peer.Save(profilefile)
|
||||
group := app.Peer.GetGroup(groupID)
|
||||
group := peer.GetGroup(groupID)
|
||||
if group == nil {
|
||||
fmt.Printf("Error: group does not exist\n")
|
||||
} else {
|
||||
app.Peer.JoinServer(group.GroupServer)
|
||||
peer.JoinServer(group.GroupServer)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error accepting invite, usage: %s\n", usages["accept-invite"])
|
||||
fmt.Printf("Error accepting invite, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "invite-to-group":
|
||||
case "/invite-to-group":
|
||||
if len(commands) == 3 {
|
||||
fmt.Printf("Inviting %v to %v\n", commands[1], commands[2])
|
||||
err := app.Peer.InviteOnionToGroup(commands[1], commands[2])
|
||||
err := peer.InviteOnionToGroup(commands[2], commands[1])
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error inviting peer to group, usage: %s\n", usages["invite-to-group"])
|
||||
fmt.Printf("Error inviting peer to group, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "new-group":
|
||||
case "/new-group":
|
||||
if len(commands) == 2 && commands[1] != "" {
|
||||
fmt.Printf("Setting up a new group on server:%v\n", commands[1])
|
||||
id, _, err := app.Peer.StartGroup(commands[1])
|
||||
id, _, err := peer.StartGroup(commands[1])
|
||||
if err == nil {
|
||||
fmt.Printf("New Group [%v] created for server %v\n", id, commands[1])
|
||||
app.Peer.Save(profilefile)
|
||||
group := app.Peer.GetGroup(id)
|
||||
group := peer.GetGroup(id)
|
||||
if group == nil {
|
||||
fmt.Printf("Error: group does not exist\n")
|
||||
} else {
|
||||
app.Peer.JoinServer(group.GroupServer)
|
||||
peer.JoinServer(group.GroupServer)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error creating new group: %v", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error creating a new group, usage: %s\n", usages["new-group"])
|
||||
fmt.Printf("Error creating a new group, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "send":
|
||||
if len(commands) > 2 {
|
||||
message := strings.Join(commands[2:], " ")
|
||||
err := app.Peer.SendMessageToGroup(commands[1], message)
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error sending message to group, usage: %s\n", usages["send"])
|
||||
}
|
||||
case "timeline":
|
||||
case "/select-group":
|
||||
if len(commands) == 2 {
|
||||
group := app.Peer.GetGroup(commands[1])
|
||||
if group == nil {
|
||||
fmt.Printf("Error: group does not exist\n")
|
||||
g := peer.GetGroup(commands[1])
|
||||
if g == nil {
|
||||
fmt.Printf("Error: group %s not found!\n", commands[1])
|
||||
} else {
|
||||
timeline := group.GetTimeline()
|
||||
for _, m := range timeline {
|
||||
verified := "not-verified"
|
||||
if m.Verified {
|
||||
verified = "verified"
|
||||
}
|
||||
stopGroupFollow()
|
||||
group = g
|
||||
|
||||
p := app.Peer.GetContact(m.PeerID)
|
||||
name := "unknown"
|
||||
if p != nil {
|
||||
name = p.Name
|
||||
} else if app.Peer.GetProfile().Onion == m.PeerID {
|
||||
name = app.Peer.GetProfile().Name
|
||||
}
|
||||
|
||||
fmt.Printf("%v %v (%v): %v [%s]\n", m.Timestamp, name, m.PeerID, m.Message, verified)
|
||||
fmt.Printf("--------------- %v ---------------\n", group.GroupID)
|
||||
gms := group.Timeline.GetMessages()
|
||||
max := 20
|
||||
if len(gms) < max {
|
||||
max = len(gms)
|
||||
}
|
||||
for i := len(gms) - max; i < len(gms); i++ {
|
||||
printMessage(gms[i])
|
||||
}
|
||||
fmt.Printf("------------------------------\n")
|
||||
|
||||
startGroupFollow()
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error reading timeline from group, usage: %s\n", usages["timeline"])
|
||||
fmt.Printf("Error selecting a group, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "export-group":
|
||||
case "/unselect-group":
|
||||
stopGroupFollow()
|
||||
case "/export-group":
|
||||
if len(commands) == 2 {
|
||||
group := app.Peer.GetGroup(commands[1])
|
||||
group := peer.GetGroup(commands[1])
|
||||
if group == nil {
|
||||
fmt.Printf("Error: group does not exist\n")
|
||||
} else {
|
||||
invite, _ := app.Peer.ExportGroup(commands[1])
|
||||
invite, _ := peer.ExportGroup(commands[1])
|
||||
fmt.Printf("Invite: %v\n", invite)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Error reading timeline from group, usage: %s\n", usages["timeline"])
|
||||
fmt.Printf("Error exporting group, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "save":
|
||||
app.Peer.Save(profilefile)
|
||||
case "help":
|
||||
case "/import-group":
|
||||
if len(commands) == 2 {
|
||||
groupID, err := peer.ImportGroup(commands[1])
|
||||
if err != nil {
|
||||
fmt.Printf("Error importing group: %v\n", err)
|
||||
} else {
|
||||
fmt.Printf("Imported group: %s\n", groupID)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("%v", commands)
|
||||
fmt.Printf("Error importing group, usage: %s\n", usages[commands[0]])
|
||||
}
|
||||
case "/help":
|
||||
for _, command := range suggestions {
|
||||
fmt.Printf("%-18s%-56s%s\n", command.Text, command.Description, usages[command.Text])
|
||||
}
|
||||
case "sendlots":
|
||||
case "/sendlots":
|
||||
if len(commands) == 2 {
|
||||
group := app.Peer.GetGroup(commands[1])
|
||||
group := peer.GetGroup(commands[1])
|
||||
if group == nil {
|
||||
fmt.Printf("Error: group does not exist\n")
|
||||
} else {
|
||||
for i := 0; i < 100; i++ {
|
||||
fmt.Printf("Sending message: %v\n", i)
|
||||
err := app.Peer.SendMessageToGroup(commands[1], fmt.Sprintf("this is message %v", i))
|
||||
err := peer.SendMessageToGroup(commands[1], fmt.Sprintf("this is message %v", i))
|
||||
if err != nil {
|
||||
fmt.Printf("could not send message %v because %v\n", i, err)
|
||||
}
|
||||
|
@ -405,7 +594,7 @@ func main() {
|
|||
for i := 0; i < 100; i++ {
|
||||
found := false
|
||||
for _, m := range timeline {
|
||||
if m.Message == fmt.Sprintf("this is message %v", i) && m.PeerID == app.Peer.GetProfile().Onion {
|
||||
if m.Message == fmt.Sprintf("this is message %v", i) && m.PeerID == peer.GetProfile().Onion {
|
||||
found = true
|
||||
latency := m.Received.Sub(m.Timestamp)
|
||||
fmt.Printf("Latency for Message %v was %v\n", i, latency)
|
||||
|
@ -428,17 +617,9 @@ func main() {
|
|||
}
|
||||
}
|
||||
}
|
||||
if profilefile != "" {
|
||||
if app.Peer != nil {
|
||||
app.Peer.Save(profilefile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if app.TorManager != nil {
|
||||
fmt.Println("Shutting down Tor process...")
|
||||
app.TorManager.Shutdown()
|
||||
}
|
||||
app.Shutdown()
|
||||
acn.Close()
|
||||
os.Exit(0)
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,159 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"os"
|
||||
//"bufio"
|
||||
//"cwtch.im/cwtch/storage"
|
||||
)
|
||||
|
||||
func convertTorFile(filename string, password string) error {
|
||||
return errors.New("this code doesn't work and can never work :( it's a math thing")
|
||||
|
||||
/*name, _ := diceware.Generate(2)
|
||||
sk, err := ioutil.ReadFile("hs_ed25519_secret_key")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sk = sk[32:]
|
||||
|
||||
pk, err := ioutil.ReadFile("hs_ed25519_public_key")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pk = pk[32:]
|
||||
|
||||
onion, err := ioutil.ReadFile("hostname")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
onion = onion[:56]
|
||||
|
||||
peer := libpeer.NewCwtchPeer(strings.Join(name, "-"))
|
||||
|
||||
fmt.Printf("%d %d %s\n", len(peer.GetProfile().Ed25519PublicKey), len(peer.GetProfile().Ed25519PrivateKey), peer.GetProfile().Onion)
|
||||
peer.GetProfile().Ed25519PrivateKey = sk
|
||||
peer.GetProfile().Ed25519PublicKey = pk
|
||||
peer.GetProfile().Onion = string(onion)
|
||||
fileStore := storage2.NewFileStore(filename, password)
|
||||
err = fileStore.save(peer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("success! loaded %d byte pk and %d byte sk for %s.onion\n", len(pk), len(sk), onion)
|
||||
return nil*/
|
||||
}
|
||||
|
||||
/*
|
||||
func vanity() error {
|
||||
for {
|
||||
pk, sk, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
onion := utils.GetTorV3Hostname(pk)
|
||||
for i := 4; i < len(os.Args); i++ {
|
||||
if strings.HasPrefix(onion, os.Args[i]) {
|
||||
peer := libpeer.NewCwtchPeer(os.Args[i])
|
||||
peer.GetProfile().Ed25519PrivateKey = sk
|
||||
peer.GetProfile().Ed25519PublicKey = pk
|
||||
peer.GetProfile().Onion = onion
|
||||
profileStore, _ := storage2.NewProfileStore(nil, os.Args[3], onion+".cwtch")
|
||||
profileStore.Init("")
|
||||
// need to signal new onion? impossible
|
||||
log.Infof("found %s.onion\n", onion)
|
||||
}
|
||||
}
|
||||
}
|
||||
}*/
|
||||
|
||||
func printHelp() {
|
||||
log.Infoln("usage: cwtchutil {help, convert-cwtch-file, convert-tor-file, changepw, vanity}")
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.SetLevel(log.LevelInfo)
|
||||
if len(os.Args) < 2 {
|
||||
printHelp()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
switch os.Args[1] {
|
||||
default:
|
||||
printHelp()
|
||||
case "help":
|
||||
printHelp()
|
||||
case "convert-tor-file":
|
||||
if len(os.Args) != 4 {
|
||||
fmt.Println("example: cwtchutil convert-tor-file /var/lib/tor/hs1 passw0rd")
|
||||
os.Exit(1)
|
||||
}
|
||||
err := convertTorFile(os.Args[2], os.Args[3])
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
}
|
||||
/*case "vanity":
|
||||
if len(os.Args) < 5 {
|
||||
fmt.Println("example: cwtchutil vanity 4 passw0rd erinn openpriv")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
goroutines, err := strconv.Atoi(os.Args[2])
|
||||
if err != nil {
|
||||
log.Errorf("first parameter after vanity should be a number\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
log.Infoln("searching. press ctrl+c to stop")
|
||||
for i := 0; i < goroutines; i++ {
|
||||
go vanity()
|
||||
}
|
||||
|
||||
for { // run until ctrl+c
|
||||
time.Sleep(time.Hour * 24)
|
||||
}*/
|
||||
/*case "changepw":
|
||||
if len(os.Args) != 3 {
|
||||
fmt.Println("example: cwtch changepw ~/.cwtch/profiles/XXX")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("old password: ")
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
pw, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
pw = pw[:len(pw)-1]
|
||||
|
||||
profileStore, _ := storage.NewProfileStore(nil, os.Args[2], pw)
|
||||
|
||||
err = profileStore.Load()
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("new password: ")
|
||||
newpw1, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
newpw1 = newpw1[:len(newpw1)-1] // fuck go with this linebreak shit ^ea
|
||||
|
||||
fileStore2, _ := storage.NewProfileStore(nil, os.Args[2], newpw1)
|
||||
// No way to copy, populate this method
|
||||
err = fileStore2.save(peer)
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
log.Infoln("success!")
|
||||
*/
|
||||
}
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
app2 "cwtch.im/cwtch/app"
|
||||
"cwtch.im/cwtch/app/utils"
|
||||
"cwtch.im/cwtch/event"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
// System Setup, We need Tor and Logging up and Running
|
||||
log.AddEverythingFromPattern("peer/alice")
|
||||
log.SetLevel(log.LevelDebug)
|
||||
|
||||
acn, err := connectivity.StartTor(path.Join(".", ".cwtch"), "")
|
||||
if err != nil {
|
||||
log.Errorf("\nError connecting to Tor: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
app := app2.NewApp(acn, ".")
|
||||
app.CreatePeer("alice", "be gay, do crimes")
|
||||
alice := utils.WaitGetPeer(app, "alice")
|
||||
app.LaunchPeers()
|
||||
eventBus := app.GetEventBus(alice.GetProfile().Onion)
|
||||
queue := event.NewQueue()
|
||||
eventBus.Subscribe(event.NewMessageFromPeer, queue)
|
||||
|
||||
// For every new Data Packet Alice received she will Print it out.
|
||||
for {
|
||||
event := queue.Next()
|
||||
log.Printf(log.LevelInfo, "Received %v from %v: %s", event.EventType, event.Data["Onion"], event.Data["Data"])
|
||||
}
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
app2 "cwtch.im/cwtch/app"
|
||||
"cwtch.im/cwtch/app/utils"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
// System Boilerplate, We need Tor Up and Running
|
||||
log.AddEverythingFromPattern("peer/bob")
|
||||
log.SetLevel(log.LevelDebug)
|
||||
acn, err := connectivity.StartTor(path.Join(".", ".cwtch"), "")
|
||||
if err != nil {
|
||||
log.Errorf("\nError connecting to Tor: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
app := app2.NewApp(acn, ".")
|
||||
app.CreatePeer("bob", "be gay, do crimes")
|
||||
bob := utils.WaitGetPeer(app, "bob")
|
||||
|
||||
// Add Alice's Onion Here (It changes run to run)
|
||||
bob.PeerWithOnion("upiztu7myymjf2dn4x4czhagp7axlnqjvf5zwfegbhtpkqb6v3vgu5yd")
|
||||
|
||||
// Send the Message...
|
||||
log.Infof("Waiting for Bob to Connect to Alice...")
|
||||
bob.SendMessageToPeer("upiztu7myymjf2dn4x4czhagp7axlnqjvf5zwfegbhtpkqb6v3vgu5yd", "Hello Alice!!!")
|
||||
|
||||
// Wait a while...
|
||||
// Everything is run in a goroutine so the main thread has to stay active
|
||||
time.Sleep(time.Second * 100)
|
||||
|
||||
}
|
|
@ -0,0 +1,98 @@
|
|||
package plugins
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const tickTime = 10 * time.Second
|
||||
const maxBakoff int = 32 // 320 seconds or ~5 min
|
||||
|
||||
type peer struct {
|
||||
id string
|
||||
state connections.ConnectionState
|
||||
|
||||
ticks int
|
||||
backoff int
|
||||
}
|
||||
|
||||
type contactRetry struct {
|
||||
bus event.Manager
|
||||
queue event.Queue
|
||||
|
||||
breakChan chan bool
|
||||
|
||||
peers sync.Map //[string]*peer
|
||||
}
|
||||
|
||||
// NewContactRetry returns a Plugin that when started will retry connecting to contacts with a backoff timing
|
||||
func NewContactRetry(bus event.Manager) Plugin {
|
||||
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool), peers: sync.Map{}}
|
||||
return cr
|
||||
}
|
||||
|
||||
func (cr *contactRetry) Start() {
|
||||
go cr.run()
|
||||
}
|
||||
|
||||
func (cr *contactRetry) run() {
|
||||
cr.bus.Subscribe(event.PeerStateChange, cr.queue)
|
||||
for {
|
||||
select {
|
||||
case e := <-cr.queue.OutChan():
|
||||
switch e.EventType {
|
||||
case event.PeerStateChange:
|
||||
state := connections.ConnectionStateToType[e.Data[event.ConnectionState]]
|
||||
peer := e.Data[event.RemotePeer]
|
||||
cr.handleEvent(peer, state)
|
||||
}
|
||||
|
||||
case <-time.After(tickTime):
|
||||
cr.peers.Range(func(k, v interface{}) bool {
|
||||
p := v.(*peer)
|
||||
|
||||
if p.state == connections.DISCONNECTED {
|
||||
p.ticks++
|
||||
if p.ticks == p.backoff {
|
||||
p.ticks = 0
|
||||
cr.bus.Publish(event.NewEvent(event.PeerRequest, map[event.Field]string{event.RemotePeer: p.id}))
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
case <-cr.breakChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cr *contactRetry) handleEvent(id string, state connections.ConnectionState) {
|
||||
if _, exists := cr.peers.Load(id); !exists {
|
||||
p := &peer{id: id, state: connections.DISCONNECTED, backoff: 1, ticks: 0}
|
||||
cr.peers.Store(id, p)
|
||||
return
|
||||
}
|
||||
|
||||
pinf, _ := cr.peers.Load(id)
|
||||
p := pinf.(*peer)
|
||||
if state == connections.DISCONNECTED || state == connections.FAILED || state == connections.KILLED {
|
||||
p.state = connections.DISCONNECTED
|
||||
if p.backoff < maxBakoff {
|
||||
p.backoff *= 2
|
||||
}
|
||||
p.ticks = 0
|
||||
} else if state == connections.CONNECTING || state == connections.CONNECTED {
|
||||
p.state = state
|
||||
} else if state == connections.AUTHENTICATED {
|
||||
p.state = state
|
||||
p.backoff = 1
|
||||
}
|
||||
}
|
||||
|
||||
func (cr *contactRetry) Shutdown() {
|
||||
cr.breakChan <- true
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
package plugins
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
)
|
||||
|
||||
// PluginID is used as an ID for signaling plugin activities
|
||||
type PluginID int
|
||||
|
||||
// These are the plugin IDs for the supplied plugins
|
||||
const (
|
||||
CONTACTRETRY PluginID = iota
|
||||
)
|
||||
|
||||
// Plugin is the interface for a plugin
|
||||
type Plugin interface {
|
||||
Start()
|
||||
Shutdown()
|
||||
}
|
||||
|
||||
// Get is a plugin factory for the requested plugin
|
||||
func Get(id PluginID, bus event.Manager) Plugin {
|
||||
switch id {
|
||||
case CONTACTRETRY:
|
||||
return NewContactRetry(bus)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
app2 "cwtch.im/cwtch/app"
|
||||
"cwtch.im/cwtch/peer"
|
||||
"time"
|
||||
)
|
||||
|
||||
// WaitGetPeer is a helper function for utility apps not written using the event bus
|
||||
// Proper use of an App is to call CreatePeer and then process the NewPeer event
|
||||
// however for small utility use, this function which polls the app until the peer is created
|
||||
// may fill that usecase better
|
||||
func WaitGetPeer(app app2.Application, name string) peer.CwtchPeer {
|
||||
for true {
|
||||
for id, n := range app.ListPeers() {
|
||||
if n == name {
|
||||
return app.GetPeer(id)
|
||||
}
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,126 +0,0 @@
|
|||
package tor
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/yawning/bulb"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Manager checks connectivity of the Tor process used to support Cwtch
|
||||
type Manager struct {
|
||||
socksPort int
|
||||
controlPort int
|
||||
process *exec.Cmd
|
||||
}
|
||||
|
||||
// NewTorManager Instantiates a new connection manager, returns non-nil error if it fails to connect to a tor daemon on the given ports.
|
||||
func NewTorManager(socksPort int, controlPort int, torrc string) (*Manager, error) {
|
||||
torManager := new(Manager)
|
||||
torManager.socksPort = socksPort
|
||||
torManager.controlPort = controlPort
|
||||
err := torManager.TestConnection()
|
||||
|
||||
if err == nil {
|
||||
log.Printf("using existing tor proxy")
|
||||
return torManager, nil
|
||||
}
|
||||
|
||||
// try to start tor
|
||||
cmd := exec.Command("tor", "-f", torrc)
|
||||
log.Printf("starting local tor proxy")
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
log.Printf("starting tor failed %v", err)
|
||||
return nil, err
|
||||
}
|
||||
time.Sleep(time.Second * 5)
|
||||
torManager.process = cmd
|
||||
err = torManager.TestConnection()
|
||||
return torManager, err
|
||||
}
|
||||
|
||||
type proxyStatus int
|
||||
|
||||
const (
|
||||
proxyStatusOK proxyStatus = iota
|
||||
proxyStatusWrongType
|
||||
proxyStatusCannotConnect
|
||||
proxyStatusTimeout
|
||||
)
|
||||
|
||||
// Shutdown kills the managed Tor Process
|
||||
func (tm *Manager) Shutdown() {
|
||||
if tm.process != nil {
|
||||
if err := tm.process.Process.Kill(); err != nil {
|
||||
log.Fatal("failed to kill process: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Detect whether a proxy is connectable and is a Tor proxy
|
||||
func checkTorProxy(proxyAddress string) proxyStatus {
|
||||
// A trick to do this without making an outward connection is,
|
||||
// paradoxically, to try to open it as http.
|
||||
// This is documented in section 4 here: https://github.com/torproject/torspec/blob/master/socks-extensions.txt
|
||||
client := &http.Client{Timeout: 2 * time.Second}
|
||||
response, err := client.Get("http://" + proxyAddress + "/")
|
||||
if err != nil {
|
||||
switch t := err.(type) {
|
||||
case *url.Error:
|
||||
switch t.Err.(type) {
|
||||
case *net.OpError: // Network-level error. Will in turn contain a os.SyscallError
|
||||
return proxyStatusCannotConnect
|
||||
default:
|
||||
// http.error unfortunately not exported, need to match on string
|
||||
// net/http: request canceled
|
||||
if strings.Index(t.Err.Error(), "request canceled") != -1 {
|
||||
return proxyStatusTimeout
|
||||
}
|
||||
}
|
||||
}
|
||||
// Protocol-level errors mean that http failed, so it's not Tor
|
||||
return proxyStatusWrongType
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.Status != "501 Tor is not an HTTP Proxy" {
|
||||
return proxyStatusWrongType
|
||||
}
|
||||
return proxyStatusOK
|
||||
}
|
||||
|
||||
func proxyStatusMessage(status proxyStatus) string {
|
||||
switch status {
|
||||
case proxyStatusWrongType:
|
||||
return "Proxy specified is not a Tor proxy"
|
||||
case proxyStatusCannotConnect:
|
||||
return "Cannot connect to Tor proxy"
|
||||
case proxyStatusTimeout:
|
||||
return "Proxy timeout"
|
||||
default:
|
||||
return "Unknown proxy error"
|
||||
}
|
||||
}
|
||||
|
||||
// TestConnection returns nil if both the socks and control ports of the Tor connection are active, otherwise it returns an error.
|
||||
func (tm *Manager) TestConnection() error {
|
||||
proxyStatus := checkTorProxy(fmt.Sprintf("127.0.0.1:%d", tm.socksPort))
|
||||
controlAddress := fmt.Sprintf("127.0.0.1:%d", tm.controlPort)
|
||||
if proxyStatus == proxyStatusOK {
|
||||
c, err := bulb.Dial("tcp4", controlAddress)
|
||||
if c != nil {
|
||||
c.Close()
|
||||
}
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("could not connect to Tor Control Port %v %v", tm.controlPort, err)
|
||||
}
|
||||
return errors.New(proxyStatusMessage(proxyStatus))
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
package tor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestTorManager(t *testing.T) {
|
||||
|
||||
os.Remove("/tmp/torrc")
|
||||
file, _ := os.Create("/tmp/torrc")
|
||||
fmt.Fprintf(file, "SOCKSPort %d\nControlPort %d\nDataDirectory /tmp/tor\n", 10050, 10051)
|
||||
file.Close()
|
||||
tm, err := NewTorManager(10050, 10051, "/tmp/torrc")
|
||||
if err != nil {
|
||||
t.Errorf("creating a new tor manager failed: %v", err)
|
||||
} else {
|
||||
|
||||
tm2, err := NewTorManager(10050, 10051, "/tmp/torrc")
|
||||
if err != nil {
|
||||
t.Errorf("creating a new tor manager failed: %v", err)
|
||||
}
|
||||
tm2.Shutdown() // should not noop
|
||||
}
|
||||
tm.Shutdown()
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
package bridge
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type goChanBridge struct {
|
||||
in chan event.IPCMessage
|
||||
out chan event.IPCMessage
|
||||
closedChan chan bool
|
||||
closed bool
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// MakeGoChanBridge returns a simple testing IPCBridge made from inprocess go channels
|
||||
func MakeGoChanBridge() (b1, b2 event.IPCBridge) {
|
||||
chan1 := make(chan event.IPCMessage)
|
||||
chan2 := make(chan event.IPCMessage)
|
||||
closed := make(chan bool)
|
||||
|
||||
a := &goChanBridge{in: chan1, out: chan2, closedChan: closed, closed: false}
|
||||
b := &goChanBridge{in: chan2, out: chan1, closedChan: closed, closed: false}
|
||||
|
||||
go monitor(a, b)
|
||||
|
||||
return a, b
|
||||
}
|
||||
|
||||
func monitor(a, b *goChanBridge) {
|
||||
<-a.closedChan
|
||||
a.closed = true
|
||||
b.closed = true
|
||||
a.closedChan <- true
|
||||
}
|
||||
|
||||
func (pb *goChanBridge) Read() (*event.IPCMessage, bool) {
|
||||
message, ok := <-pb.in
|
||||
return &message, ok
|
||||
}
|
||||
|
||||
func (pb *goChanBridge) Write(message *event.IPCMessage) {
|
||||
pb.lock.Lock()
|
||||
defer pb.lock.Unlock()
|
||||
if !pb.closed {
|
||||
pb.out <- *message
|
||||
}
|
||||
}
|
||||
|
||||
func (pb *goChanBridge) Shutdown() {
|
||||
if !pb.closed {
|
||||
close(pb.in)
|
||||
close(pb.out)
|
||||
pb.closedChan <- true
|
||||
<-pb.closedChan
|
||||
}
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
// +build windows
|
||||
|
||||
package bridge
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"log"
|
||||
)
|
||||
|
||||
func NewPipeBridgeClient(inFilename, outFilename string) event.IPCBridge {
|
||||
log.Fatal("Not supported on windows")
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewPipeBridgeService returns a pipe backed IPCBridge for a service
|
||||
func NewPipeBridgeService(inFilename, outFilename string) event.IPCBridge {
|
||||
log.Fatal("Not supported on windows")
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,241 @@
|
|||
// +build !windows
|
||||
|
||||
package bridge
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"syscall"
|
||||
|
||||
"cwtch.im/cwtch/event"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
/* pipeBridge creates a pair of named pipes
|
||||
Needs a call to new client and service to fully successfully open
|
||||
*/
|
||||
|
||||
const maxBufferSize = 1000
|
||||
|
||||
type pipeBridge struct {
|
||||
infile, outfile string
|
||||
in, out *os.File
|
||||
read, write chan event.IPCMessage
|
||||
closedChan chan bool
|
||||
state connections.ConnectionState
|
||||
lock sync.Mutex
|
||||
|
||||
// For logging / debugging purposes
|
||||
name string
|
||||
}
|
||||
|
||||
func newPipeBridge(inFilename, outFilename string) *pipeBridge {
|
||||
syscall.Mkfifo(inFilename, 0600)
|
||||
syscall.Mkfifo(outFilename, 0600)
|
||||
pb := &pipeBridge{infile: inFilename, outfile: outFilename, state: connections.DISCONNECTED}
|
||||
pb.read = make(chan event.IPCMessage, maxBufferSize)
|
||||
pb.write = make(chan event.IPCMessage, maxBufferSize)
|
||||
return pb
|
||||
}
|
||||
|
||||
// NewPipeBridgeClient returns a pipe backed IPCBridge for a client
|
||||
func NewPipeBridgeClient(inFilename, outFilename string) event.IPCBridge {
|
||||
log.Debugf("Making new PipeBridge Client...\n")
|
||||
pb := newPipeBridge(inFilename, outFilename)
|
||||
pb.name = "client"
|
||||
go pb.connectionManager()
|
||||
|
||||
return pb
|
||||
}
|
||||
|
||||
// NewPipeBridgeService returns a pipe backed IPCBridge for a service
|
||||
func NewPipeBridgeService(inFilename, outFilename string) event.IPCBridge {
|
||||
log.Debugf("Making new PipeBridge Service...\n")
|
||||
pb := newPipeBridge(inFilename, outFilename)
|
||||
pb.name = "service"
|
||||
|
||||
go pb.connectionManager()
|
||||
|
||||
log.Debugf("Successfully created new PipeBridge Service!\n")
|
||||
return pb
|
||||
}
|
||||
|
||||
// connectionManager repeatedly (re)opens both fifos and services the
// connection until the bridge is killed.
// NOTE(review): on an open failure this retries immediately with no backoff,
// so a persistent error becomes a busy loop — confirm whether that is intended.
func (pb *pipeBridge) connectionManager() {
	for pb.state != connections.KILLED {
		log.Debugf("clientConnManager loop start init\n")
		pb.state = connections.CONNECTING

		var err error
		log.Debugf("%v open file infile\n", pb.name)
		// O_RDWR rather than O_RDONLY — presumably so opening the fifo does
		// not block waiting for the other end; confirm against fifo(7).
		pb.in, err = os.OpenFile(pb.infile, os.O_RDWR, 0600)
		if err != nil {
			pb.state = connections.DISCONNECTED
			continue
		}

		log.Debugf("%v open file outfile\n", pb.name)
		pb.out, err = os.OpenFile(pb.outfile, os.O_RDWR, 0600)
		if err != nil {
			pb.state = connections.DISCONNECTED
			continue
		}

		log.Debugf("Successfully connected PipeBridge %v!\n", pb.name)

		// Blocks until the read/write goroutines (or Shutdown) end the connection.
		pb.handleConns()
	}
	log.Debugf("exiting %v ConnectionManager\n", pb.name)

}
|
||||
|
||||
// handleConns runs one connection's lifetime: spawn the reader and writer
// goroutines, then block until one of them (or Shutdown) signals termination.
func (pb *pipeBridge) handleConns() {

	// auth?
	pb.state = connections.AUTHENTICATED

	// Buffered so reader, writer and Shutdown can each signal without blocking.
	pb.closedChan = make(chan bool, 5)

	log.Debugf("handleConns authed, %v 2xgo\n", pb.name)

	go pb.handleRead()
	go pb.handleWrite()

	// Wait for the first termination signal.
	<-pb.closedChan
	log.Debugf("handleConns <-closedChan (%v)\n", pb.name)
	// Preserve KILLED (deliberate shutdown); anything else is a failure.
	if pb.state != connections.KILLED {
		pb.state = connections.FAILED
	}
	pb.closeReset()
	log.Debugf("handleConns done for %v, exit\n", pb.name)
}
|
||||
|
||||
// closeReset tears down the current pipe files and replaces the message
// channels with fresh ones so connectionManager can attempt a clean reconnect.
func (pb *pipeBridge) closeReset() {
	pb.in.Close()
	pb.out.Close()
	close(pb.write)
	close(pb.read)
	pb.read = make(chan event.IPCMessage, maxBufferSize)
	pb.write = make(chan event.IPCMessage, maxBufferSize)
}
|
||||
|
||||
func (pb *pipeBridge) handleWrite() {
|
||||
log.Debugf("handleWrite() %v\n", pb.name)
|
||||
defer log.Debugf("exiting handleWrite() %v\n", pb.name)
|
||||
|
||||
for {
|
||||
select {
|
||||
case message := <-pb.write:
|
||||
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||
log.Debugf("handleWrite <- message: %v %v ...\n", message.Dest, message.Message.EventType)
|
||||
} else {
|
||||
log.Debugf("handleWrite <- message: %v\n", message)
|
||||
}
|
||||
if pb.state == connections.AUTHENTICATED {
|
||||
encMessage := &event.IPCMessage{Dest: message.Dest, Message: event.Event{EventType: message.Message.EventType, EventID: message.Message.EventID, Data: make(map[event.Field]string)}}
|
||||
for k, v := range message.Message.Data {
|
||||
encMessage.Message.Data[k] = base64.StdEncoding.EncodeToString([]byte(v))
|
||||
}
|
||||
|
||||
messageJSON, _ := json.Marshal(encMessage)
|
||||
size := make([]byte, 2)
|
||||
binary.LittleEndian.PutUint16(size, uint16(len(messageJSON)))
|
||||
pb.out.Write(size)
|
||||
|
||||
for pos := 0; pos < len(messageJSON); {
|
||||
n, err := pb.out.Write(messageJSON)
|
||||
if err != nil {
|
||||
log.Errorf("Writing out on pipeBridge: %v\n", err)
|
||||
pb.closedChan <- true
|
||||
return
|
||||
}
|
||||
pos += n
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pb *pipeBridge) handleRead() {
|
||||
log.Debugf("handleRead() %v\n", pb.name)
|
||||
defer log.Debugf("exiting handleRead() %v", pb.name)
|
||||
|
||||
var n int
|
||||
size := make([]byte, 2)
|
||||
var err error
|
||||
for {
|
||||
log.Debugf("Waiting to handleRead()...\n")
|
||||
n, err = pb.in.Read(size)
|
||||
if err != nil || n != 2 {
|
||||
log.Errorf("Could not read len int from stream: %v\n", err)
|
||||
pb.closedChan <- true
|
||||
return
|
||||
}
|
||||
|
||||
n = int(binary.LittleEndian.Uint16(size))
|
||||
pos := 0
|
||||
buffer := make([]byte, n)
|
||||
for n > 0 {
|
||||
m, err := pb.in.Read(buffer[pos:])
|
||||
if err != nil {
|
||||
log.Errorf("Reading into buffer from pipe: %v\n", err)
|
||||
pb.closedChan <- true
|
||||
return
|
||||
}
|
||||
n -= m
|
||||
pos += m
|
||||
}
|
||||
|
||||
var message event.IPCMessage
|
||||
err = json.Unmarshal(buffer, &message)
|
||||
if err != nil {
|
||||
log.Errorf("Read error: %v --value: %v", err, message)
|
||||
continue // signal error?
|
||||
}
|
||||
for k, v := range message.Message.Data {
|
||||
val, _ := base64.StdEncoding.DecodeString(v)
|
||||
message.Message.Data[k] = string(val)
|
||||
}
|
||||
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||
log.Debugf("handleRead read<-: %v %v ...\n", message.Dest, message.Message.EventType)
|
||||
} else {
|
||||
log.Debugf("handleRead read<-: %v\n", message)
|
||||
}
|
||||
pb.read <- message
|
||||
log.Debugf("handleRead wrote\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (pb *pipeBridge) Read() (*event.IPCMessage, bool) {
|
||||
log.Debugf("Read() %v...\n", pb.name)
|
||||
|
||||
message := <-pb.read
|
||||
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||
log.Debugf("Read %v: %v %v ...\n", pb.name, message.Dest, message.Message.EventType)
|
||||
} else {
|
||||
log.Debugf("Read %v: %v\n", pb.name, message)
|
||||
}
|
||||
return &message, pb.state != connections.KILLED
|
||||
}
|
||||
|
||||
func (pb *pipeBridge) Write(message *event.IPCMessage) {
|
||||
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||
log.Debugf("Write %v: %v %v ...\n", pb.name, message.Dest, message.Message.EventType)
|
||||
} else {
|
||||
log.Debugf("Write %v: %v\n", pb.name, message)
|
||||
}
|
||||
pb.write <- *message
|
||||
log.Debugf("Wrote\n")
|
||||
}
|
||||
|
||||
// Shutdown marks the bridge as killed and wakes handleConns so the connection
// is torn down.
// NOTE(review): closedChan is only created inside handleConns; calling
// Shutdown before a connection was ever established would send on a nil
// channel and block forever — confirm callers only Shutdown after connect.
func (pb *pipeBridge) Shutdown() {
	log.Debugf("pb.Shutdown() for %v currently in state: %v\n", pb.name, connections.ConnectionStateName[pb.state])
	pb.state = connections.KILLED
	pb.closedChan <- true
	log.Debugf("Done Shutdown for %v\n", pb.name)
}
|
|
@ -0,0 +1,57 @@
|
|||
package bridge
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
	clientPipe  = "./client"  // fifo created in the test CWD for the client side
	servicePipe = "./service" // fifo created in the test CWD for the service side
)
|
||||
|
||||
func clientHelper(t *testing.T, in, out string, messageOrig *event.IPCMessage, done chan bool) {
|
||||
client := NewPipeBridgeClient(in, out)
|
||||
|
||||
messageAfter, ok := client.Read()
|
||||
if !ok {
|
||||
t.Errorf("Reading from client IPCBridge failed")
|
||||
done <- true
|
||||
return
|
||||
}
|
||||
|
||||
if messageOrig.Dest != messageAfter.Dest {
|
||||
t.Errorf("Dest's value differs expected: %v actaul: %v", messageOrig.Dest, messageAfter.Dest)
|
||||
}
|
||||
|
||||
if messageOrig.Message.EventType != messageAfter.Message.EventType {
|
||||
t.Errorf("EventTypes's value differs expected: %v actaul: %v", messageOrig.Message.EventType, messageAfter.Message.EventType)
|
||||
}
|
||||
|
||||
if messageOrig.Message.Data[event.Identity] != messageAfter.Message.Data[event.Identity] {
|
||||
t.Errorf("Data[Identity]'s value differs expected: %v actaul: %v", messageOrig.Message.Data[event.Identity], messageAfter.Message.Data[event.Identity])
|
||||
}
|
||||
|
||||
done <- true
|
||||
}
|
||||
|
||||
func serviceHelper(t *testing.T, in, out string, messageOrig *event.IPCMessage, done chan bool) {
|
||||
service := NewPipeBridgeService(in, out)
|
||||
|
||||
service.Write(messageOrig)
|
||||
|
||||
done <- true
|
||||
}
|
||||
|
||||
// TestPipeBridge round-trips one event service -> client over a pair of fifos.
// NOTE(review): the fifo files ./client and ./service are left on disk after
// the test — consider cleaning them up.
func TestPipeBridge(t *testing.T) {

	messageOrig := &event.IPCMessage{Dest: "ABC", Message: event.NewEventList(event.NewPeer, event.Identity, "It is I")}
	serviceDone := make(chan bool)
	clientDone := make(chan bool)

	// The in/out filenames are crossed: the client's "in" is the service's "out".
	go clientHelper(t, clientPipe, servicePipe, messageOrig, clientDone)
	go serviceHelper(t, servicePipe, clientPipe, messageOrig, serviceDone)

	<-serviceDone
	<-clientDone
}
|
|
@ -0,0 +1,197 @@
|
|||
package event
|
||||
|
||||
// Type captures the definition of many common Cwtch application events
type Type string

// Defining Common Event Types
const (
	StatusRequest        = Type("StatusRequest")
	ProtocolEngineStatus = Type("ProtocolEngineStatus")

	PeerRequest = Type("PeerRequest")

	// Blocking Events both Block and Unblock have the same structure
	// attributes:
	// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
	BlockPeer   = Type("BlockPeer")
	UnblockPeer = Type("UnblockPeer")

	JoinServer = Type("JoinServer")

	ProtocolEngineStartListen = Type("ProtocolEngineStartListen")
	ProtocolEngineStopped     = Type("ProtocolEngineStopped")

	InvitePeerToGroup = Type("InvitePeerToGroup")

	// a group invite has been received from a remote peer
	// attributes:
	// TimestampReceived [eg time.Now().Format(time.RFC3339Nano)]
	// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"]
	// GroupInvite: [eg "torv3....."]
	NewGroupInvite = Type("NewGroupInvite")

	// GroupID
	AcceptGroupInvite = Type("AcceptGroupInvite")

	// NOTE(review): the string value is "SendMessagetoGroup" (lowercase 't')
	// while the identifier capitalizes it; kept as-is — the wire value must not change.
	SendMessageToGroup = Type("SendMessagetoGroup")

	//Ciphertext, Signature:
	EncryptedGroupMessage = Type("EncryptedGroupMessage")
	//TimestampReceived, TimestampSent, Data(Message), GroupID, Signature, PreviousSignature, RemotePeer
	NewMessageFromGroup = Type("NewMessageFromGroup")

	// an error was encountered trying to send a particular Message to a group
	// attributes:
	// GroupServer: The server the Message was sent to
	// Signature: The signature of the Message that failed to send
	// Error: string describing the error
	SendMessageToGroupError = Type("SendMessageToGroupError")

	SendMessageToPeer  = Type("SendMessageToPeer")
	NewMessageFromPeer = Type("NewMessageFromPeer")

	// Peer acknowledges a previously sent message
	// attributes
	// EventID: The original event id that the peer is responding too.
	// RemotePeer: The peer associated with the acknowledgement
	PeerAcknowledgement = Type("PeerAcknowledgement")

	// attributes:
	// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"]
	// Error: string describing the error
	SendMessageToPeerError = Type("SendMessageToPeerError")

	// REQUESTS TO STORAGE ENGINE

	// a peer contact has been added
	// attributes:
	// RemotePeer [eg ""]
	PeerCreated = Type("PeerCreated")

	// a group has been successfully added or newly created
	// attributes:
	// Data [serialized *model.Group]
	GroupCreated = Type("GroupCreated")

	// RemotePeer
	DeleteContact = Type("DeleteContact")

	// GroupID
	DeleteGroup = Type("DeleteGroup")

	// change the .Name attribute of a profile (careful - this is not a custom attribute. it is used in the underlying protocol during handshakes!)
	// attributes:
	// ProfileName [eg "erinn"]
	SetProfileName = Type("SetProfileName")

	// request to store a profile-wide attribute (good for e.g. per-profile settings like theme prefs)
	// attributes:
	// Key [eg "fontcolor"]
	// Data [eg "red"]
	SetAttribute = Type("SetAttribute")

	// request to store a per-contact attribute (e.g. display names for a peer)
	// attributes:
	// RemotePeer [eg ""]
	// Key [eg "nick"]
	// Data [eg "erinn"]
	SetPeerAttribute = Type("SetPeerAttribute")

	// request to store a per-cwtch-group attribute (e.g. display name for a group)
	// attributes:
	// GroupID [eg ""]
	// Key [eg "nick"]
	// Data [eg "open privacy board"]
	SetGroupAttribute = Type("SetGroupAttribute")

	// RemotePeer
	// ConnectionState
	PeerStateChange = Type("PeerStateChange")

	// GroupServer
	// ConnectionState
	ServerStateChange = Type("ServerStateChange")

	/***** Application client / service messages *****/

	// ProfileName, Password
	CreatePeer = Type("CreatePeer")

	// service -> client: Identity(localId), Password, [Status(new/default=blank || from reload='running')]
	// app -> Identity(onion)
	NewPeer = Type("NewPeer")

	// Identity(onion), Data(pluginID)
	AddPeerPlugin = Type("AddPeerPlugin")

	// Password
	LoadProfiles = Type("LoadProfiles")

	// Client has reloaded, triggers NewPeer s then ReloadDone
	ReloadClient = Type("ReloadClient")

	ReloadDone = Type("ReloadDone")

	// Identity - Ask service to resend all connection states
	ReloadPeer = Type("ReloadPeer")

	// Identity(onion)
	ShutdownPeer = Type("ShutdownPeer")

	Shutdown = Type("Shutdown")

	// Error(err)
	PeerError = Type("PeerError")

	// Error(err)
	AppError = Type("AppError")

	// Progress, Status
	ACNStatus = Type("ACNStatus")
)
|
||||
|
||||
// Field defines common event attributes
type Field string

// Defining Common Field Types
const (
	RemotePeer        = Field("RemotePeer")
	Ciphertext        = Field("Ciphertext")
	Signature         = Field("Signature")
	PreviousSignature = Field("PreviousSignature")
	TimestampSent     = Field("TimestampSent")
	TimestampReceived = Field("TimestampReceived")

	Identity = Field("Identity")

	GroupID     = Field("GroupID")
	GroupServer = Field("GroupServer")
	GroupInvite = Field("GroupInvite")

	ProfileName = Field("ProfileName")
	Password    = Field("Password")

	ConnectionState = Field("ConnectionState")

	Key  = Field("Key")
	Data = Field("Data")

	Error = Field("Error")

	// NOTE(review): identifier is misspelled ("Progreess") but exported —
	// renaming would break callers, so it is kept; the string value is correct.
	Progreess = Field("Progress")
	Status    = Field("Status")
	EventID   = Field("EventID")
	EventContext = Field("EventContext")
)
|
||||
|
||||
// Defining Common errors
const (
	AppErrLoaded0 = "Loaded 0 profiles"
)

// Defining Protocol Contexts
const (
	ContextAck    = "im.cwtch.acknowledgement"
	ContextInvite = "im.cwtch.invite"
	ContextRaw    = "im.cwtch.raw"
)
|
|
@ -0,0 +1,80 @@
|
|||
package event
|
||||
|
||||
// queue wraps an infiniteChannel to provide an unbounded Queue.
type queue struct {
	infChan infiniteChannel
}

// simpleQueue is a Queue backed by a plain fixed-capacity buffered channel.
type simpleQueue struct {
	eventChannel chan Event
}

// Queue is a wrapper around a channel for handling Events in a consistent way across subsystems.
// The expectation is that each subsystem in Cwtch will manage a given an event.Queue fed from
// the event.Manager.
type Queue interface {
	InChan() chan<- Event
	OutChan() <-chan Event
	Next() *Event
	Shutdown()
	Len() int
}
|
||||
|
||||
// NewQueue initializes an event.Queue
|
||||
func NewQueue() Queue {
|
||||
queue := &queue{infChan: *newInfiniteChannel()}
|
||||
return queue
|
||||
}
|
||||
|
||||
// NewSimpleQueue initializes an event.Queue of the given buffer size.
|
||||
func NewSimpleQueue(buffer int) Queue {
|
||||
queue := new(simpleQueue)
|
||||
queue.eventChannel = make(chan Event, buffer)
|
||||
return queue
|
||||
}
|
||||
|
||||
// InChan returns the send side of the underlying channel.
func (sq *simpleQueue) InChan() chan<- Event {
	return sq.eventChannel
}

// OutChan returns the receive side of the underlying channel.
func (sq *simpleQueue) OutChan() <-chan Event {
	return sq.eventChannel
}

// Backlog returns the length of the queue backlog
func (sq *simpleQueue) Len() int {
	return len(sq.eventChannel)
}

// Next returns the next available event from the front of the queue
// (blocking; returns a zero-value Event once the channel is closed).
func (sq *simpleQueue) Next() *Event {
	event := <-sq.eventChannel
	return &event
}

// Shutdown closes our eventChannel
func (sq *simpleQueue) Shutdown() {
	close(sq.eventChannel)
}
|
||||
|
||||
// InChan returns the (unbounded) input side of the infinite channel.
func (iq *queue) InChan() chan<- Event {
	return iq.infChan.In()
}

// OutChan returns the output side of the infinite channel.
func (iq *queue) OutChan() <-chan Event {
	return iq.infChan.Out()
}

// Out returns the next available event from the front of the queue
// (blocking; returns a zero-value Event once the channel is closed).
func (iq *queue) Next() *Event {
	event := <-iq.infChan.Out()
	return &event
}

// Len reports how many events are currently buffered.
func (iq *queue) Len() int {
	return iq.infChan.Len()
}

// Shutdown closes our eventChannel
func (iq *queue) Shutdown() {
	iq.infChan.Close()
}
|
|
@ -0,0 +1,119 @@
|
|||
package event
|
||||
|
||||
import (
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Event is a structure which binds a given set of data to an Type
type Event struct {
	EventType Type
	// EventID is a unique random id, used e.g. to correlate acknowledgements
	EventID string
	Data    map[Field]string
}

// NewEvent creates a new event object with a unique ID and the given type and data.
func NewEvent(eventType Type, data map[Field]string) Event {
	return Event{EventType: eventType, EventID: utils.GetRandNumber().String(), Data: data}
}
|
||||
|
||||
// NewEventList creates a new event object with a unique ID and the given type and data supplied in a list format and composed into a map of Type:string
|
||||
func NewEventList(eventType Type, args ...interface{}) Event {
|
||||
data := map[Field]string{}
|
||||
for i := 0; i < len(args); i += 2 {
|
||||
key, kok := args[i].(Field)
|
||||
val, vok := args[i+1].(string)
|
||||
if kok && vok {
|
||||
data[key] = val
|
||||
}
|
||||
}
|
||||
return Event{EventType: eventType, EventID: utils.GetRandNumber().String(), Data: data}
|
||||
}
|
||||
|
||||
// Manager is an Event Bus which allows subsystems to subscribe to certain EventTypes and publish others.
type manager struct {
	subscribers map[Type][]chan<- Event // per-event-type fan-out targets
	events      chan Event              // unbuffered feed into the eventBus goroutine
	mapMutex    sync.Mutex              // guards subscribers
	internal    chan bool               // signalled when eventBus exits
	// NOTE(review): closed is read/written from multiple goroutines without
	// synchronization — a data race under the Go memory model; confirm acceptable.
	closed bool
}

// Manager is an interface for an event bus
type Manager interface {
	Subscribe(Type, Queue)
	Publish(Event)
	PublishLocal(Event)
	Shutdown()
}
|
||||
|
||||
// NewEventManager returns an initialized EventManager
|
||||
func NewEventManager() Manager {
|
||||
em := &manager{}
|
||||
em.initialize()
|
||||
return em
|
||||
}
|
||||
|
||||
// Initialize sets up the Manager.
|
||||
func (em *manager) initialize() {
|
||||
em.subscribers = make(map[Type][]chan<- Event)
|
||||
em.events = make(chan Event)
|
||||
em.internal = make(chan bool)
|
||||
em.closed = false
|
||||
go em.eventBus()
|
||||
}
|
||||
|
||||
// Subscribe takes an eventType and an Channel and associates them in the eventBus. All future events of that type
// will be sent to the eventChannel.
func (em *manager) Subscribe(eventType Type, queue Queue) {
	em.mapMutex.Lock()
	defer em.mapMutex.Unlock()
	em.subscribers[eventType] = append(em.subscribers[eventType], queue.InChan())
}
|
||||
|
||||
// Publish takes an Event and sends it to the internal eventBus where it is distributed to all Subscribers
|
||||
func (em *manager) Publish(event Event) {
|
||||
if event.EventType != "" && em.closed != true {
|
||||
em.events <- event
|
||||
}
|
||||
}
|
||||
|
||||
// Publish an event only locally, not going over an IPC bridge if there is one
// (for the plain manager there is no bridge, so this is identical to Publish).
func (em *manager) PublishLocal(event Event) {
	em.Publish(event)
}
|
||||
|
||||
// eventBus is an internal function that is used to distribute events to all subscribers
func (em *manager) eventBus() {
	for {
		event := <-em.events

		// In the case on an empty event. Teardown the Queue
		if event.EventType == "" {
			break
		}

		// maps aren't thread safe
		em.mapMutex.Lock()
		subscribers := em.subscribers[event.EventType]
		em.mapMutex.Unlock()

		// Send the event to any subscribers to that event type
		// NOTE(review): this send blocks when a subscriber's channel is full or
		// unbuffered with no reader, stalling the entire bus — confirm intended.
		for _, subscriber := range subscribers {
			subscriber <- event
		}
	}

	// We are about to exit the eventbus thread, fire off an event internally
	em.internal <- true
}
|
||||
|
||||
// Shutdown triggers, and waits for, the internal eventBus goroutine to finish
|
||||
func (em *manager) Shutdown() {
|
||||
em.events <- Event{}
|
||||
em.closed = true
|
||||
// wait for eventBus to finish
|
||||
<-em.internal
|
||||
close(em.events)
|
||||
close(em.internal)
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
package event
|
||||
|
||||
import (
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Most basic Manager Test, Initialize, Subscribe, Publish, Receive
|
||||
func TestEventManager(t *testing.T) {
|
||||
eventManager := NewEventManager()
|
||||
|
||||
// We need to make this buffer at least 1, otherwise we will log an error!
|
||||
testChan := make(chan Event, 1)
|
||||
simpleQueue := &simpleQueue{testChan}
|
||||
eventManager.Subscribe("TEST", simpleQueue)
|
||||
eventManager.Publish(Event{EventType: "TEST", Data: map[Field]string{"Value": "Hello World"}})
|
||||
|
||||
event := <-testChan
|
||||
if event.EventType == "TEST" && event.Data["Value"] == "Hello World" {
|
||||
|
||||
} else {
|
||||
t.Errorf("Received Invalid Event")
|
||||
}
|
||||
|
||||
eventManager.Shutdown()
|
||||
}
|
||||
|
||||
// Most basic Manager Test, Initialize, Subscribe, Publish, Receive
// NOTE(review): with an unbuffered channel and no reader, the eventBus
// goroutine blocks forever on the subscriber send and is leaked — presumably
// intentional for this test, but worth confirming.
func TestEventManagerOverflow(t *testing.T) {
	eventManager := NewEventManager()

	// Explicitly setting this to 0 log an error!
	testChan := make(chan Event)
	simpleQueue := &simpleQueue{testChan}
	eventManager.Subscribe("TEST", simpleQueue)
	eventManager.Publish(Event{EventType: "TEST"})
}
|
||||
|
||||
// TestEventManagerMultiple exercises fan-out: multiple queues subscribed to
// overlapping event types, then checks queue lengths and delivery order.
func TestEventManagerMultiple(t *testing.T) {
	log.SetLevel(log.LevelDebug)
	eventManager := NewEventManager()

	groupEventQueue := NewQueue()
	peerEventQueue := NewQueue()
	allEventQueue := NewQueue()

	eventManager.Subscribe("PeerEvent", peerEventQueue)
	eventManager.Subscribe("GroupEvent", groupEventQueue)
	eventManager.Subscribe("PeerEvent", allEventQueue)
	eventManager.Subscribe("GroupEvent", allEventQueue)
	eventManager.Subscribe("ErrorEvent", allEventQueue)

	eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
	eventManager.Publish(Event{EventType: "GroupEvent", Data: map[Field]string{"Value": "Hello World Group"}})
	eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
	eventManager.Publish(Event{EventType: "ErrorEvent", Data: map[Field]string{"Value": "Hello World Error"}})
	eventManager.Publish(Event{EventType: "NobodyIsSubscribedToThisEvent", Data: map[Field]string{"Value": "Noone should see this!"}})

	assertLength := func(len int, expected int, label string) {
		if len != expected {
			t.Errorf("Expected %s to be %v was %v", label, expected, len)
		}
	}

	// Give the bus goroutine time to drain into the queues before counting.
	time.Sleep(time.Second)

	assertLength(groupEventQueue.Len(), 1, "Group Event Queue Length")
	assertLength(peerEventQueue.Len(), 2, "Peer Event Queue Length")
	assertLength(allEventQueue.Len(), 4, "All Event Queue Length")

	checkEvent := func(eventType Type, expected Type, label string) {
		if eventType != expected {
			t.Errorf("Expected %s to be %v was %v", label, expected, eventType)
		}
	}

	event := groupEventQueue.Next()
	checkEvent(event.EventType, "GroupEvent", "First Group Event")

	event = peerEventQueue.Next()
	checkEvent(event.EventType, "PeerEvent", "First Peer Event")
	event = peerEventQueue.Next()
	checkEvent(event.EventType, "PeerEvent", "Second Peer Event")

	event = allEventQueue.Next()
	checkEvent(event.EventType, "PeerEvent", "ALL: First Peer Event")
	event = allEventQueue.Next()
	checkEvent(event.EventType, "GroupEvent", "ALL: First Group Event")
	event = allEventQueue.Next()
	checkEvent(event.EventType, "PeerEvent", "ALL: Second Peer Event")
	event = allEventQueue.Next()
	checkEvent(event.EventType, "ErrorEvent", "ALL: First Error Event")

	eventManager.Shutdown()
	groupEventQueue.Shutdown()
	peerEventQueue.Shutdown()
	allEventQueue.Shutdown()

	// Reading from a closed queue should result in an instant return and an empty event
	event = groupEventQueue.Next()
	checkEvent(event.EventType, "", "Test Next() on Empty Queue")
}
|
|
@ -0,0 +1,38 @@
|
|||
package event
|
||||
|
||||
// ipcManager wraps a regular event Manager, mirroring every Publish over an
// IPCBridge so the process on the other side of the bridge sees the same events.
type ipcManager struct {
	manager Manager

	onion     string // destination identifier stamped on outgoing IPCMessages
	ipcBridge IPCBridge
}
|
||||
|
||||
// NewIPCEventManager returns an EvenetManager that also pipes events over and supplied IPCBridge
|
||||
func NewIPCEventManager(bridge IPCBridge, onion string) Manager {
|
||||
em := &ipcManager{onion: onion, ipcBridge: bridge, manager: NewEventManager()}
|
||||
return em
|
||||
}
|
||||
|
||||
// IPCEventManagerFrom returns an IPCEventManger from the supplied manager and IPCBridge
|
||||
func IPCEventManagerFrom(bridge IPCBridge, onion string, manager Manager) Manager {
|
||||
em := &ipcManager{onion: onion, ipcBridge: bridge, manager: manager}
|
||||
return em
|
||||
}
|
||||
|
||||
// Publish delivers the event to local subscribers and mirrors it across the IPC bridge.
func (ipcm *ipcManager) Publish(ev Event) {
	ipcm.manager.Publish(ev)
	message := &IPCMessage{Dest: ipcm.onion, Message: ev}
	ipcm.ipcBridge.Write(message)
}

// Publish an event only locally, not going over an IPC bridge if there is one
func (ipcm *ipcManager) PublishLocal(ev Event) {
	ipcm.manager.Publish(ev)
}

// Subscribe delegates to the wrapped Manager.
func (ipcm *ipcManager) Subscribe(eventType Type, queue Queue) {
	ipcm.manager.Subscribe(eventType, queue)
}

// Shutdown delegates to the wrapped Manager; the IPCBridge itself is not shut down here.
func (ipcm *ipcManager) Shutdown() {
	ipcm.manager.Shutdown()
}
|
|
@ -0,0 +1,73 @@
|
|||
package event
|
||||
|
||||
/*
|
||||
This package is taken from https://github.com/eapache/channels
|
||||
as per their suggestion we are not importing the entire package and instead cherry picking and adapting what is needed
|
||||
|
||||
It is covered by the MIT License https://github.com/eapache/channels/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
// infiniteChannel implements the Channel interface with an infinite buffer between the input and the output.
type infiniteChannel struct {
	input, output chan Event
	length        chan int       // serviced by infiniteBuffer to answer Len()
	buffer        *infiniteQueue // unbounded ring buffer between input and output
}

func newInfiniteChannel() *infiniteChannel {
	ch := &infiniteChannel{
		input:  make(chan Event),
		output: make(chan Event),
		length: make(chan int),
		buffer: newInfinitQueue(),
	}
	go ch.infiniteBuffer()
	return ch
}
|
||||
|
||||
// In returns the send side; it never blocks for long because infiniteBuffer
// drains it into the unbounded queue.
func (ch *infiniteChannel) In() chan<- Event {
	return ch.input
}

// Out returns the receive side, fed from the head of the buffer.
func (ch *infiniteChannel) Out() <-chan Event {
	return ch.output
}

// Len reports the current buffer length (answered by the buffer goroutine).
func (ch *infiniteChannel) Len() int {
	return <-ch.length
}

// Close stops accepting input; buffered events continue to drain, then the
// output and length channels are closed by infiniteBuffer.
func (ch *infiniteChannel) Close() {
	close(ch.input)
}
|
||||
|
||||
// infiniteBuffer shuttles events from input to output through the unbounded
// queue. It exploits the fact that operations on a nil channel block forever,
// so a select case is switched off by setting its channel variable to nil.
func (ch *infiniteChannel) infiniteBuffer() {
	var input, output chan Event
	var next Event
	input = ch.input

	for input != nil || output != nil {
		select {
		// New element arrived, or input was closed (stop accepting).
		case elem, open := <-input:
			if open {
				ch.buffer.Add(elem)
			} else {
				input = nil
			}
		// A consumer took the current head element.
		case output <- next:
			ch.buffer.Remove()
		// Service Len() queries.
		case ch.length <- ch.buffer.Length():
		}

		if ch.buffer.Length() > 0 {
			output = ch.output
			next = ch.buffer.Peek()
		} else {
			// Buffer empty: disable the send case until something arrives.
			output = nil
			//next = nil
		}
	}

	close(ch.output)
	close(ch.length)
}
|
|
@ -0,0 +1,108 @@
|
|||
package event
|
||||
|
||||
/*
|
||||
This package is taken from https://github.com/eapache/channels
|
||||
as per their suggestion we are not importing the entire package and instead cherry picking and adapting what is needed
|
||||
|
||||
It is covered by the MIT License https://github.com/eapache/channels/blob/master/LICENSE
|
||||
*/
|
||||
/*
|
||||
Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
|
||||
Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
|
||||
substantial memory and time benefits, and fewer GC pauses.
|
||||
The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
|
||||
*/
|
||||
|
||||
// minQueueLen is smallest capacity that queue may have.
// Must be power of 2 for bitwise modulus: x % n == x & (n - 1).
const minQueueLen = 16

// Queue represents a single instance of the queue data structure.
// It is a ring buffer: head/tail wrap around buf, count tracks occupancy.
// Not thread-safe by design (see package comment above).
type infiniteQueue struct {
	buf               []Event
	head, tail, count int
}
|
||||
|
||||
// New constructs and returns a new Queue.
// NOTE(review): the name is missing an 'e' ("Infinit") but is referenced by
// newInfiniteChannel — renaming would need a coordinated change.
func newInfinitQueue() *infiniteQueue {
	return &infiniteQueue{
		buf: make([]Event, minQueueLen),
	}
}

// Length returns the number of elements currently stored in the queue.
func (q *infiniteQueue) Length() int {
	return q.count
}
|
||||
|
||||
// resizes the queue to fit exactly twice its current contents
// this can result in shrinking if the queue is less than half-full
func (q *infiniteQueue) resize() {
	newBuf := make([]Event, q.count<<1)

	if q.tail > q.head {
		// Contiguous occupancy: single copy.
		copy(newBuf, q.buf[q.head:q.tail])
	} else {
		// Wrapped occupancy: copy the tail-of-buffer segment, then the head segment.
		n := copy(newBuf, q.buf[q.head:])
		copy(newBuf[n:], q.buf[:q.tail])
	}

	q.head = 0
	q.tail = q.count
	q.buf = newBuf
}
|
||||
|
||||
// Add puts an element on the end of the queue.
func (q *infiniteQueue) Add(elem Event) {
	// Grow (double) when full; resize requires count as the new capacity basis.
	if q.count == len(q.buf) {
		q.resize()
	}

	q.buf[q.tail] = elem
	// bitwise modulus
	q.tail = (q.tail + 1) & (len(q.buf) - 1)
	q.count++
}
|
||||
|
||||
// Peek returns the element at the head of the queue. This call panics
// if the queue is empty.
func (q *infiniteQueue) Peek() Event {
	if q.count <= 0 {
		panic("queue: Peek() called on empty queue")
	}
	return q.buf[q.head]
}

// Get returns the element at index i in the queue. If the index is
// invalid, the call will panic. This method accepts both positive and
// negative index values. Index 0 refers to the first element, and
// index -1 refers to the last.
func (q *infiniteQueue) Get(i int) Event {
	// If indexing backwards, convert to positive index.
	if i < 0 {
		i += q.count
	}
	if i < 0 || i >= q.count {
		panic("queue: Get() called with index out of range")
	}
	// bitwise modulus
	return q.buf[(q.head+i)&(len(q.buf)-1)]
}
|
||||
|
||||
// Remove removes and returns the element from the front of the queue. If the
|
||||
// queue is empty, the call will panic.
|
||||
func (q *infiniteQueue) Remove() Event {
|
||||
if q.count <= 0 {
|
||||
panic("queue: Remove() called on empty queue")
|
||||
}
|
||||
ret := q.buf[q.head]
|
||||
//q.buf[q.head] = nil
|
||||
// bitwise modulus
|
||||
q.head = (q.head + 1) & (len(q.buf) - 1)
|
||||
q.count--
|
||||
// Resize down if buffer 1/4 full.
|
||||
if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
|
||||
q.resize()
|
||||
}
|
||||
return ret
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
package event
|
||||
|
||||
// IPCMessage is a wrapper for a regular eventMessage with a destination (onion|AppDest) so the other side of the bridge can route appropriately
|
||||
type IPCMessage struct {
|
||||
Dest string
|
||||
Message Event
|
||||
}
|
||||
|
||||
// IPCBridge is an interface to a IPC construct used to communicate IPCMessages
|
||||
type IPCBridge interface {
|
||||
Read() (*IPCMessage, bool)
|
||||
Write(message *IPCMessage)
|
||||
Shutdown()
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
module cwtch.im/cwtch
|
||||
|
||||
require (
|
||||
cwtch.im/tapir v0.1.10
|
||||
git.openprivacy.ca/openprivacy/libricochet-go v1.0.6
|
||||
github.com/c-bata/go-prompt v0.2.3
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/mattn/go-colorable v0.1.2 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.4 // indirect
|
||||
github.com/mattn/go-tty v0.0.0-20190424173100-523744f04859 // indirect
|
||||
github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942 // indirect
|
||||
github.com/struCoder/pidusage v0.1.2
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
|
||||
)
|
|
@ -0,0 +1,53 @@
|
|||
cwtch.im/tapir v0.1.10 h1:V+TkmwXNd6gySZqlVw468wMYEkmDwMSyvhkkpOfUw7w=
|
||||
cwtch.im/tapir v0.1.10/go.mod h1:EuRYdVrwijeaGBQ4OijDDRHf7R2MDSypqHkSl5DxI34=
|
||||
git.openprivacy.ca/openprivacy/libricochet-go v1.0.4 h1:GWLMJ5jBSIC/gFXzdbbeVz7fIAn2FTgW8+wBci6/3Ek=
|
||||
git.openprivacy.ca/openprivacy/libricochet-go v1.0.4/go.mod h1:yMSG1gBaP4f1U+RMZXN85d29D39OK5s8aTpyVRoH5FY=
|
||||
git.openprivacy.ca/openprivacy/libricochet-go v1.0.6 h1:5o4K2qn3otEE1InC5v5CzU0yL7Wl7DhVp4s8H3K6mXY=
|
||||
git.openprivacy.ca/openprivacy/libricochet-go v1.0.6/go.mod h1:yMSG1gBaP4f1U+RMZXN85d29D39OK5s8aTpyVRoH5FY=
|
||||
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI=
|
||||
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
|
||||
github.com/c-bata/go-prompt v0.2.3 h1:jjCS+QhG/sULBhAaBdjb2PlMRVaKXQgn+4yzaauvs2s=
|
||||
github.com/c-bata/go-prompt v0.2.3/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
|
||||
github.com/cretz/bine v0.1.0 h1:1/fvhLE+fk0bPzjdO5Ci+0ComYxEMuB1JhM4X5skT3g=
|
||||
github.com/cretz/bine v0.1.0/go.mod h1:6PF6fWAvYtwjRGkAuDEJeWNOv3a2hUouSP/yRYXmvHw=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-tty v0.0.0-20190424173100-523744f04859 h1:smQbSzmT3EHl4EUwtFwFGmGIpiYgIiiPeVv1uguIQEE=
|
||||
github.com/mattn/go-tty v0.0.0-20190424173100-523744f04859/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942 h1:A7GG7zcGjl3jqAqGPmcNjd/D9hzL95SuoOQAaFNdLU0=
|
||||
github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/struCoder/pidusage v0.1.2 h1:fFPTThlcWFQyizv3xKs5Lyq1lpG5lZ36arEGNhWz2Vs=
|
||||
github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI=
|
||||
golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b h1:Ib/yptP38nXZFMwqWSip+OKuMP9OkyDe3p+DssP8n9w=
|
||||
golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
127
model/group.go
127
model/group.go
|
@ -5,6 +5,7 @@ import (
|
|||
"cwtch.im/cwtch/protocol"
|
||||
"errors"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
|
@ -13,38 +14,52 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
//Group defines and encapsulates Cwtch's conception of group chat. Which are sessions
|
||||
// tied to a server under a given group key. Each group has a set of messages.
|
||||
// Group defines and encapsulates Cwtch's conception of group chat. Which are sessions
|
||||
// tied to a server under a given group key. Each group has a set of Messages.
|
||||
type Group struct {
|
||||
GroupID string
|
||||
SignedGroupID []byte
|
||||
GroupKey [32]byte
|
||||
GroupServer string
|
||||
Timeline Timeline
|
||||
Accepted bool
|
||||
Owner string
|
||||
IsCompromised bool
|
||||
lock sync.Mutex
|
||||
GroupID string
|
||||
SignedGroupID []byte
|
||||
GroupKey [32]byte
|
||||
GroupServer string
|
||||
Timeline Timeline `json:"-"`
|
||||
Accepted bool
|
||||
Owner string
|
||||
IsCompromised bool
|
||||
InitialMessage []byte
|
||||
Attributes map[string]string
|
||||
lock sync.Mutex
|
||||
LocalID string
|
||||
State string `json:"-"`
|
||||
unacknowledgedMessages []Message
|
||||
}
|
||||
|
||||
// NewGroup initializes a new group associated with a given CwtchServer
|
||||
func NewGroup(server string) *Group {
|
||||
func NewGroup(server string) (*Group, error) {
|
||||
group := new(Group)
|
||||
group.LocalID = generateRandomID()
|
||||
|
||||
if utils.IsValidHostname(server) == false {
|
||||
return nil, errors.New("Server is not a valid v3 onion")
|
||||
}
|
||||
|
||||
group.GroupServer = server
|
||||
|
||||
var groupID [16]byte
|
||||
if _, err := io.ReadFull(rand.Reader, groupID[:]); err != nil {
|
||||
panic(err)
|
||||
log.Errorf("Cannot read from random: %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
group.GroupID = fmt.Sprintf("%x", groupID)
|
||||
|
||||
var groupKey [32]byte
|
||||
if _, err := io.ReadFull(rand.Reader, groupKey[:]); err != nil {
|
||||
panic(err)
|
||||
log.Errorf("Error: Cannot read from random: %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
copy(group.GroupKey[:], groupKey[:])
|
||||
group.Owner = "self"
|
||||
return group
|
||||
group.Attributes = make(map[string]string)
|
||||
return group, nil
|
||||
}
|
||||
|
||||
// SignGroup adds a signature to the group.
|
||||
|
@ -58,18 +73,28 @@ func (g *Group) Compromised() {
|
|||
g.IsCompromised = true
|
||||
}
|
||||
|
||||
// GetInitialMessage returns the first message of the group, if one was sent with the invite.
|
||||
func (g *Group) GetInitialMessage() []byte {
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
return g.InitialMessage
|
||||
}
|
||||
|
||||
// Invite generates a invitation that can be sent to a cwtch peer
|
||||
func (g *Group) Invite() ([]byte, error) {
|
||||
func (g *Group) Invite(initialMessage []byte) ([]byte, error) {
|
||||
|
||||
if g.SignedGroupID == nil {
|
||||
return nil, errors.New("group isn't signed")
|
||||
}
|
||||
|
||||
g.InitialMessage = initialMessage[:]
|
||||
|
||||
gci := &protocol.GroupChatInvite{
|
||||
GroupName: g.GroupID,
|
||||
GroupSharedKey: g.GroupKey[:],
|
||||
ServerHost: g.GroupServer,
|
||||
SignedGroupId: g.SignedGroupID[:],
|
||||
InitialMessage: initialMessage[:],
|
||||
}
|
||||
|
||||
cp := &protocol.CwtchPeerPacket{
|
||||
|
@ -79,42 +104,67 @@ func (g *Group) Invite() ([]byte, error) {
|
|||
return invite, err
|
||||
}
|
||||
|
||||
// AddMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline
|
||||
func (g *Group) AddMessage(message *protocol.DecryptedGroupMessage, sig []byte, verified bool) *Message {
|
||||
// AddSentMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline
|
||||
func (g *Group) AddSentMessage(message *protocol.DecryptedGroupMessage, sig []byte) Message {
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
timelineMessage := Message{
|
||||
Message: message.GetText(),
|
||||
Timestamp: time.Unix(int64(message.GetTimestamp()), 0),
|
||||
Received: time.Unix(0, 0),
|
||||
Signature: sig,
|
||||
PeerID: message.GetOnion(),
|
||||
PreviousMessageSig: message.GetPreviousMessageSig(),
|
||||
}
|
||||
g.unacknowledgedMessages = append(g.unacknowledgedMessages, timelineMessage)
|
||||
return timelineMessage
|
||||
}
|
||||
|
||||
// AddMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline
|
||||
func (g *Group) AddMessage(message *protocol.DecryptedGroupMessage, sig []byte) (*Message, bool) {
|
||||
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
|
||||
// Delete the message from the unack'd buffer if it exists
|
||||
for i, unAckedMessage := range g.unacknowledgedMessages {
|
||||
if compareSignatures(unAckedMessage.Signature, sig) {
|
||||
g.unacknowledgedMessages = append(g.unacknowledgedMessages[:i], g.unacknowledgedMessages[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
timelineMessage := &Message{
|
||||
Message: message.GetText(),
|
||||
Timestamp: time.Unix(int64(message.GetTimestamp()), 0),
|
||||
Received: time.Now(),
|
||||
Signature: sig,
|
||||
Verified: verified,
|
||||
PeerID: message.GetOnion(),
|
||||
PreviousMessageSig: message.GetPreviousMessageSig(),
|
||||
}
|
||||
g.Timeline.Insert(timelineMessage)
|
||||
g.lock.Unlock()
|
||||
return timelineMessage
|
||||
seen := g.Timeline.Insert(timelineMessage)
|
||||
|
||||
return timelineMessage, seen
|
||||
}
|
||||
|
||||
// GetTimeline provides a safe copy of the timeline-=
|
||||
func (g *Group) GetTimeline() (t []Message) {
|
||||
// GetTimeline provides a safe copy of the timeline
|
||||
func (g *Group) GetTimeline() (timeline []Message) {
|
||||
g.lock.Lock()
|
||||
t = g.Timeline.GetMessages()
|
||||
g.lock.Unlock()
|
||||
return
|
||||
|
||||
defer g.lock.Unlock()
|
||||
return append(g.Timeline.GetMessages(), g.unacknowledgedMessages...)
|
||||
}
|
||||
|
||||
//EncryptMessage takes a message and encrypts the message under the group key.
|
||||
func (g *Group) EncryptMessage(message *protocol.DecryptedGroupMessage) []byte {
|
||||
func (g *Group) EncryptMessage(message *protocol.DecryptedGroupMessage) ([]byte, error) {
|
||||
var nonce [24]byte
|
||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||
panic(err)
|
||||
log.Errorf("Cannot read from random: %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
wire, err := proto.Marshal(message)
|
||||
utils.CheckError(err)
|
||||
encrypted := secretbox.Seal(nonce[:], []byte(wire), &nonce, &g.GroupKey)
|
||||
return encrypted
|
||||
return encrypted, nil
|
||||
}
|
||||
|
||||
// DecryptMessage takes a ciphertext and returns true and the decrypted message if the
|
||||
|
@ -132,3 +182,18 @@ func (g *Group) DecryptMessage(ciphertext []byte) (bool, *protocol.DecryptedGrou
|
|||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// SetAttribute allows applications to store arbitrary configuration info at the group level.
|
||||
func (g *Group) SetAttribute(name string, value string) {
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
g.Attributes[name] = value
|
||||
}
|
||||
|
||||
// GetAttribute returns the value of a value set with SetAttribute. If no such value has been set exists is set to false.
|
||||
func (g *Group) GetAttribute(name string) (value string, exists bool) {
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
value, exists = g.Attributes[name]
|
||||
return
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
)
|
||||
|
||||
func TestGroup(t *testing.T) {
|
||||
g := NewGroup("server.onion")
|
||||
g, _ := NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||
dgm := &protocol.DecryptedGroupMessage{
|
||||
Onion: proto.String("onion"),
|
||||
Text: proto.String("Hello World!"),
|
||||
|
@ -17,11 +17,23 @@ func TestGroup(t *testing.T) {
|
|||
PreviousMessageSig: []byte{},
|
||||
Padding: []byte{},
|
||||
}
|
||||
encMessage := g.EncryptMessage(dgm)
|
||||
encMessage, _ := g.EncryptMessage(dgm)
|
||||
ok, message := g.DecryptMessage(encMessage)
|
||||
if !ok || message.GetText() != "Hello World!" {
|
||||
t.Errorf("group encryption was invalid, or returned wrong message decrypted:%v message:%v", ok, message)
|
||||
return
|
||||
}
|
||||
g.SetAttribute("test", "test_value")
|
||||
value, exists := g.GetAttribute("test")
|
||||
if !exists || value != "test_value" {
|
||||
t.Errorf("Custom Attribute Should have been set, instead %v %v", exists, value)
|
||||
}
|
||||
t.Logf("Got message %v", message)
|
||||
}
|
||||
|
||||
func TestGroupErr(t *testing.T) {
|
||||
_, err := NewGroup("not a real group name")
|
||||
if err == nil {
|
||||
t.Errorf("Group Setup Should Have Failed")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Timeline encapsulates a collection of ordered messages, and a mechanism to access them
|
||||
// Timeline encapsulates a collection of ordered Messages, and a mechanism to access them
|
||||
// in a threadsafe manner.
|
||||
type Timeline struct {
|
||||
Messages []Message
|
||||
|
@ -21,10 +22,12 @@ type Message struct {
|
|||
PeerID string
|
||||
Message string
|
||||
Signature []byte
|
||||
Verified bool
|
||||
PreviousMessageSig []byte
|
||||
}
|
||||
|
||||
// MessageBaseSize is a rough estimate of the base number of bytes the struct uses before strings are populated
|
||||
const MessageBaseSize = 104
|
||||
|
||||
func compareSignatures(a []byte, b []byte) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
|
@ -46,23 +49,45 @@ func (t *Timeline) GetMessages() []Message {
|
|||
return messages
|
||||
}
|
||||
|
||||
// GetCopy returns a duplicate of the Timeline
|
||||
func (t *Timeline) GetCopy() *Timeline {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
bytes, _ := json.Marshal(t)
|
||||
newt := &Timeline{}
|
||||
json.Unmarshal(bytes, newt)
|
||||
return newt
|
||||
}
|
||||
|
||||
// SetMessages sets the Messages of this timeline. Only to be used in loading/initialization
|
||||
func (t *Timeline) SetMessages(messages []Message) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
t.Messages = messages
|
||||
}
|
||||
|
||||
// Len gets the length of the timeline
|
||||
func (t *Timeline) Len() int {
|
||||
return len(t.Messages)
|
||||
}
|
||||
|
||||
// Swap swaps 2 messages on the timeline.
|
||||
// Swap swaps 2 Messages on the timeline.
|
||||
func (t *Timeline) Swap(i, j int) {
|
||||
t.Messages[i], t.Messages[j] = t.Messages[j], t.Messages[i]
|
||||
}
|
||||
|
||||
// Less checks 2 messages (i and j) in the timeline and returns true if i occcured before j, else false
|
||||
// Less checks 2 Messages (i and j) in the timeline and returns true if i occurred before j, else false
|
||||
func (t *Timeline) Less(i, j int) bool {
|
||||
|
||||
if t.Messages[i].Timestamp.Before(t.Messages[j].Timestamp) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Short circuit false if j is before i, signature checks will give a wrong order in this case.
|
||||
if t.Messages[j].Timestamp.Before(t.Messages[i].Timestamp) {
|
||||
return false
|
||||
}
|
||||
|
||||
if compareSignatures(t.Messages[i].PreviousMessageSig, t.SignedGroupID) {
|
||||
return true
|
||||
}
|
||||
|
@ -74,18 +99,27 @@ func (t *Timeline) Less(i, j int) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// Sort sorts the timeline in a canonical order.
|
||||
// TODO: There is almost definitely a more efficient way of doing things that involve not calling this method on every timeline load.
|
||||
func (t *Timeline) Sort() {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
sort.Sort(t)
|
||||
}
|
||||
|
||||
// Insert inserts a message into the timeline in a thread safe way.
|
||||
func (t *Timeline) Insert(mi *Message) {
|
||||
func (t *Timeline) Insert(mi *Message) bool {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
||||
for _, m := range t.Messages {
|
||||
// If the message already exists, then we don't add it
|
||||
if compareSignatures(m.Signature, mi.Signature) {
|
||||
return
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
t.Messages = append(t.Messages, *mi)
|
||||
sort.Sort(t)
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -16,15 +16,15 @@ func TestMessagePadding(t *testing.T) {
|
|||
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||
|
||||
gid, invite, _ := alice.StartGroup("aaa.onion")
|
||||
gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||
gci := &protocol.CwtchPeerPacket{}
|
||||
proto.Unmarshal(invite, gci)
|
||||
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
|
||||
|
||||
group := alice.GetGroupByGroupID(gid)
|
||||
|
||||
c1, s1, _ := sarah.EncryptMessageToGroup("Hello World 1", group.GroupID)
|
||||
t.Logf("Length of Encrypted Message: %v", len(c1))
|
||||
c1, s1, err := sarah.EncryptMessageToGroup("Hello World 1", group.GroupID)
|
||||
t.Logf("Length of Encrypted Message: %v %v", len(c1), err)
|
||||
alice.AttemptDecryption(c1, s1)
|
||||
|
||||
c2, s2, _ := alice.EncryptMessageToGroup("Hello World 2", group.GroupID)
|
||||
|
@ -50,7 +50,7 @@ func TestTranscriptConsistency(t *testing.T) {
|
|||
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||
|
||||
gid, invite, _ := alice.StartGroup("aaa.onion")
|
||||
gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||
gci := &protocol.CwtchPeerPacket{}
|
||||
proto.Unmarshal(invite, gci)
|
||||
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
|
||||
|
@ -80,14 +80,14 @@ func TestTranscriptConsistency(t *testing.T) {
|
|||
c5, s5, _ := alice.EncryptMessageToGroup("Hello World 5", group.GroupID)
|
||||
t.Logf("Length of Encrypted Message: %v", len(c5))
|
||||
|
||||
_, m1 := sarah.AttemptDecryption(c1, s1)
|
||||
_, _, m1, _ := sarah.AttemptDecryption(c1, s1)
|
||||
sarah.AttemptDecryption(c1, s1) // Try a duplicate
|
||||
_, m2 := sarah.AttemptDecryption(c2, s2)
|
||||
_, m3 := sarah.AttemptDecryption(c3, s3)
|
||||
_, m4 := sarah.AttemptDecryption(c4, s4)
|
||||
_, m5 := sarah.AttemptDecryption(c5, s5)
|
||||
_, _, m2, _ := sarah.AttemptDecryption(c2, s2)
|
||||
_, _, m3, _ := sarah.AttemptDecryption(c3, s3)
|
||||
_, _, m4, _ := sarah.AttemptDecryption(c4, s4)
|
||||
_, _, m5, _ := sarah.AttemptDecryption(c5, s5)
|
||||
|
||||
// Now we simulate a client receiving these messages completely out of order
|
||||
// Now we simulate a client receiving these Messages completely out of order
|
||||
timeline.Insert(m1)
|
||||
timeline.Insert(m5)
|
||||
timeline.Insert(m4)
|
||||
|
|
230
model/profile.go
230
model/profile.go
|
@ -2,14 +2,17 @@ package model
|
|||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"encoding/asn1"
|
||||
"encoding/base32"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
@ -21,6 +24,11 @@ type PublicProfile struct {
|
|||
Trusted bool
|
||||
Blocked bool
|
||||
Onion string
|
||||
Attributes map[string]string
|
||||
//Timeline Timeline `json:"-"` // TODO: cache recent messages for client
|
||||
LocalID string // used by storage engine
|
||||
State string `json:"-"`
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// Profile encapsulates all the attributes necessary to be a Cwtch Peer.
|
||||
|
@ -28,26 +36,50 @@ type Profile struct {
|
|||
PublicProfile
|
||||
Contacts map[string]*PublicProfile
|
||||
Ed25519PrivateKey ed25519.PrivateKey
|
||||
OnionPrivateKey *rsa.PrivateKey
|
||||
Groups map[string]*Group
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// MaxGroupMessageLength is the maximum length of a message posted to a server group.
|
||||
// TODO: Should this be per server?
|
||||
const MaxGroupMessageLength = 1800
|
||||
|
||||
func generateRandomID() string {
|
||||
randBytes := make([]byte, 16)
|
||||
rand.Read(randBytes)
|
||||
return filepath.Join(hex.EncodeToString(randBytes))
|
||||
}
|
||||
|
||||
func (p *PublicProfile) init() {
|
||||
if p.Attributes == nil {
|
||||
p.Attributes = make(map[string]string)
|
||||
}
|
||||
p.LocalID = generateRandomID()
|
||||
}
|
||||
|
||||
// SetAttribute allows applications to store arbitrary configuration info at the profile level.
|
||||
func (p *PublicProfile) SetAttribute(name string, value string) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
p.Attributes[name] = value
|
||||
}
|
||||
|
||||
// GetAttribute returns the value of a value set with SetCustomAttribute. If no such value has been set exists is set to false.
|
||||
func (p *PublicProfile) GetAttribute(name string) (value string, exists bool) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
value, exists = p.Attributes[name]
|
||||
return
|
||||
}
|
||||
|
||||
// GenerateNewProfile creates a new profile, with new encryption and signing keys, and a profile name.
|
||||
func GenerateNewProfile(name string) *Profile {
|
||||
p := new(Profile)
|
||||
p.init()
|
||||
p.Name = name
|
||||
pub, priv, _ := ed25519.GenerateKey(rand.Reader)
|
||||
p.Ed25519PublicKey = pub
|
||||
p.Ed25519PrivateKey = priv
|
||||
|
||||
p.OnionPrivateKey, _ = utils.GeneratePrivateKey()
|
||||
// DER Encode the Public Key
|
||||
publicKeyBytes, _ := asn1.Marshal(rsa.PublicKey{
|
||||
N: p.OnionPrivateKey.PublicKey.N,
|
||||
E: p.OnionPrivateKey.PublicKey.E,
|
||||
})
|
||||
p.Onion = utils.GetTorHostname(publicKeyBytes)
|
||||
p.Onion = utils.GetTorV3Hostname(pub)
|
||||
|
||||
p.Contacts = make(map[string]*PublicProfile)
|
||||
p.Contacts[p.Onion] = &p.PublicProfile
|
||||
|
@ -69,19 +101,31 @@ func (p *Profile) GetCwtchIdentityPacket() (message []byte) {
|
|||
return
|
||||
}
|
||||
|
||||
// AddCwtchIdentity takes a wire message and if it is a CwtchIdentity message adds the identity as a contact
|
||||
// otherwise returns an error
|
||||
func (p *Profile) AddCwtchIdentity(onion string, ci *protocol.CwtchIdentity) {
|
||||
p.AddContact(onion, &PublicProfile{Name: ci.GetName(), Ed25519PublicKey: ci.GetEd25519PublicKey(), Onion: onion})
|
||||
}
|
||||
|
||||
// AddContact allows direct manipulation of cwtch contacts
|
||||
func (p *Profile) AddContact(onion string, profile *PublicProfile) {
|
||||
p.lock.Lock()
|
||||
profile.init()
|
||||
// TODO: More Robust V3 Onion Handling
|
||||
decodedPub, _ := base32.StdEncoding.DecodeString(strings.ToUpper(onion[:56]))
|
||||
profile.Ed25519PublicKey = ed25519.PublicKey(decodedPub[:32])
|
||||
p.Contacts[onion] = profile
|
||||
p.lock.Unlock()
|
||||
}
|
||||
|
||||
// DeleteContact deletes a peer contact
|
||||
func (p *Profile) DeleteContact(onion string) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
delete(p.Contacts, onion)
|
||||
}
|
||||
|
||||
// DeleteGroup deletes a group
|
||||
func (p *Profile) DeleteGroup(groupID string) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
delete(p.Groups, groupID)
|
||||
}
|
||||
|
||||
// RejectInvite rejects and removes a group invite
|
||||
func (p *Profile) RejectInvite(groupID string) {
|
||||
p.lock.Lock()
|
||||
|
@ -89,6 +133,26 @@ func (p *Profile) RejectInvite(groupID string) {
|
|||
p.lock.Unlock()
|
||||
}
|
||||
|
||||
/*
|
||||
// AddMessageToContactTimeline allows the saving of a message sent via a direct connection chat to the profile.
|
||||
func (p *Profile) AddMessageToContactTimeline(onion string, fromMe bool, message string, sent time.Time) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
contact, ok := p.Contacts[onion]
|
||||
|
||||
// We don't really need a Signature here, but we use it to maintain order
|
||||
now := time.Now()
|
||||
sig := p.SignMessage(onion + message + sent.String() + now.String())
|
||||
if ok {
|
||||
if fromMe {
|
||||
contact.Timeline.Insert(&Message{PeerID: p.Onion, Message: message, Timestamp: sent, Received: now, Signature: sig})
|
||||
} else {
|
||||
contact.Timeline.Insert(&Message{PeerID: onion, Message: message, Timestamp: sent, Received: now, Signature: sig})
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// AcceptInvite accepts a group invite
|
||||
func (p *Profile) AcceptInvite(groupID string) (err error) {
|
||||
p.lock.Lock()
|
||||
|
@ -139,6 +203,31 @@ func (p *Profile) BlockPeer(onion string) (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// UnblockPeer unblocks a contact
|
||||
func (p *Profile) UnblockPeer(onion string) (err error) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
contact, ok := p.Contacts[onion]
|
||||
if ok {
|
||||
contact.Blocked = false
|
||||
} else {
|
||||
err = errors.New("peer does not exist")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// BlockedPeers calculates a list of Peers who have been Blocked.
|
||||
func (p *Profile) BlockedPeers() []string {
|
||||
blockedPeers := []string{}
|
||||
for _, contact := range p.GetContacts() {
|
||||
c, _ := p.GetContact(contact)
|
||||
if c.Blocked {
|
||||
blockedPeers = append(blockedPeers, c.Onion)
|
||||
}
|
||||
}
|
||||
return blockedPeers
|
||||
}
|
||||
|
||||
// TrustPeer sets a contact to trusted
|
||||
func (p *Profile) TrustPeer(onion string) (err error) {
|
||||
p.lock.Lock()
|
||||
|
@ -182,10 +271,10 @@ func (p *Profile) VerifyGroupMessage(onion string, groupID string, message strin
|
|||
return ed25519.Verify(p.Ed25519PublicKey, []byte(m), signature)
|
||||
}
|
||||
|
||||
contact, found := p.GetContact(onion)
|
||||
if found {
|
||||
m := groupID + group.GroupServer + string(ciphertext)
|
||||
return ed25519.Verify(contact.Ed25519PublicKey, []byte(m), signature)
|
||||
m := groupID + group.GroupServer + string(ciphertext)
|
||||
decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion))
|
||||
if err == nil {
|
||||
return ed25519.Verify(decodedPub[:32], []byte(m), signature)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -196,14 +285,24 @@ func (p *Profile) SignMessage(message string) []byte {
|
|||
return sig
|
||||
}
|
||||
|
||||
//StartGroup when given a server, creates a new Group under this profile and returns the group id an a precomputed
|
||||
// StartGroup when given a server, creates a new Group under this profile and returns the group id an a precomputed
|
||||
// invite which can be sent on the wire.
|
||||
func (p *Profile) StartGroup(server string) (groupID string, invite []byte, err error) {
|
||||
group := NewGroup(server)
|
||||
return p.StartGroupWithMessage(server, []byte{})
|
||||
}
|
||||
|
||||
// StartGroupWithMessage when given a server, and an initial message creates a new Group under this profile and returns the group id an a precomputed
|
||||
// invite which can be sent on the wire.
|
||||
func (p *Profile) StartGroupWithMessage(server string, initialMessage []byte) (groupID string, invite []byte, err error) {
|
||||
group, err := NewGroup(server)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
groupID = group.GroupID
|
||||
group.Owner = p.Onion
|
||||
signedGroupID := p.SignMessage(groupID + server)
|
||||
group.SignGroup(signedGroupID)
|
||||
invite, err = group.Invite()
|
||||
invite, err = group.Invite(initialMessage)
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
p.Groups[group.GroupID] = group
|
||||
|
@ -222,62 +321,48 @@ func (p *Profile) GetGroupByGroupID(groupID string) (g *Group) {
|
|||
func (p *Profile) ProcessInvite(gci *protocol.GroupChatInvite, peerHostname string) {
|
||||
group := new(Group)
|
||||
group.GroupID = gci.GetGroupName()
|
||||
group.LocalID = generateRandomID()
|
||||
group.SignedGroupID = gci.GetSignedGroupId()
|
||||
copy(group.GroupKey[:], gci.GetGroupSharedKey()[:])
|
||||
group.GroupServer = gci.GetServerHost()
|
||||
group.InitialMessage = gci.GetInitialMessage()[:]
|
||||
group.Accepted = false
|
||||
group.Owner = peerHostname
|
||||
group.Attributes = make(map[string]string)
|
||||
p.AddGroup(group)
|
||||
}
|
||||
|
||||
// AddGroup is a convenience method for adding a group to a profile.
|
||||
func (p *Profile) AddGroup(group *Group) {
|
||||
existingGroup, exists := p.Groups[group.GroupID]
|
||||
_, exists := p.Groups[group.GroupID]
|
||||
if !exists {
|
||||
owner, ok := p.GetContact(group.Owner)
|
||||
if ok {
|
||||
valid := ed25519.Verify(owner.Ed25519PublicKey, []byte(group.GroupID+group.GroupServer), group.SignedGroupID)
|
||||
if valid {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
p.Groups[group.GroupID] = group
|
||||
}
|
||||
}
|
||||
} else if exists && existingGroup.Owner == group.Owner {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
p.Groups[group.GroupID] = group
|
||||
}
|
||||
|
||||
// If we are sent an invite or group update by someone who is not an owner
|
||||
// then we reject the group.
|
||||
}
|
||||
|
||||
// AttemptDecryption takes a ciphertext and signature and attempts to decrypt it under known groups.
|
||||
func (p *Profile) AttemptDecryption(ciphertext []byte, signature []byte) (bool, *Message) {
|
||||
func (p *Profile) AttemptDecryption(ciphertext []byte, signature []byte) (bool, string, *Message, bool) {
|
||||
for _, group := range p.Groups {
|
||||
success, dgm := group.DecryptMessage(ciphertext)
|
||||
if success {
|
||||
|
||||
// Assert that we know the owner of the group
|
||||
owner, ok := p.Contacts[group.Owner]
|
||||
if ok {
|
||||
valid := ed25519.Verify(owner.Ed25519PublicKey, []byte(group.GroupID+group.GroupServer), dgm.SignedGroupId)
|
||||
// If we can decrypt the message, but the group id is wrong that means that
|
||||
// this message is from someone who was not invited to the group.
|
||||
// As such this group has been compromised, probably by one of the other members.
|
||||
// We set the flag to be handled by the UX and reject the message.
|
||||
if !valid {
|
||||
group.Compromised()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
verified := p.VerifyGroupMessage(dgm.GetOnion(), group.GroupID, dgm.GetText(), dgm.GetTimestamp(), ciphertext, signature)
|
||||
return true, group.AddMessage(dgm, signature, verified)
|
||||
|
||||
// So we have a message that has a valid group key, but the signature can't be verified.
|
||||
// The most obvious explanation for this is that the group key has been compromised (or we are in an open group and the server is being malicious)
|
||||
// Either way, someone who has the private key is being detectably bad so we are just going to throw this message away and mark the group as Compromised.
|
||||
if !verified {
|
||||
group.Compromised()
|
||||
return false, group.GroupID, nil, false
|
||||
}
|
||||
message, seen := group.AddMessage(dgm, signature)
|
||||
return true, group.GroupID, message, seen
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
|
||||
// If we couldn't find a group to decrypt the message with we just return false. This is an expected case
|
||||
return false, "", nil, false
|
||||
}
|
||||
|
||||
func getRandomness(arr *[]byte) {
|
||||
|
@ -289,6 +374,11 @@ func getRandomness(arr *[]byte) {
|
|||
// EncryptMessageToGroup when given a message and a group, encrypts and signs the message under the group and
|
||||
// profile
|
||||
func (p *Profile) EncryptMessageToGroup(message string, groupID string) ([]byte, []byte, error) {
|
||||
|
||||
if len(message) > MaxGroupMessageLength {
|
||||
return nil, nil, errors.New("group message is too long")
|
||||
}
|
||||
|
||||
group := p.GetGroupByGroupID(groupID)
|
||||
if group != nil {
|
||||
timestamp := time.Now().Unix()
|
||||
|
@ -300,7 +390,7 @@ func (p *Profile) EncryptMessageToGroup(message string, groupID string) ([]byte,
|
|||
prevSig = group.SignedGroupID
|
||||
}
|
||||
|
||||
lenPadding := 1024 - len(message)
|
||||
lenPadding := MaxGroupMessageLength - len(message)
|
||||
padding := make([]byte, lenPadding)
|
||||
getRandomness(&padding)
|
||||
|
||||
|
@ -312,9 +402,31 @@ func (p *Profile) EncryptMessageToGroup(message string, groupID string) ([]byte,
|
|||
PreviousMessageSig: prevSig,
|
||||
Padding: padding[:],
|
||||
}
|
||||
ciphertext := group.EncryptMessage(dm)
|
||||
ciphertext, err := group.EncryptMessage(dm)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
signature := p.SignMessage(groupID + group.GroupServer + string(ciphertext))
|
||||
group.AddSentMessage(dm, signature)
|
||||
return ciphertext, signature, nil
|
||||
}
|
||||
return nil, nil, errors.New("group does not exist")
|
||||
}
|
||||
|
||||
// GetCopy returns a full deep copy of the Profile struct and its members (timeline inclusion control by arg)
|
||||
func (p *Profile) GetCopy(timeline bool) *Profile {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
newp := new(Profile)
|
||||
bytes, _ := json.Marshal(p)
|
||||
json.Unmarshal(bytes, &newp)
|
||||
|
||||
if timeline {
|
||||
for groupID := range newp.Groups {
|
||||
newp.Groups[groupID].Timeline = *p.Groups[groupID].Timeline.GetCopy()
|
||||
}
|
||||
}
|
||||
|
||||
return newp
|
||||
}
|
||||
|
|
|
@ -17,8 +17,9 @@ func TestProfileIdentity(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Errorf("alice should have added sarah as a contact %v", err)
|
||||
}
|
||||
alice.AddCwtchIdentity("sarah.onion", ci.GetCwtchIdentify())
|
||||
if alice.Contacts["sarah.onion"].Name != "Sarah" {
|
||||
|
||||
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||
if alice.Contacts[sarah.Onion].Name != "Sarah" {
|
||||
t.Errorf("alice should have added sarah as a contact %v", alice.Contacts)
|
||||
}
|
||||
|
||||
|
@ -26,6 +27,12 @@ func TestProfileIdentity(t *testing.T) {
|
|||
t.Errorf("alice should be only contact: %v", alice.GetContacts())
|
||||
}
|
||||
|
||||
alice.SetAttribute("test", "hello world")
|
||||
value, _ := alice.GetAttribute("test")
|
||||
if value != "hello world" {
|
||||
t.Errorf("value from custom attribute should have been 'hello world', instead was: %v", value)
|
||||
}
|
||||
|
||||
t.Logf("%v", alice)
|
||||
}
|
||||
|
||||
|
@ -70,7 +77,7 @@ func TestRejectGroupInvite(t *testing.T) {
|
|||
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||
|
||||
gid, invite, _ := alice.StartGroup("aaa.onion")
|
||||
gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||
gci := &protocol.CwtchPeerPacket{}
|
||||
proto.Unmarshal(invite, gci)
|
||||
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
|
||||
|
@ -94,7 +101,7 @@ func TestProfileGroup(t *testing.T) {
|
|||
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||
|
||||
gid, invite, _ := alice.StartGroup("aaa.onion")
|
||||
gid, invite, _ := alice.StartGroupWithMessage("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", []byte("Hello World"))
|
||||
gci := &protocol.CwtchPeerPacket{}
|
||||
proto.Unmarshal(invite, gci)
|
||||
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
|
||||
|
@ -107,7 +114,7 @@ func TestProfileGroup(t *testing.T) {
|
|||
c, s1, _ := sarah.EncryptMessageToGroup("Hello World", group.GroupID)
|
||||
alice.AttemptDecryption(c, s1)
|
||||
|
||||
gid2, invite2, _ := alice.StartGroup("bbb.onion")
|
||||
gid2, invite2, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||
gci2 := &protocol.CwtchPeerPacket{}
|
||||
proto.Unmarshal(invite2, gci2)
|
||||
sarah.ProcessInvite(gci2.GetGroupChatInvite(), alice.Onion)
|
||||
|
@ -115,20 +122,31 @@ func TestProfileGroup(t *testing.T) {
|
|||
c2, s2, _ := sarah.EncryptMessageToGroup("Hello World", group2.GroupID)
|
||||
alice.AttemptDecryption(c2, s2)
|
||||
|
||||
sarahGroup := sarah.GetGroupByGroupID(group.GroupID)
|
||||
im := sarahGroup.GetInitialMessage()
|
||||
if string(im) != "Hello World" {
|
||||
t.Errorf("Initial Message was not stored properly: %v", im)
|
||||
}
|
||||
|
||||
_, _, err := sarah.EncryptMessageToGroup(string(make([]byte, MaxGroupMessageLength*2)), group2.GroupID)
|
||||
if err == nil {
|
||||
t.Errorf("Overly long message should have returned an error")
|
||||
}
|
||||
|
||||
bob := GenerateNewProfile("bob")
|
||||
bob.AddContact(alice.Onion, &alice.PublicProfile)
|
||||
bob.ProcessInvite(gci2.GetGroupChatInvite(), alice.Onion)
|
||||
c3, s3, err := bob.EncryptMessageToGroup("Bobs Message", group2.GroupID)
|
||||
if err == nil {
|
||||
ok, message := alice.AttemptDecryption(c3, s3)
|
||||
if ok != true || message.Verified == true {
|
||||
t.Errorf("Bobs message to the group should be decrypted but not verified by alice instead %v %v", message, ok)
|
||||
ok, _, message, _ := alice.AttemptDecryption(c3, s3)
|
||||
if !ok {
|
||||
t.Errorf("Bobs message to the group should be decrypted %v %v", message, ok)
|
||||
}
|
||||
|
||||
eve := GenerateNewProfile("eve")
|
||||
ok, _ = eve.AttemptDecryption(c3, s3)
|
||||
ok, _, _, _ = eve.AttemptDecryption(c3, s3)
|
||||
if ok {
|
||||
t.Errorf("Eves hould not be able to decrypt messages!")
|
||||
t.Errorf("Eves hould not be able to decrypt Messages!")
|
||||
}
|
||||
} else {
|
||||
t.Errorf("Bob failed to encrypt a message to the group")
|
||||
|
|
|
@ -1,148 +0,0 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Manager encapsulates all the logic necessary to manage outgoing peer and server connections.
|
||||
type Manager struct {
|
||||
peerConnections map[string]*PeerPeerConnection
|
||||
serverConnections map[string]*PeerServerConnection
|
||||
lock sync.Mutex
|
||||
breakChannel chan bool
|
||||
}
|
||||
|
||||
// NewConnectionsManager creates a new instance of Manager.
|
||||
func NewConnectionsManager() *Manager {
|
||||
m := new(Manager)
|
||||
m.peerConnections = make(map[string]*PeerPeerConnection)
|
||||
m.serverConnections = make(map[string]*PeerServerConnection)
|
||||
m.breakChannel = make(chan bool)
|
||||
return m
|
||||
}
|
||||
|
||||
// ManagePeerConnection creates a new PeerConnection for the given Host and Profile.
|
||||
func (m *Manager) ManagePeerConnection(host string, profile *model.Profile) {
|
||||
m.lock.Lock()
|
||||
|
||||
_, exists := m.peerConnections[host]
|
||||
if !exists {
|
||||
ppc := NewPeerPeerConnection(host, profile)
|
||||
go ppc.Run()
|
||||
m.peerConnections[host] = ppc
|
||||
}
|
||||
m.lock.Unlock()
|
||||
|
||||
}
|
||||
|
||||
// ManageServerConnection creates a new ServerConnection for Host with the given callback handler.
|
||||
func (m *Manager) ManageServerConnection(host string, handler func(string, *protocol.GroupMessage)) {
|
||||
m.lock.Lock()
|
||||
|
||||
_, exists := m.serverConnections[host]
|
||||
if !exists {
|
||||
psc := NewPeerServerConnection(host)
|
||||
go psc.Run()
|
||||
psc.GroupMessageHandler = handler
|
||||
m.serverConnections[host] = psc
|
||||
}
|
||||
m.lock.Unlock()
|
||||
}
|
||||
|
||||
// GetPeers returns a map of all peer connections with their state
|
||||
func (m *Manager) GetPeers() map[string]ConnectionState {
|
||||
rm := make(map[string]ConnectionState)
|
||||
m.lock.Lock()
|
||||
for onion, ppc := range m.peerConnections {
|
||||
rm[onion] = ppc.GetState()
|
||||
}
|
||||
m.lock.Unlock()
|
||||
return rm
|
||||
}
|
||||
|
||||
// GetServers returns a map of all server connections with their state.
|
||||
func (m *Manager) GetServers() map[string]ConnectionState {
|
||||
rm := make(map[string]ConnectionState)
|
||||
m.lock.Lock()
|
||||
for onion, psc := range m.serverConnections {
|
||||
rm[onion] = psc.GetState()
|
||||
}
|
||||
m.lock.Unlock()
|
||||
return rm
|
||||
}
|
||||
|
||||
// GetPeerPeerConnectionForOnion safely returns a given peer connection
|
||||
func (m *Manager) GetPeerPeerConnectionForOnion(host string) (ppc *PeerPeerConnection) {
|
||||
m.lock.Lock()
|
||||
ppc = m.peerConnections[host]
|
||||
m.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// GetPeerServerConnectionForOnion safely returns a given host connection
|
||||
func (m *Manager) GetPeerServerConnectionForOnion(host string) (psc *PeerServerConnection) {
|
||||
m.lock.Lock()
|
||||
psc = m.serverConnections[host]
|
||||
m.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// AttemptReconnections repeatedly attempts to reconnect with failed peers and servers.
|
||||
func (m *Manager) AttemptReconnections() {
|
||||
timeout := time.Duration(0) // first pass right away
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
m.lock.Lock()
|
||||
for _, ppc := range m.peerConnections {
|
||||
if ppc.GetState() == FAILED {
|
||||
go ppc.Run()
|
||||
}
|
||||
}
|
||||
m.lock.Unlock()
|
||||
|
||||
m.lock.Lock()
|
||||
for _, psc := range m.serverConnections {
|
||||
if psc.GetState() == FAILED {
|
||||
go psc.Run()
|
||||
}
|
||||
}
|
||||
m.lock.Unlock()
|
||||
|
||||
// Launch Another Run In 30 Seconds
|
||||
timeout = time.Duration(30 * time.Second)
|
||||
case <-m.breakChannel:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ClosePeerConnection closes an existing peer connection
|
||||
func (m *Manager) ClosePeerConnection(onion string) {
|
||||
m.lock.Lock()
|
||||
pc, ok := m.peerConnections[onion]
|
||||
if ok {
|
||||
pc.Close()
|
||||
delete(m.peerConnections, onion)
|
||||
}
|
||||
m.lock.Unlock()
|
||||
}
|
||||
|
||||
// Shutdown closes all connections under managment (freeing their goroutines)
|
||||
func (m *Manager) Shutdown() {
|
||||
m.breakChannel <- true
|
||||
m.lock.Lock()
|
||||
for onion, ppc := range m.peerConnections {
|
||||
ppc.Close()
|
||||
delete(m.peerConnections, onion)
|
||||
}
|
||||
for onion, psc := range m.serverConnections {
|
||||
psc.Close()
|
||||
delete(m.serverConnections, onion)
|
||||
}
|
||||
m.lock.Unlock()
|
||||
}
|
|
@ -1,110 +0,0 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/peer/peer"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connection"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/identity"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PeerPeerConnection encapsulates a single outgoing Peer->Peer connection
|
||||
type PeerPeerConnection struct {
|
||||
connection.AutoConnectionHandler
|
||||
PeerHostname string
|
||||
state ConnectionState
|
||||
connection *connection.Connection
|
||||
profile *model.Profile
|
||||
}
|
||||
|
||||
// NewPeerPeerConnection creates a new peer connection for the given hostname and profile.
|
||||
func NewPeerPeerConnection(peerhostname string, profile *model.Profile) *PeerPeerConnection {
|
||||
ppc := new(PeerPeerConnection)
|
||||
ppc.PeerHostname = peerhostname
|
||||
ppc.profile = profile
|
||||
ppc.Init()
|
||||
return ppc
|
||||
}
|
||||
|
||||
// GetState returns the current connection state
|
||||
func (ppc *PeerPeerConnection) GetState() ConnectionState {
|
||||
return ppc.state
|
||||
}
|
||||
|
||||
// ClientIdentity passes the given CwtchIdentity packet to the profile.
|
||||
func (ppc *PeerPeerConnection) ClientIdentity(ci *protocol.CwtchIdentity) {
|
||||
ppc.profile.AddCwtchIdentity(ppc.PeerHostname, ci)
|
||||
}
|
||||
|
||||
// HandleGroupInvite passes the given group invite tothe profile
|
||||
func (ppc *PeerPeerConnection) HandleGroupInvite(gci *protocol.GroupChatInvite) {
|
||||
ppc.profile.ProcessInvite(gci, ppc.PeerHostname)
|
||||
}
|
||||
|
||||
// GetClientIdentityPacket returns nil to avoid peers constantly sending identity packets to eachother.
|
||||
func (ppc *PeerPeerConnection) GetClientIdentityPacket() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendGroupInvite sends the given serialized invite packet to the Peer
|
||||
func (ppc *PeerPeerConnection) SendGroupInvite(invite []byte) {
|
||||
ppc.connection.Do(func() error {
|
||||
channel := ppc.connection.Channel("im.cwtch.peer", channels.Outbound)
|
||||
if channel != nil {
|
||||
peerchannel, ok := channel.Handler.(*peer.CwtchPeerChannel)
|
||||
if ok {
|
||||
log.Printf("Sending group invite packet\n")
|
||||
peerchannel.SendMessage(invite)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Run manages the setup and teardown of a peer->peer connection
|
||||
func (ppc *PeerPeerConnection) Run() error {
|
||||
ppc.state = CONNECTING
|
||||
rc, err := goricochet.Open(ppc.PeerHostname)
|
||||
if err == nil {
|
||||
rc.TraceLog(false)
|
||||
ppc.connection = rc
|
||||
ppc.state = CONNECTED
|
||||
_, err := connection.HandleOutboundConnection(ppc.connection).ProcessAuthAsClient(identity.Initialize(ppc.profile.Name, ppc.profile.OnionPrivateKey))
|
||||
if err == nil {
|
||||
ppc.state = AUTHENTICATED
|
||||
go func() {
|
||||
ppc.connection.Do(func() error {
|
||||
ppc.connection.RequestOpenChannel("im.cwtch.peer", &peer.CwtchPeerChannel{Handler: ppc})
|
||||
return nil
|
||||
})
|
||||
|
||||
time.Sleep(time.Second * 1)
|
||||
ppc.connection.Do(func() error {
|
||||
channel := ppc.connection.Channel("im.cwtch.peer", channels.Outbound)
|
||||
if channel != nil {
|
||||
peerchannel, ok := channel.Handler.(*peer.CwtchPeerChannel)
|
||||
if ok {
|
||||
peerchannel.SendMessage(ppc.profile.GetCwtchIdentityPacket())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
}()
|
||||
|
||||
ppc.connection.Process(ppc)
|
||||
}
|
||||
}
|
||||
ppc.state = FAILED
|
||||
return err
|
||||
}
|
||||
|
||||
// Close closes the connection
|
||||
func (ppc *PeerPeerConnection) Close() {
|
||||
ppc.state = KILLED
|
||||
ppc.connection.Conn.Close()
|
||||
}
|
|
@ -1,116 +0,0 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/peer/peer"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connection"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/identity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func PeerAuthValid(string, rsa.PublicKey) (allowed, known bool) {
|
||||
return true, true
|
||||
}
|
||||
|
||||
func runtestpeer(t *testing.T, tp *TestPeer, privateKey *rsa.PrivateKey) {
|
||||
ln, _ := net.Listen("tcp", "127.0.0.1:5452")
|
||||
conn, _ := ln.Accept()
|
||||
defer conn.Close()
|
||||
|
||||
rc, err := goricochet.NegotiateVersionInbound(conn)
|
||||
if err != nil {
|
||||
t.Errorf("Negotiate Version Error: %v", err)
|
||||
}
|
||||
rc.TraceLog(true)
|
||||
err = connection.HandleInboundConnection(rc).ProcessAuthAsServer(identity.Initialize("", privateKey), PeerAuthValid)
|
||||
if err != nil {
|
||||
t.Errorf("ServerAuth Error: %v", err)
|
||||
}
|
||||
tp.RegisterChannelHandler("im.cwtch.peer", func() channels.Handler {
|
||||
cpc := new(peer.CwtchPeerChannel)
|
||||
cpc.Handler = tp
|
||||
return cpc
|
||||
})
|
||||
|
||||
go func() {
|
||||
alice := model.GenerateNewProfile("alice")
|
||||
time.Sleep(time.Second * 1)
|
||||
rc.Do(func() error {
|
||||
channel := rc.Channel("im.cwtch.peer", channels.Inbound)
|
||||
if channel != nil {
|
||||
peerchannel, ok := channel.Handler.(*peer.CwtchPeerChannel)
|
||||
if ok {
|
||||
peerchannel.SendMessage(alice.GetCwtchIdentityPacket())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
|
||||
rc.Process(tp)
|
||||
}
|
||||
|
||||
type TestPeer struct {
|
||||
connection.AutoConnectionHandler
|
||||
ReceivedIdentityPacket bool
|
||||
ReceivedGroupInvite bool
|
||||
}
|
||||
|
||||
func (tp *TestPeer) ClientIdentity(ci *protocol.CwtchIdentity) {
|
||||
tp.ReceivedIdentityPacket = true
|
||||
}
|
||||
|
||||
func (tp *TestPeer) HandleGroupInvite(gci *protocol.GroupChatInvite) {
|
||||
tp.ReceivedGroupInvite = true
|
||||
}
|
||||
|
||||
func (tp *TestPeer) GetClientIdentityPacket() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestPeerPeerConnection(t *testing.T) {
|
||||
privateKey, err := utils.GeneratePrivateKey()
|
||||
if err != nil {
|
||||
t.Errorf("Private Key Error %v", err)
|
||||
}
|
||||
onionAddr, err := utils.GetOnionAddress(privateKey)
|
||||
if err != nil {
|
||||
t.Errorf("Onion address error %v", err)
|
||||
}
|
||||
|
||||
profile := model.GenerateNewProfile("alice")
|
||||
ppc := NewPeerPeerConnection("127.0.0.1:5452|"+onionAddr, profile)
|
||||
//numcalls := 0
|
||||
tp := new(TestPeer)
|
||||
tp.Init()
|
||||
go runtestpeer(t, tp, privateKey)
|
||||
state := ppc.GetState()
|
||||
if state != DISCONNECTED {
|
||||
t.Errorf("new connections should start in disconnected state")
|
||||
}
|
||||
go ppc.Run()
|
||||
time.Sleep(time.Second * 5)
|
||||
state = ppc.GetState()
|
||||
if state != AUTHENTICATED {
|
||||
t.Errorf("connection state should be authenticated(3), was instead %v", state)
|
||||
}
|
||||
|
||||
if tp.ReceivedIdentityPacket == false {
|
||||
t.Errorf("should have received an identity packet")
|
||||
}
|
||||
|
||||
_, invite, _ := profile.StartGroup("aaa.onion")
|
||||
ppc.SendGroupInvite(invite)
|
||||
time.Sleep(time.Second * 3)
|
||||
if tp.ReceivedGroupInvite == false {
|
||||
t.Errorf("should have received an group invite packet")
|
||||
}
|
||||
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
package connections
|
||||
|
||||
// ConnectionState defines the various states a connection can be in from disconnected to authenticated
|
||||
type ConnectionState int
|
||||
|
||||
// Connection States
|
||||
// DISCONNECTED - No existing connection has been made, or all attempts have failed
|
||||
// CONNECTING - We are in the process of attempting to connect to a given endpoint
|
||||
// CONNECTED - We have connected but not yet authenticated
|
||||
// AUTHENTICATED - im.ricochet.auth-hidden-server has succeeded on thec onnection.
|
||||
const (
|
||||
DISCONNECTED ConnectionState = iota
|
||||
CONNECTING
|
||||
CONNECTED
|
||||
AUTHENTICATED
|
||||
FAILED
|
||||
KILLED
|
||||
)
|
|
@ -1,64 +1,53 @@
|
|||
package peer
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/peer/connections"
|
||||
"cwtch.im/cwtch/peer/peer"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/application"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connection"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/ulule/deepcopier"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"golang.org/x/crypto/sha3"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// cwtchPeer manages incoming and outgoing connections and all processing for a Cwtch Peer
|
||||
// cwtchPeer manages incoming and outgoing connections and all processing for a Cwtch cwtchPeer
|
||||
type cwtchPeer struct {
|
||||
connection.AutoConnectionHandler
|
||||
Profile *model.Profile
|
||||
app *application.RicochetApplication
|
||||
mutex sync.Mutex
|
||||
Log chan string `json:"-"`
|
||||
connectionsManager *connections.Manager
|
||||
profilefile string
|
||||
key [32]byte
|
||||
salt [128]byte
|
||||
Profile *model.Profile
|
||||
mutex sync.Mutex
|
||||
shutdown bool
|
||||
|
||||
queue event.Queue
|
||||
eventBus event.Manager
|
||||
}
|
||||
|
||||
// CwtchPeerInterface provides us with a way of testing systems built on top of cwtch without having to
|
||||
// CwtchPeer provides us with a way of testing systems built on top of cwtch without having to
|
||||
// directly implement a cwtchPeer.
|
||||
type CwtchPeerInterface interface {
|
||||
Save(string) error
|
||||
type CwtchPeer interface {
|
||||
Init(event.Manager)
|
||||
PeerWithOnion(string)
|
||||
InviteOnionToGroup(string, string) error
|
||||
SendMessageToPeer(string, string) string
|
||||
|
||||
TrustPeer(string) error
|
||||
BlockPeer(string) error
|
||||
UnblockPeer(string) error
|
||||
AcceptInvite(string) error
|
||||
RejectInvite(string)
|
||||
DeleteContact(string)
|
||||
DeleteGroup(string)
|
||||
|
||||
JoinServer(string)
|
||||
SendMessageToGroup(string, string) error
|
||||
SendMessageToGroupTracked(string, string) (string, error)
|
||||
|
||||
GetProfile() *model.Profile
|
||||
|
||||
GetPeers() map[string]connections.ConnectionState
|
||||
GetServers() map[string]connections.ConnectionState
|
||||
GetPeerState(string) connections.ConnectionState
|
||||
|
||||
StartGroup(string) (string, []byte, error)
|
||||
|
||||
|
@ -66,152 +55,62 @@ type CwtchPeerInterface interface {
|
|||
ExportGroup(string) (string, error)
|
||||
|
||||
GetGroup(string) *model.Group
|
||||
GetGroupState(string) connections.ConnectionState
|
||||
GetGroups() []string
|
||||
AddContact(nick, onion string, trusted bool)
|
||||
GetContacts() []string
|
||||
GetContact(string) *model.PublicProfile
|
||||
|
||||
Listen() error
|
||||
Listen()
|
||||
StartPeersConnections()
|
||||
StartGroupConnections()
|
||||
Shutdown()
|
||||
}
|
||||
|
||||
// createKey derives a key and salt for use in encrypting cwtchPeers
|
||||
func createKey(password string) ([32]byte, [128]byte) {
|
||||
var salt [128]byte
|
||||
if _, err := io.ReadFull(rand.Reader, salt[:]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dk := pbkdf2.Key([]byte(password), salt[:], 4096, 32, sha3.New512)
|
||||
|
||||
var dkr [32]byte
|
||||
copy(dkr[:], dk)
|
||||
return dkr, salt
|
||||
}
|
||||
|
||||
//encryptProfile encrypts the cwtchPeer via the specified key.
|
||||
func encryptProfile(p *cwtchPeer, key [32]byte) []byte {
|
||||
var nonce [24]byte
|
||||
|
||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
//copy the struct, then remove the key and salt before saving the copy
|
||||
cpc := &cwtchPeer{}
|
||||
deepcopier.Copy(p).To(cpc)
|
||||
var blankkey [32]byte
|
||||
var blanksalt [128]byte
|
||||
cpc.key = blankkey
|
||||
cpc.salt = blanksalt
|
||||
bytes, _ := json.Marshal(cpc)
|
||||
encrypted := secretbox.Seal(nonce[:], []byte(bytes), &nonce, &key)
|
||||
return encrypted
|
||||
}
|
||||
|
||||
//decryptProfile decrypts the passed ciphertext into a cwtchPeer via the specified key.
|
||||
func decryptProfile(ciphertext []byte, key [32]byte) (*cwtchPeer, error) {
|
||||
|
||||
var decryptNonce [24]byte
|
||||
copy(decryptNonce[:], ciphertext[:24])
|
||||
decrypted, ok := secretbox.Open(nil, ciphertext[24:], &decryptNonce, &key)
|
||||
if ok {
|
||||
cp := &cwtchPeer{}
|
||||
err := json.Unmarshal(decrypted, &cp)
|
||||
if err == nil {
|
||||
return cp, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, fmt.Errorf("Failed to decrypt")
|
||||
}
|
||||
|
||||
func (cp *cwtchPeer) setup() {
|
||||
cp.Log = make(chan string)
|
||||
cp.connectionsManager = connections.NewConnectionsManager()
|
||||
cp.Init()
|
||||
|
||||
go cp.connectionsManager.AttemptReconnections()
|
||||
|
||||
for onion, profile := range cp.Profile.Contacts {
|
||||
if profile.Trusted && !profile.Blocked {
|
||||
cp.PeerWithOnion(onion)
|
||||
}
|
||||
}
|
||||
|
||||
for _, group := range cp.Profile.Groups {
|
||||
if group.Accepted || group.Owner == "self" {
|
||||
cp.JoinServer(group.GroupServer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewCwtchPeer creates and returns a new cwtchPeer with the given name.
|
||||
func NewCwtchPeer(name string, password string) CwtchPeerInterface {
|
||||
func NewCwtchPeer(name string) CwtchPeer {
|
||||
cp := new(cwtchPeer)
|
||||
cp.Profile = model.GenerateNewProfile(name)
|
||||
cp.setup()
|
||||
key, salt := createKey(password)
|
||||
cp.key = key
|
||||
cp.salt = salt
|
||||
cp.shutdown = false
|
||||
return cp
|
||||
}
|
||||
|
||||
// Save saves the cwtchPeer profile state to a file.
|
||||
func (cp *cwtchPeer) Save(profilefile string) error {
|
||||
cp.mutex.Lock()
|
||||
encryptedbytes := encryptProfile(cp, cp.key)
|
||||
|
||||
// the salt for the derived key is appended to the front of the file
|
||||
encryptedbytes = append(cp.salt[:], encryptedbytes...)
|
||||
err := ioutil.WriteFile(profilefile, encryptedbytes, 0600)
|
||||
cp.profilefile = profilefile
|
||||
cp.mutex.Unlock()
|
||||
return err
|
||||
// FromProfile generates a new peer from a profile.
|
||||
func FromProfile(profile *model.Profile) CwtchPeer {
|
||||
cp := new(cwtchPeer)
|
||||
cp.Profile = profile
|
||||
return cp
|
||||
}
|
||||
|
||||
// LoadCwtchPeer loads an existing cwtchPeer from a file.
|
||||
func LoadCwtchPeer(profilefile string, password string) (CwtchPeerInterface, error) {
|
||||
encryptedbytes, err := ioutil.ReadFile(profilefile)
|
||||
// Init instantiates a cwtchPeer
|
||||
func (cp *cwtchPeer) Init(eventBus event.Manager) {
|
||||
cp.queue = event.NewQueue()
|
||||
go cp.eventHandler()
|
||||
|
||||
if err == nil {
|
||||
var dkr [32]byte
|
||||
var salty [128]byte
|
||||
|
||||
//Separate the salt from the encrypted bytes, then generate the derived key
|
||||
salt, encryptedbytes := encryptedbytes[0:128], encryptedbytes[128:]
|
||||
dk := pbkdf2.Key([]byte(password), salt, 4096, 32, sha3.New512)
|
||||
|
||||
//cast to arrays
|
||||
copy(dkr[:], dk)
|
||||
copy(salty[:], salt)
|
||||
|
||||
cp, err := decryptProfile(encryptedbytes, dkr)
|
||||
if err == nil {
|
||||
cp.setup()
|
||||
cp.profilefile = profilefile
|
||||
cp.key = dkr
|
||||
cp.salt = salty
|
||||
return cp, nil
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
cp.eventBus = eventBus
|
||||
cp.eventBus.Subscribe(event.EncryptedGroupMessage, cp.queue)
|
||||
cp.eventBus.Subscribe(event.NewGroupInvite, cp.queue)
|
||||
cp.eventBus.Subscribe(event.ServerStateChange, cp.queue)
|
||||
cp.eventBus.Subscribe(event.PeerStateChange, cp.queue)
|
||||
}
|
||||
|
||||
// ImportGroup intializes a group from an imported source rather than a peer invite
|
||||
func (cp *cwtchPeer) ImportGroup(exportedInvite string) (groupID string, err error) {
|
||||
if strings.HasPrefix(exportedInvite, "torv2") {
|
||||
data, err := base64.StdEncoding.DecodeString(exportedInvite[21+44:])
|
||||
if strings.HasPrefix(exportedInvite, "torv3") {
|
||||
data, err := base64.StdEncoding.DecodeString(exportedInvite[5+44:])
|
||||
if err == nil {
|
||||
cpp := &protocol.CwtchPeerPacket{}
|
||||
err := proto.Unmarshal(data, cpp)
|
||||
err = proto.Unmarshal(data, cpp)
|
||||
if err == nil {
|
||||
pk, err := base64.StdEncoding.DecodeString(exportedInvite[21 : 21+44])
|
||||
jsobj, err := proto.Marshal(cpp.GetGroupChatInvite())
|
||||
if err == nil {
|
||||
edpk := ed25519.PublicKey(pk)
|
||||
onion := exportedInvite[5:21]
|
||||
cp.Profile.AddContact(onion, &model.PublicProfile{Name: "", Ed25519PublicKey: edpk, Trusted: true, Blocked: false, Onion: onion})
|
||||
cp.Profile.ProcessInvite(cpp.GetGroupChatInvite(), onion)
|
||||
return cpp.GroupChatInvite.GetGroupName(), nil
|
||||
cp.eventBus.Publish(event.NewEvent(event.NewGroupInvite, map[event.Field]string{
|
||||
event.GroupInvite: string(jsobj),
|
||||
}))
|
||||
} else {
|
||||
log.Errorf("error serializing group: %v", err)
|
||||
}
|
||||
return cpp.GroupChatInvite.GetGroupName(), nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -224,9 +123,9 @@ func (cp *cwtchPeer) ImportGroup(exportedInvite string) (groupID string, err err
|
|||
func (cp *cwtchPeer) ExportGroup(groupID string) (string, error) {
|
||||
group := cp.Profile.GetGroupByGroupID(groupID)
|
||||
if group != nil {
|
||||
invite, err := group.Invite()
|
||||
invite, err := group.Invite(group.GetInitialMessage())
|
||||
if err == nil {
|
||||
exportedInvite := "torv2" + cp.Profile.Onion + base64.StdEncoding.EncodeToString(cp.Profile.Ed25519PublicKey) + base64.StdEncoding.EncodeToString(invite)
|
||||
exportedInvite := "torv3" + base64.StdEncoding.EncodeToString(cp.Profile.Ed25519PublicKey) + base64.StdEncoding.EncodeToString(invite)
|
||||
return exportedInvite, err
|
||||
}
|
||||
}
|
||||
|
@ -235,7 +134,24 @@ func (cp *cwtchPeer) ExportGroup(groupID string) (string, error) {
|
|||
|
||||
// StartGroup create a new group linked to the given server and returns the group ID, an invite or an error.
|
||||
func (cp *cwtchPeer) StartGroup(server string) (string, []byte, error) {
|
||||
return cp.Profile.StartGroup(server)
|
||||
return cp.StartGroupWithMessage(server, []byte{})
|
||||
}
|
||||
|
||||
// StartGroupWithMessage create a new group linked to the given server and returns the group ID, an invite or an error.
|
||||
func (cp *cwtchPeer) StartGroupWithMessage(server string, initialMessage []byte) (groupID string, invite []byte, err error) {
|
||||
groupID, invite, err = cp.Profile.StartGroupWithMessage(server, initialMessage)
|
||||
if err == nil {
|
||||
group := cp.GetGroup(groupID)
|
||||
jsobj, err := json.Marshal(group)
|
||||
if err == nil {
|
||||
cp.eventBus.Publish(event.NewEvent(event.GroupCreated, map[event.Field]string{
|
||||
event.Data: string(jsobj),
|
||||
}))
|
||||
}
|
||||
} else {
|
||||
log.Errorf("error creating group: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetGroups returns an unordered list of all group IDs.
|
||||
|
@ -248,6 +164,17 @@ func (cp *cwtchPeer) GetGroup(groupID string) *model.Group {
|
|||
return cp.Profile.GetGroupByGroupID(groupID)
|
||||
}
|
||||
|
||||
func (cp *cwtchPeer) AddContact(nick, onion string, trusted bool) {
|
||||
decodedPub, _ := base32.StdEncoding.DecodeString(strings.ToUpper(onion))
|
||||
pp := &model.PublicProfile{Name: nick, Ed25519PublicKey: decodedPub, Trusted: trusted, Blocked: false, Onion: onion, Attributes: map[string]string{"nick": nick}}
|
||||
cp.Profile.AddContact(onion, pp)
|
||||
pd, _ := json.Marshal(pp)
|
||||
cp.eventBus.Publish(event.NewEvent(event.PeerCreated, map[event.Field]string{
|
||||
event.Data: string(pd),
|
||||
event.RemotePeer: onion,
|
||||
}))
|
||||
}
|
||||
|
||||
// GetContacts returns an unordered list of onions
|
||||
func (cp *cwtchPeer) GetContacts() []string {
|
||||
return cp.Profile.GetContacts()
|
||||
|
@ -259,82 +186,82 @@ func (cp *cwtchPeer) GetContact(onion string) *model.PublicProfile {
|
|||
return contact
|
||||
}
|
||||
|
||||
// GetProfile returns the profile associated with this Peer.
|
||||
// TODO While it is probably "safe", it is not really "safe", to call functions on this profile. This only exists to return things like Name and Onion,we should gate these.
|
||||
// GetProfile returns the profile associated with this cwtchPeer.
|
||||
func (cp *cwtchPeer) GetProfile() *model.Profile {
|
||||
return cp.Profile
|
||||
}
|
||||
|
||||
func (cp *cwtchPeer) GetPeerState(onion string) connections.ConnectionState {
|
||||
return connections.ConnectionStateToType[cp.Profile.Contacts[onion].State]
|
||||
}
|
||||
|
||||
func (cp *cwtchPeer) GetGroupState(groupid string) connections.ConnectionState {
|
||||
return connections.ConnectionStateToType[cp.Profile.Groups[groupid].State]
|
||||
}
|
||||
|
||||
// PeerWithOnion is the entry point for cwtchPeer relationships
|
||||
func (cp *cwtchPeer) PeerWithOnion(onion string) {
|
||||
cp.connectionsManager.ManagePeerConnection(onion, cp.Profile)
|
||||
cp.eventBus.Publish(event.NewEvent(event.PeerRequest, map[event.Field]string{event.RemotePeer: onion}))
|
||||
}
|
||||
|
||||
// DeleteContact deletes a peer from the profile, storage, and handling
|
||||
func (cp *cwtchPeer) DeleteContact(onion string) {
|
||||
cp.Profile.DeleteContact(onion)
|
||||
cp.eventBus.Publish(event.NewEventList(event.DeleteContact, event.RemotePeer, onion))
|
||||
}
|
||||
|
||||
// DeleteGroup deletes a Group from the profile, storage, and handling
|
||||
func (cp *cwtchPeer) DeleteGroup(groupID string) {
|
||||
cp.Profile.DeleteGroup(groupID)
|
||||
cp.eventBus.Publish(event.NewEventList(event.DeleteGroup, event.GroupID, groupID))
|
||||
}
|
||||
|
||||
// InviteOnionToGroup kicks off the invite process
|
||||
func (cp *cwtchPeer) InviteOnionToGroup(onion string, groupid string) error {
|
||||
|
||||
group := cp.Profile.GetGroupByGroupID(groupid)
|
||||
if group != nil {
|
||||
log.Printf("Constructing invite for group: %v\n", group)
|
||||
invite, err := group.Invite()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ppc := cp.connectionsManager.GetPeerPeerConnectionForOnion(onion)
|
||||
if ppc == nil {
|
||||
return errors.New("peer connection not setup for onion. peers must be trusted before sending")
|
||||
}
|
||||
if ppc.GetState() == connections.AUTHENTICATED {
|
||||
log.Printf("Got connection for group: %v - Sending Invite\n", ppc)
|
||||
ppc.SendGroupInvite(invite)
|
||||
} else {
|
||||
return errors.New("cannot send invite to onion: peer connection is not ready")
|
||||
}
|
||||
return nil
|
||||
if group == nil {
|
||||
return errors.New("invalid group id")
|
||||
}
|
||||
return errors.New("group id could not be found")
|
||||
}
|
||||
|
||||
// ReceiveGroupMessage is a callback function that processes GroupMessages from a given server
|
||||
func (cp *cwtchPeer) ReceiveGroupMessage(server string, gm *protocol.GroupMessage) {
|
||||
cp.Profile.AttemptDecryption(gm.Ciphertext, gm.Signature)
|
||||
invite, err := group.Invite(group.InitialMessage)
|
||||
if err == nil {
|
||||
cp.eventBus.Publish(event.NewEvent(event.InvitePeerToGroup, map[event.Field]string{event.RemotePeer: onion, event.GroupInvite: string(invite)}))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// JoinServer manages a new server connection with the given onion address
|
||||
func (cp *cwtchPeer) JoinServer(onion string) {
|
||||
cp.connectionsManager.ManageServerConnection(onion, cp.ReceiveGroupMessage)
|
||||
cp.eventBus.Publish(event.NewEvent(event.JoinServer, map[event.Field]string{event.GroupServer: onion}))
|
||||
}
|
||||
|
||||
// SendMessageToGroup attemps to sent the given message to the given group id.
|
||||
// SendMessageToGroup attempts to sent the given message to the given group id.
|
||||
// TODO: Deprecate in favour of SendMessageToGroupTracked
|
||||
func (cp *cwtchPeer) SendMessageToGroup(groupid string, message string) error {
|
||||
group := cp.Profile.GetGroupByGroupID(groupid)
|
||||
if group == nil {
|
||||
return errors.New("group does not exist")
|
||||
}
|
||||
psc := cp.connectionsManager.GetPeerServerConnectionForOnion(group.GroupServer)
|
||||
if psc == nil {
|
||||
return errors.New("could not find server connection to send message to")
|
||||
}
|
||||
ct, sig, err := cp.Profile.EncryptMessageToGroup(message, groupid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gm := &protocol.GroupMessage{
|
||||
Ciphertext: ct,
|
||||
Signature: sig,
|
||||
}
|
||||
err = psc.SendGroupMessage(gm)
|
||||
_, err := cp.SendMessageToGroupTracked(groupid, message)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetPeers returns a list of peer connections.
|
||||
func (cp *cwtchPeer) GetPeers() map[string]connections.ConnectionState {
|
||||
return cp.connectionsManager.GetPeers()
|
||||
// SendMessageToGroup attempts to sent the given message to the given group id.
|
||||
// It returns the signature of the message which can be used to identify it in any UX layer.
|
||||
func (cp *cwtchPeer) SendMessageToGroupTracked(groupid string, message string) (string, error) {
|
||||
group := cp.Profile.GetGroupByGroupID(groupid)
|
||||
if group == nil {
|
||||
return "", errors.New("invalid group id")
|
||||
}
|
||||
ct, sig, err := cp.Profile.EncryptMessageToGroup(message, groupid)
|
||||
|
||||
if err == nil {
|
||||
cp.eventBus.Publish(event.NewEvent(event.SendMessageToGroup, map[event.Field]string{event.GroupServer: group.GroupServer, event.Ciphertext: string(ct), event.Signature: string(sig)}))
|
||||
}
|
||||
|
||||
return string(sig), err
|
||||
}
|
||||
|
||||
// GetServers returns a list of server connections
|
||||
func (cp *cwtchPeer) GetServers() map[string]connections.ConnectionState {
|
||||
return cp.connectionsManager.GetServers()
|
||||
func (cp *cwtchPeer) SendMessageToPeer(onion string, message string) string {
|
||||
event := event.NewEvent(event.SendMessageToPeer, map[event.Field]string{event.RemotePeer: onion, event.Data: message})
|
||||
cp.eventBus.Publish(event)
|
||||
return event.EventID
|
||||
}
|
||||
|
||||
// TrustPeer sets an existing peer relationship to trusted
|
||||
|
@ -349,13 +276,27 @@ func (cp *cwtchPeer) TrustPeer(peer string) error {
|
|||
// BlockPeer blocks an existing peer relationship.
|
||||
func (cp *cwtchPeer) BlockPeer(peer string) error {
|
||||
err := cp.Profile.BlockPeer(peer)
|
||||
cp.connectionsManager.ClosePeerConnection(peer)
|
||||
cp.eventBus.Publish(event.NewEvent(event.BlockPeer, map[event.Field]string{event.RemotePeer: peer}))
|
||||
return err
|
||||
}
|
||||
|
||||
// UnblockPeer blocks an existing peer relationship.
|
||||
func (cp *cwtchPeer) UnblockPeer(peer string) error {
|
||||
err := cp.Profile.UnblockPeer(peer)
|
||||
cp.eventBus.Publish(event.NewEvent(event.UnblockPeer, map[event.Field]string{event.RemotePeer: peer}))
|
||||
return err
|
||||
}
|
||||
|
||||
// AcceptInvite accepts a given existing group invite
|
||||
func (cp *cwtchPeer) AcceptInvite(groupID string) error {
|
||||
return cp.Profile.AcceptInvite(groupID)
|
||||
err := cp.Profile.AcceptInvite(groupID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.eventBus.Publish(event.NewEvent(event.AcceptGroupInvite, map[event.Field]string{event.GroupID: groupID}))
|
||||
cp.JoinServer(cp.Profile.Groups[groupID].GroupServer)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RejectInvite rejects a given group invite.
|
||||
|
@ -363,82 +304,71 @@ func (cp *cwtchPeer) RejectInvite(groupID string) {
|
|||
cp.Profile.RejectInvite(groupID)
|
||||
}
|
||||
|
||||
// LookupContact returns that a contact is known and allowed to communicate for all cases.
|
||||
func (cp *cwtchPeer) LookupContact(hostname string, publicKey rsa.PublicKey) (allowed, known bool) {
|
||||
blocked := cp.Profile.IsBlocked(hostname)
|
||||
return !blocked, true
|
||||
// Listen makes the peer open a listening port to accept incoming connections (and be detactably online)
|
||||
func (cp *cwtchPeer) Listen() {
|
||||
log.Debugf("cwtchPeer Listen sending ProtocolEngineStartListen\n")
|
||||
cp.eventBus.Publish(event.NewEvent(event.ProtocolEngineStartListen, map[event.Field]string{}))
|
||||
}
|
||||
|
||||
// ContactRequest needed to implement ContactRequestHandler Interface
|
||||
func (cp *cwtchPeer) ContactRequest(name string, message string) string {
|
||||
return "Accepted"
|
||||
}
|
||||
|
||||
// Listen sets up an onion listener to process incoming cwtch messages
|
||||
func (cp *cwtchPeer) Listen() error {
|
||||
cwtchpeer := new(application.RicochetApplication)
|
||||
l, err := application.SetupOnion("127.0.0.1:9051", "tcp4", "", cp.Profile.OnionPrivateKey, 9878)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
// StartGroupConnections attempts to connect to all group servers (thus initiating reconnect attempts in the conectionsmanager)
|
||||
func (cp *cwtchPeer) StartPeersConnections() {
|
||||
for _, contact := range cp.GetContacts() {
|
||||
cp.PeerWithOnion(contact)
|
||||
}
|
||||
}
|
||||
|
||||
af := application.ApplicationInstanceFactory{}
|
||||
af.Init()
|
||||
af.AddHandler("im.cwtch.peer", func(rai *application.ApplicationInstance) func() channels.Handler {
|
||||
cpi := new(CwtchPeerInstance)
|
||||
cpi.Init(rai, cwtchpeer)
|
||||
return func() channels.Handler {
|
||||
cpc := new(peer.CwtchPeerChannel)
|
||||
cpc.Handler = &CwtchPeerHandler{Onion: rai.RemoteHostname, Peer: cp}
|
||||
return cpc
|
||||
// StartPeerConnections attempts to connect to all peers (thus initiating reconnect attempts in the conectionsmanager)
|
||||
func (cp *cwtchPeer) StartGroupConnections() {
|
||||
joinedServers := map[string]bool{}
|
||||
for _, groupID := range cp.GetGroups() {
|
||||
// Only send a join server packet if we haven't joined this server yet...
|
||||
group := cp.GetGroup(groupID)
|
||||
if joined := joinedServers[groupID]; group.Accepted && !joined {
|
||||
log.Infof("Join Server %v (%v)\n", group.GroupServer, joined)
|
||||
cp.JoinServer(group.GroupServer)
|
||||
joinedServers[group.GroupServer] = true
|
||||
}
|
||||
})
|
||||
cwtchpeer.Init(cp.Profile.Name, cp.Profile.OnionPrivateKey, af, cp)
|
||||
log.Printf("Running cwtch peer on %v", l.Addr().String())
|
||||
cp.app = cwtchpeer
|
||||
cwtchpeer.Run(l)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown kills all connections and cleans up all goroutines for the peer
|
||||
func (cp *cwtchPeer) Shutdown() {
|
||||
cp.connectionsManager.Shutdown()
|
||||
cp.app.Shutdown()
|
||||
cp.shutdown = true
|
||||
cp.queue.Shutdown()
|
||||
}
|
||||
|
||||
// CwtchPeerInstance encapsulates incoming peer connections
|
||||
type CwtchPeerInstance struct {
|
||||
rai *application.ApplicationInstance
|
||||
ra *application.RicochetApplication
|
||||
}
|
||||
|
||||
// Init sets up a CwtchPeerInstance
|
||||
func (cpi *CwtchPeerInstance) Init(rai *application.ApplicationInstance, ra *application.RicochetApplication) {
|
||||
cpi.rai = rai
|
||||
cpi.ra = ra
|
||||
}
|
||||
|
||||
// CwtchPeerHandler encapsulates handling of incoming CwtchPackets
|
||||
type CwtchPeerHandler struct {
|
||||
Onion string
|
||||
Peer *cwtchPeer
|
||||
}
|
||||
|
||||
// ClientIdentity handles incoming ClientIdentity packets
|
||||
func (cph *CwtchPeerHandler) ClientIdentity(ci *protocol.CwtchIdentity) {
|
||||
log.Printf("Received Client Identity from %v %v\n", cph.Onion, ci.String())
|
||||
cph.Peer.Profile.AddCwtchIdentity(cph.Onion, ci)
|
||||
cph.Peer.Save(cph.Peer.profilefile)
|
||||
}
|
||||
|
||||
// HandleGroupInvite handles incoming GroupInvites
|
||||
func (cph *CwtchPeerHandler) HandleGroupInvite(gci *protocol.GroupChatInvite) {
|
||||
log.Printf("Received GroupID from %v %v\n", cph.Onion, gci.String())
|
||||
cph.Peer.Profile.ProcessInvite(gci, cph.Onion)
|
||||
}
|
||||
|
||||
// GetClientIdentityPacket returns our ClientIdentity packet so it can be sent to the connected peer.
|
||||
func (cph *CwtchPeerHandler) GetClientIdentityPacket() []byte {
|
||||
return cph.Peer.Profile.GetCwtchIdentityPacket()
|
||||
// eventHandler process events from other subsystems
|
||||
func (cp *cwtchPeer) eventHandler() {
|
||||
for {
|
||||
ev := cp.queue.Next()
|
||||
switch ev.EventType {
|
||||
case event.EncryptedGroupMessage:
|
||||
ok, groupID, message, seen := cp.Profile.AttemptDecryption([]byte(ev.Data[event.Ciphertext]), []byte(ev.Data[event.Signature]))
|
||||
if ok && !seen {
|
||||
cp.eventBus.Publish(event.NewEvent(event.NewMessageFromGroup, map[event.Field]string{event.TimestampReceived: message.Received.Format(time.RFC3339Nano), event.TimestampSent: message.Timestamp.Format(time.RFC3339Nano), event.Data: message.Message, event.GroupID: groupID, event.Signature: string(message.Signature), event.PreviousSignature: string(message.PreviousMessageSig), event.RemotePeer: message.PeerID}))
|
||||
}
|
||||
case event.NewGroupInvite:
|
||||
var groupInvite protocol.GroupChatInvite
|
||||
err := proto.Unmarshal([]byte(ev.Data[event.GroupInvite]), &groupInvite)
|
||||
if err != nil {
|
||||
log.Errorf("NewGroupInvite could not json decode invite: %v\n", err)
|
||||
}
|
||||
cp.Profile.ProcessInvite(&groupInvite, ev.Data[event.RemotePeer])
|
||||
case event.PeerStateChange:
|
||||
if _, exists := cp.Profile.Contacts[ev.Data[event.RemotePeer]]; exists {
|
||||
cp.Profile.Contacts[ev.Data[event.RemotePeer]].State = ev.Data[event.ConnectionState]
|
||||
}
|
||||
case event.ServerStateChange:
|
||||
for _, group := range cp.Profile.Groups {
|
||||
if group.GroupServer == ev.Data[event.GroupServer] {
|
||||
group.State = ev.Data[event.ConnectionState]
|
||||
}
|
||||
}
|
||||
default:
|
||||
if ev.EventType != "" {
|
||||
log.Errorf("peer event handler received an event it was not subscribed for: %v", ev.EventType)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,22 +4,76 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
// TODO: Rewrite these tests (and others) using the news event bus interface.
|
||||
func TestCwtchPeerGenerate(t *testing.T) {
|
||||
/**
|
||||
alice := NewCwtchPeer("alice")
|
||||
|
||||
alice := NewCwtchPeer("alice", "testpass")
|
||||
alice.Save("./test_profile")
|
||||
|
||||
aliceLoaded, err := LoadCwtchPeer("./test_profile", "testpass")
|
||||
if err != nil || aliceLoaded.GetProfile().Name != "alice" {
|
||||
t.Errorf("something went wrong saving and loading profiles %v %v", err, aliceLoaded)
|
||||
}
|
||||
|
||||
groupID, _, _ := aliceLoaded.StartGroup("test.server")
|
||||
exportedGroup, _ := aliceLoaded.ExportGroup(groupID)
|
||||
t.Logf("Exported Group: %v from %v", exportedGroup, aliceLoaded.GetProfile().Onion)
|
||||
groupID, _, _ := alice.StartGroup("test.server")
|
||||
exportedGroup, _ := alice.ExportGroup(groupID)
|
||||
t.Logf("Exported Group: %v from %v", exportedGroup, alice.GetProfile().Onion)
|
||||
|
||||
importedGroupID, err := alice.ImportGroup(exportedGroup)
|
||||
group := alice.GetGroup(importedGroupID)
|
||||
t.Logf("Imported Group: %v, err := %v %v", group, err, importedGroupID)
|
||||
|
||||
*/
|
||||
}
|
||||
|
||||
func TestTrustPeer(t *testing.T) {
|
||||
/**
|
||||
groupName := "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"
|
||||
alice := NewCwtchPeer("alice")
|
||||
aem := new(event.Manager)
|
||||
aem.Initialize()
|
||||
alice.Init(connectivity.LocalProvider(),aem)
|
||||
defer alice.Shutdown()
|
||||
bob := NewCwtchPeer("bob")
|
||||
bem := new(event.Manager)
|
||||
bem.Initialize()
|
||||
bob.Init(connectivity.LocalProvider(), bem)
|
||||
defer bob.Shutdown()
|
||||
|
||||
bobOnion := bob.GetProfile().Onion
|
||||
aliceOnion := alice.GetProfile().Onion
|
||||
|
||||
groupID, _, err := alice.StartGroup(groupName)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
groupAlice := alice.GetGroup(groupID)
|
||||
if groupAlice.GroupID != groupID {
|
||||
t.Errorf("Alice should be part of group %v, got %v instead", groupID, groupAlice)
|
||||
}
|
||||
|
||||
exportedGroup, err := alice.ExportGroup(groupID)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = alice.InviteOnionToGroup(bobOnion, groupID)
|
||||
if err == nil {
|
||||
t.Errorf("onion invitation should fail since alice does no trust bob")
|
||||
}
|
||||
|
||||
err = alice.TrustPeer(bobOnion)
|
||||
if err == nil {
|
||||
t.Errorf("trust peer should fail since alice does not know about bob")
|
||||
}
|
||||
|
||||
// bob adds alice contact by importing serialized group created by alice
|
||||
_, err = bob.ImportGroup(exportedGroup)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
err = bob.TrustPeer(aliceOnion)
|
||||
if err != nil {
|
||||
t.Errorf("bob must be able to trust alice, got %v", err)
|
||||
}
|
||||
|
||||
err = bob.InviteOnionToGroup(aliceOnion, groupID)
|
||||
if err == nil {
|
||||
t.Errorf("bob trusts alice but peer connection is not ready yet. should not be able to invite her to group, instead got: %v", err)
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
|
|
@ -1,114 +0,0 @@
|
|||
package peer
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/wire/control"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"log"
|
||||
)
|
||||
|
||||
// CwtchPeerChannel implements the ChannelHandler interface for a channel of
|
||||
// type "im.ricochet.Cwtch". The channel may be inbound or outbound.
|
||||
//
|
||||
// CwtchPeerChannel implements protocol-level sanity and state validation, but
|
||||
// does not handle or acknowledge Cwtch messages. The application must provide
|
||||
// a CwtchPeerChannelHandler implementation to handle Cwtch events.
|
||||
type CwtchPeerChannel struct {
|
||||
// Methods of Handler are called for Cwtch events on this channel
|
||||
Handler CwtchPeerChannelHandler
|
||||
channel *channels.Channel
|
||||
}
|
||||
|
||||
// CwtchPeerChannelHandler is implemented by an application type to receive
|
||||
// events from a CwtchPeerChannel.
|
||||
type CwtchPeerChannelHandler interface {
|
||||
ClientIdentity(*protocol.CwtchIdentity)
|
||||
HandleGroupInvite(*protocol.GroupChatInvite)
|
||||
GetClientIdentityPacket() []byte
|
||||
}
|
||||
|
||||
// SendMessage sends a raw message on this channel
|
||||
func (cpc *CwtchPeerChannel) SendMessage(data []byte) {
|
||||
cpc.channel.SendMessage(data)
|
||||
}
|
||||
|
||||
// Type returns the type string for this channel, e.g. "im.ricochet.Cwtch".
|
||||
func (cpc *CwtchPeerChannel) Type() string {
|
||||
return "im.cwtch.peer"
|
||||
}
|
||||
|
||||
// Closed is called when the channel is closed for any reason.
|
||||
func (cpc *CwtchPeerChannel) Closed(err error) {
|
||||
|
||||
}
|
||||
|
||||
// OnlyClientCanOpen - for Cwtch channels any side can open
|
||||
func (cpc *CwtchPeerChannel) OnlyClientCanOpen() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Singleton - for Cwtch channels there can only be one instance per direction
|
||||
func (cpc *CwtchPeerChannel) Singleton() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Bidirectional - for Cwtch channels are not bidrectional
|
||||
func (cpc *CwtchPeerChannel) Bidirectional() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// RequiresAuthentication - Cwtch channels require hidden service auth
|
||||
func (cpc *CwtchPeerChannel) RequiresAuthentication() string {
|
||||
return "im.ricochet.auth.hidden-service"
|
||||
}
|
||||
|
||||
// OpenInbound is the first method called for an inbound channel request.
|
||||
// If an error is returned, the channel is rejected. If a RawMessage is
|
||||
// returned, it will be sent as the ChannelResult message.
|
||||
func (cpc *CwtchPeerChannel) OpenInbound(channel *channels.Channel, raw *Protocol_Data_Control.OpenChannel) ([]byte, error) {
|
||||
cpc.channel = channel
|
||||
messageBuilder := new(utils.MessageBuilder)
|
||||
return messageBuilder.AckOpenChannel(channel.ID), nil
|
||||
}
|
||||
|
||||
// OpenOutbound is the first method called for an outbound channel request.
|
||||
// If an error is returned, the channel is not opened. If a RawMessage is
|
||||
// returned, it will be sent as the OpenChannel message.
|
||||
func (cpc *CwtchPeerChannel) OpenOutbound(channel *channels.Channel) ([]byte, error) {
|
||||
cpc.channel = channel
|
||||
messageBuilder := new(utils.MessageBuilder)
|
||||
return messageBuilder.OpenChannel(channel.ID, cpc.Type()), nil
|
||||
}
|
||||
|
||||
// OpenOutboundResult is called when a response is received for an
|
||||
// outbound OpenChannel request. If `err` is non-nil, the channel was
|
||||
// rejected and Closed will be called immediately afterwards. `raw`
|
||||
// contains the raw protocol message including any extension data.
|
||||
func (cpc *CwtchPeerChannel) OpenOutboundResult(err error, crm *Protocol_Data_Control.ChannelResult) {
|
||||
if err == nil {
|
||||
if crm.GetOpened() {
|
||||
cpc.channel.Pending = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Packet is called for each raw packet received on this channel.
|
||||
func (cpc *CwtchPeerChannel) Packet(data []byte) {
|
||||
cpp := &protocol.CwtchPeerPacket{}
|
||||
err := proto.Unmarshal(data, cpp)
|
||||
if err == nil {
|
||||
if cpp.GetCwtchIdentify() != nil {
|
||||
cpc.Handler.ClientIdentity(cpp.GetCwtchIdentify())
|
||||
pkt := cpc.Handler.GetClientIdentityPacket()
|
||||
if pkt != nil {
|
||||
cpc.SendMessage(pkt)
|
||||
}
|
||||
} else if cpp.GetGroupChatInvite() != nil {
|
||||
cpc.Handler.HandleGroupInvite(cpp.GetGroupChatInvite())
|
||||
}
|
||||
} else {
|
||||
log.Printf("Error Receivng Packet %v\n", err)
|
||||
}
|
||||
}
|
|
@ -1,121 +0,0 @@
|
|||
package peer
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/wire/control"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPeerChannelAttributes(t *testing.T) {
|
||||
cssc := new(CwtchPeerChannel)
|
||||
if cssc.Type() != "im.cwtch.peer" {
|
||||
t.Errorf("cwtch channel type is incorrect %v", cssc.Type())
|
||||
}
|
||||
|
||||
if cssc.OnlyClientCanOpen() {
|
||||
t.Errorf("either side should be able to open im.cwtch.peer channel")
|
||||
}
|
||||
|
||||
if cssc.Bidirectional() {
|
||||
t.Errorf("im.cwtch.peer should not be bidirectional")
|
||||
}
|
||||
|
||||
if !cssc.Singleton() {
|
||||
t.Errorf("im.cwtch.server.listen should be a Singleton")
|
||||
}
|
||||
|
||||
if cssc.RequiresAuthentication() != "im.ricochet.auth.hidden-service" {
|
||||
t.Errorf("cwtch channel required auth is incorrect %v", cssc.RequiresAuthentication())
|
||||
}
|
||||
}
|
||||
|
||||
type TestHandler struct {
|
||||
Received bool
|
||||
ReceviedGroupInvite bool
|
||||
}
|
||||
|
||||
func (th *TestHandler) ClientIdentity(ci *protocol.CwtchIdentity) {
|
||||
if ci.GetName() == "hello" {
|
||||
th.Received = true
|
||||
}
|
||||
}
|
||||
|
||||
func (th *TestHandler) HandleGroupInvite(ci *protocol.GroupChatInvite) {
|
||||
///if ci.GetName() == "hello" {
|
||||
th.ReceviedGroupInvite = true
|
||||
//}
|
||||
}
|
||||
|
||||
func (th *TestHandler) GetClientIdentityPacket() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestPeerChannel(t *testing.T) {
|
||||
th := new(TestHandler)
|
||||
cpc := new(CwtchPeerChannel)
|
||||
cpc.Handler = th
|
||||
channel := new(channels.Channel)
|
||||
channel.ID = 3
|
||||
result, err := cpc.OpenOutbound(channel)
|
||||
if err != nil {
|
||||
t.Errorf("should have send open channel request instead %v, %v", result, err)
|
||||
}
|
||||
|
||||
cpc2 := new(CwtchPeerChannel)
|
||||
channel2 := new(channels.Channel)
|
||||
channel2.ID = 3
|
||||
sent := false
|
||||
channel2.SendMessage = func(message []byte) {
|
||||
sent = true
|
||||
}
|
||||
|
||||
control := new(Protocol_Data_Control.Packet)
|
||||
proto.Unmarshal(result[:], control)
|
||||
ack, err := cpc2.OpenInbound(channel2, control.GetOpenChannel())
|
||||
if err != nil {
|
||||
t.Errorf("should have ack open channel request instead %v, %v", ack, err)
|
||||
}
|
||||
|
||||
ackpacket := new(Protocol_Data_Control.Packet)
|
||||
proto.Unmarshal(ack[:], ackpacket)
|
||||
cpc.OpenOutboundResult(nil, ackpacket.GetChannelResult())
|
||||
if channel.Pending != false {
|
||||
t.Errorf("Channel should no longer be pending")
|
||||
}
|
||||
|
||||
gm := &protocol.CwtchIdentity{
|
||||
Name: "hello",
|
||||
Ed25519PublicKey: []byte{},
|
||||
}
|
||||
|
||||
cpp := &protocol.CwtchPeerPacket{
|
||||
CwtchIdentify: gm,
|
||||
}
|
||||
packet, _ := proto.Marshal(cpp)
|
||||
cpc.Packet(packet)
|
||||
if th.Received == false {
|
||||
t.Errorf("Should have sent packet to handler")
|
||||
}
|
||||
|
||||
cpc2.SendMessage(packet)
|
||||
if sent == false {
|
||||
t.Errorf("Should have sent packet to channel")
|
||||
}
|
||||
|
||||
gci := &protocol.GroupChatInvite{
|
||||
GroupName: "hello",
|
||||
GroupSharedKey: []byte{},
|
||||
ServerHost: "abc.onion",
|
||||
}
|
||||
|
||||
cpp = &protocol.CwtchPeerPacket{
|
||||
GroupChatInvite: gci,
|
||||
}
|
||||
packet, _ = proto.Marshal(cpp)
|
||||
cpc.Packet(packet)
|
||||
if th.ReceviedGroupInvite == false {
|
||||
t.Errorf("Should have sent invite packet to handler")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,105 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Manager encapsulates all the logic necessary to manage outgoing peer and server connections.
|
||||
type Manager struct {
|
||||
serverConnections map[string]*PeerServerConnection
|
||||
lock sync.Mutex
|
||||
breakChannel chan bool
|
||||
acn connectivity.ACN
|
||||
}
|
||||
|
||||
// NewConnectionsManager creates a new instance of Manager.
|
||||
func NewConnectionsManager(acn connectivity.ACN) *Manager {
|
||||
m := new(Manager)
|
||||
m.acn = acn
|
||||
m.serverConnections = make(map[string]*PeerServerConnection)
|
||||
m.breakChannel = make(chan bool)
|
||||
return m
|
||||
}
|
||||
|
||||
// ManageServerConnection creates a new ServerConnection for Host with the given callback handler.
|
||||
// If there is an establish connection, it is replaced with a new one, assuming this came from
|
||||
// a new JoinServer from a new Group being joined. If it is still connecting to a server, the second request will be abandonded
|
||||
func (m *Manager) ManageServerConnection(host string, engine Engine, messageHandler func(string, *protocol.GroupMessage)) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
psc, exists := m.serverConnections[host]
|
||||
|
||||
if exists {
|
||||
if psc.GetState() == DISCONNECTED || psc.GetState() == CONNECTING || psc.GetState() == CONNECTED {
|
||||
log.Infof("Already connecting to %v, abandoning fresh attempt\n", host)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
newPsc := NewPeerServerConnection(engine, host)
|
||||
newPsc.GroupMessageHandler = messageHandler
|
||||
go newPsc.Run()
|
||||
m.serverConnections[host] = newPsc
|
||||
|
||||
if exists {
|
||||
log.Infof("Closing connection to %v, replacing with this one\n", host)
|
||||
psc.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// SetServerSynced is a helper for peerserver connections and engine to call when a Fetch is done to set the state of the connection to SYNCED
|
||||
func (m *Manager) SetServerSynced(onion string) {
|
||||
m.serverConnections[onion].setState(SYNCED)
|
||||
}
|
||||
|
||||
// GetPeerServerConnectionForOnion safely returns a given host connection
|
||||
func (m *Manager) GetPeerServerConnectionForOnion(host string) (psc *PeerServerConnection) {
|
||||
m.lock.Lock()
|
||||
psc = m.serverConnections[host]
|
||||
m.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// AttemptReconnections repeatedly attempts to reconnect with failed peers and servers.
|
||||
func (m *Manager) AttemptReconnections() {
|
||||
maxTimeout := time.Minute * 5
|
||||
// nearly instant first run, next few runs will prolly be too quick to have any FAILED and will gracefully slow to MAX after that
|
||||
timeout := time.Millisecond * 500
|
||||
for {
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
m.lock.Lock()
|
||||
for _, psc := range m.serverConnections {
|
||||
if psc.GetState() == FAILED {
|
||||
go psc.Run()
|
||||
}
|
||||
}
|
||||
m.lock.Unlock()
|
||||
|
||||
if timeout < maxTimeout {
|
||||
timeout = timeout * 2
|
||||
} else {
|
||||
timeout = maxTimeout
|
||||
}
|
||||
case <-m.breakChannel:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown closes all connections under management (freeing their goroutines)
|
||||
func (m *Manager) Shutdown() {
|
||||
m.breakChannel <- true
|
||||
m.lock.Lock()
|
||||
for onion, psc := range m.serverConnections {
|
||||
psc.Close()
|
||||
delete(m.serverConnections, onion)
|
||||
}
|
||||
m.lock.Unlock()
|
||||
}
|
|
@ -1,10 +1,11 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestConnectionsManager(t *testing.T) {
|
||||
// TODO We need to encapsulate connections behind a well defined interface for tesintg
|
||||
NewConnectionsManager()
|
||||
NewConnectionsManager(connectivity.LocalProvider())
|
||||
}
|
|
@ -0,0 +1,313 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/tapir"
|
||||
"cwtch.im/tapir/applications"
|
||||
"cwtch.im/tapir/networks/tor"
|
||||
"cwtch.im/tapir/primitives"
|
||||
"errors"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// engine is the concrete implementation of the Engine interface; it makes and
// receives Cwtch connections on behalf of a single identity.
type engine struct {
	queue              event.Queue
	connectionsManager *Manager

	// Engine Attributes
	identity primitives.Identity
	acn      connectivity.ACN

	// Engine State
	started bool

	// Blocklist
	// keys are peer onion addresses; presence in the map means blocked
	blocked sync.Map

	// Pointer to the Global Event Manager
	eventManager event.Manager

	// Nextgen Tapir Service
	service tapir.Service

	// Required for listen(), inaccessible from identity
	privateKey ed25519.PrivateKey

	// set by Shutdown(); checked by callbacks to suppress events during teardown
	shuttingDown bool
}
|
||||
|
||||
// Engine (ProtocolEngine) encapsulates the logic necessary to make and receive Cwtch connections.
// Note: ProtocolEngine doesn't have access to any information necessary to encrypt or decrypt GroupMessages
type Engine interface {
	// ACN returns the anonymous communication network this engine dials through.
	ACN() connectivity.ACN
	// EventManager returns the global event manager this engine publishes to.
	EventManager() event.Manager
	// Shutdown tears down the engine, its connections, and its event queue.
	Shutdown()
}
|
||||
|
||||
// NewProtocolEngine initializes a new engine that runs Cwtch using the given parameters
//
// The event-handler goroutine is started before subscriptions are registered,
// and the tapir onion service is initialized (but not listening) here; actual
// listening starts on a ProtocolEngineStartListen event.
func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateKey, acn connectivity.ACN, eventManager event.Manager, blockedPeers []string) Engine {
	engine := new(engine)
	engine.identity = identity
	engine.privateKey = privateKey
	engine.queue = event.NewQueue()
	// Dedicated goroutine that drains engine.queue until shutdown.
	go engine.eventHandler()

	engine.acn = acn
	engine.connectionsManager = NewConnectionsManager(engine.acn)
	// Background retry loop for FAILED server connections.
	go engine.connectionsManager.AttemptReconnections()

	// Init the Server running the Simple App.
	engine.service = new(tor.BaseOnionService)
	engine.service.Init(acn, privateKey, &identity)

	engine.eventManager = eventManager

	engine.eventManager.Subscribe(event.ProtocolEngineStartListen, engine.queue)
	engine.eventManager.Subscribe(event.PeerRequest, engine.queue)
	engine.eventManager.Subscribe(event.InvitePeerToGroup, engine.queue)
	engine.eventManager.Subscribe(event.JoinServer, engine.queue)
	engine.eventManager.Subscribe(event.SendMessageToGroup, engine.queue)
	engine.eventManager.Subscribe(event.SendMessageToPeer, engine.queue)
	engine.eventManager.Subscribe(event.DeleteContact, engine.queue)
	engine.eventManager.Subscribe(event.DeleteGroup, engine.queue)

	engine.eventManager.Subscribe(event.BlockPeer, engine.queue)
	engine.eventManager.Subscribe(event.UnblockPeer, engine.queue)
	// Seed the blocklist with any peers the caller already knows are blocked.
	for _, peer := range blockedPeers {
		engine.blocked.Store(peer, true)
	}
	return engine
}
|
||||
|
||||
// ACN returns the anonymous communication network this engine was constructed with.
func (e *engine) ACN() connectivity.ACN {
	return e.acn
}
|
||||
|
||||
// EventManager returns the global event manager this engine publishes to.
func (e *engine) EventManager() event.Manager {
	return e.eventManager
}
||||
|
||||
// eventHandler processes events from other subsystems.
// Runs as a dedicated goroutine (started by NewProtocolEngine) and loops until
// an unrecognized event arrives (see the default case below).
func (e *engine) eventHandler() {
	for {
		ev := e.queue.Next()
		switch ev.EventType {
		case event.StatusRequest:
			// Liveness probe: echo back a ProtocolEngineStatus with the same EventID.
			e.eventManager.Publish(event.Event{EventType: event.ProtocolEngineStatus, EventID: ev.EventID})
		case event.PeerRequest:
			// peerWithOnion blocks on Open, so run it in its own goroutine.
			go e.peerWithOnion(ev.Data[event.RemotePeer])
		case event.InvitePeerToGroup:
			e.sendMessageToPeer(ev.EventID, ev.Data[event.RemotePeer], event.ContextInvite, []byte(ev.Data[event.GroupInvite]))
		case event.JoinServer:
			e.joinServer(ev.Data[event.GroupServer])
		case event.DeleteContact:
			onion := ev.Data[event.RemotePeer]
			e.deleteConnection(onion)
		case event.DeleteGroup:
			// TODO: There isn't a way here to determine if other Groups are using a server connection...
		case event.SendMessageToGroup:
			e.sendMessageToGroup(ev.Data[event.GroupServer], []byte(ev.Data[event.Ciphertext]), []byte(ev.Data[event.Signature]))
		case event.SendMessageToPeer:
			// TODO: remove this passthrough once the UI is integrated.
			context, ok := ev.Data[event.EventContext]
			if !ok {
				context = event.ContextRaw
			}
			err := e.sendMessageToPeer(ev.EventID, ev.Data[event.RemotePeer], context, []byte(ev.Data[event.Data]))
			if err != nil {
				e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.Signature: ev.EventID, event.Error: "peer is offline or the connection has yet to finalize"}))
			}
		case event.UnblockPeer:
			// We simply remove the peer from our blocklist
			// The UI has the responsibility to reinitiate contact with the peer.
			// (this should happen periodically in any case)
			e.blocked.Delete(ev.Data[event.RemotePeer])
		case event.BlockPeer:
			e.blocked.Store(ev.Data[event.RemotePeer], true)
			connection, err := e.service.GetConnection(ev.Data[event.RemotePeer])
			if connection != nil && err == nil {
				connection.Close()
			}
			// Explicitly send a disconnected event (if we don't do this here then the UI can wait for a while before
			// an ongoing Open() connection fails and so the user will see a blocked peer as still connecting (because
			// there isn't an active connection and we are stuck waiting for tor to time out)
			e.peerDisconnected(ev.Data[event.RemotePeer])
		case event.ProtocolEngineStartListen:
			go e.listenFn()
		default:
			// NOTE(review): any unrecognized event type terminates this goroutine.
			// Presumably queue shutdown surfaces as a zero-valued event here and
			// this is the intended exit path — confirm against event.Queue.
			return
		}
	}
}
|
||||
|
||||
func (e *engine) createPeerTemplate() *PeerApp {
|
||||
peerAppTemplate := new(PeerApp)
|
||||
peerAppTemplate.IsBlocked = func(onion string) bool {
|
||||
_, blocked := e.blocked.Load(onion)
|
||||
return blocked
|
||||
}
|
||||
peerAppTemplate.MessageHandler = e.handlePeerMessage
|
||||
peerAppTemplate.OnAcknowledgement = e.ignoreOnShutdown2(e.peerAck)
|
||||
peerAppTemplate.OnAuth = e.ignoreOnShutdown(e.peerAuthed)
|
||||
peerAppTemplate.OnConnecting = e.ignoreOnShutdown(e.peerConnecting)
|
||||
peerAppTemplate.OnClose = e.ignoreOnShutdown(e.peerDisconnected)
|
||||
return peerAppTemplate
|
||||
}
|
||||
|
||||
// Listen sets up an onion listener to process incoming cwtch messages
|
||||
func (e *engine) listenFn() {
|
||||
err := e.service.Listen(e.createPeerTemplate())
|
||||
if !e.shuttingDown {
|
||||
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineStopped, map[event.Field]string{event.Identity: e.identity.Hostname(), event.Error: err.Error()}))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Shutdown tears down the eventHandler goroutine
// Order matters: the shuttingDown flag is set first so in-flight callbacks
// become no-ops, then connections, the tapir service, and finally the event
// queue are torn down.
func (e *engine) Shutdown() {
	e.shuttingDown = true
	e.connectionsManager.Shutdown()
	e.service.Shutdown()
	e.queue.Shutdown()
}
|
||||
|
||||
// peerWithOnion is the entry point for cwtchPeer relationships
// needs to be run in a goroutine as will block on Open.
// Blocked peers are never dialed.
func (e *engine) peerWithOnion(onion string) {
	_, blocked := e.blocked.Load(onion)
	if !blocked {
		// Report CONNECTING immediately so the UI reflects the attempt.
		e.ignoreOnShutdown(e.peerConnecting)(onion)
		connected, err := e.service.Connect(onion, e.createPeerTemplate())

		// If we are already connected...check if we are authed and issue an auth event
		// (This allows the ui to be stateless)
		// NOTE(review): this relies on Connect returning (true, non-nil err) for
		// an already-existing connection — confirm against tapir's Connect contract.
		if connected && err != nil {
			conn, err := e.service.GetConnection(onion)
			if err == nil {
				if conn.HasCapability(applications.AuthCapability) {
					e.ignoreOnShutdown(e.peerAuthed)(onion)
					return
				}
			}
		}

		// Only issue a disconnected error if we are disconnected (Connect will fail if a connection already exists)
		if !connected && err != nil {
			e.ignoreOnShutdown(e.peerDisconnected)(onion)
		}
	}
}
|
||||
|
||||
func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
|
||||
return func(x string) {
|
||||
if !e.shuttingDown {
|
||||
f(x)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *engine) ignoreOnShutdown2(f func(string, string)) func(string, string) {
|
||||
return func(x, y string) {
|
||||
if !e.shuttingDown {
|
||||
f(x, y)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *engine) peerAuthed(onion string) {
|
||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||
event.RemotePeer: string(onion),
|
||||
event.ConnectionState: ConnectionStateName[AUTHENTICATED],
|
||||
}))
|
||||
}
|
||||
|
||||
func (e *engine) peerConnecting(onion string) {
|
||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||
event.RemotePeer: string(onion),
|
||||
event.ConnectionState: ConnectionStateName[CONNECTING],
|
||||
}))
|
||||
}
|
||||
|
||||
func (e *engine) peerAck(onion string, eventID string) {
|
||||
e.eventManager.Publish(event.NewEvent(event.PeerAcknowledgement, map[event.Field]string{
|
||||
event.EventID: eventID,
|
||||
event.RemotePeer: onion,
|
||||
}))
|
||||
}
|
||||
|
||||
func (e *engine) peerDisconnected(onion string) {
|
||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||
event.RemotePeer: string(onion),
|
||||
event.ConnectionState: ConnectionStateName[DISCONNECTED],
|
||||
}))
|
||||
}
|
||||
|
||||
// sendMessageToPeer sends a message to a peer under a given context
|
||||
func (e *engine) sendMessageToPeer(eventID string, onion string, context string, message []byte) error {
|
||||
conn, err := e.service.GetConnection(onion)
|
||||
if err == nil {
|
||||
peerApp, ok := (conn.App()).(*PeerApp)
|
||||
if ok {
|
||||
peerApp.SendMessage(PeerMessage{eventID, context, message})
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed type assertion conn.App != PeerApp")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (e *engine) deleteConnection(id string) {
|
||||
conn, err := e.service.GetConnection(id)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// receiveGroupMessage is a callback function that processes GroupMessages from a given server
// Note: This technically means that *multiple* Profile Engines could listen to the same ProtocolEngine!
func (e *engine) receiveGroupMessage(server string, gm *protocol.GroupMessage) {
	// Publish Event so that a Profile Engine can deal with it.
	// The server argument is intentionally not forwarded; consumers only see
	// ciphertext and signature.
	e.eventManager.Publish(event.NewEvent(event.EncryptedGroupMessage, map[event.Field]string{event.Ciphertext: string(gm.GetCiphertext()), event.Signature: string(gm.GetSignature())}))
}
|
||||
|
||||
// joinServer manages a new server connection with the given onion address
// Group messages arriving over that connection flow back through receiveGroupMessage.
func (e *engine) joinServer(onion string) {
	e.connectionsManager.ManageServerConnection(onion, e, e.receiveGroupMessage)
}
|
||||
|
||||
// sendMessageToGroup attempts to sent the given message to the given group id.
|
||||
func (e *engine) sendMessageToGroup(server string, ct []byte, sig []byte) {
|
||||
psc := e.connectionsManager.GetPeerServerConnectionForOnion(server)
|
||||
if psc == nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupServer: server, event.Signature: string(sig), event.Error: "server is offline or the connection has yet to finalize"}))
|
||||
}
|
||||
gm := &protocol.GroupMessage{
|
||||
Ciphertext: ct,
|
||||
Signature: sig,
|
||||
}
|
||||
err := psc.SendGroupMessage(gm)
|
||||
|
||||
if err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupServer: server, event.Signature: string(sig), event.Error: err.Error()}))
|
||||
}
|
||||
}
|
||||
|
||||
// handlePeerMessage dispatches an authenticated peer's message by context:
// group invites are unwrapped and republished as NewGroupInvite; everything
// else is forwarded verbatim as NewMessageFromPeer.
func (e *engine) handlePeerMessage(hostname string, context string, message []byte) {
	log.Debugf("New message from peer: %v %v", hostname, context)
	if context == event.ContextInvite {
		cpp := &protocol.CwtchPeerPacket{}
		err := proto.Unmarshal(message, cpp)
		if err == nil && cpp.GetGroupChatInvite() != nil {
			// Re-serialize just the invite so consumers receive a bare GroupChatInvite.
			marshal, _ := proto.Marshal(cpp.GetGroupChatInvite())
			e.eventManager.Publish(event.NewEvent(event.NewGroupInvite, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.GroupInvite: string(marshal)}))
		}
		// NOTE(review): malformed invite packets are silently dropped here — confirm intended.
	} else {
		e.eventManager.Publish(event.NewEvent(event.NewMessageFromPeer, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.Data: string(message)}))
	}
}
|
|
@ -19,6 +19,7 @@ type CwtchPeerFetchChannel struct {
|
|||
// CwtchPeerFetchChannelHandler should be implemented by peers to receive new messages.
type CwtchPeerFetchChannelHandler interface {
	// HandleGroupMessage is invoked for each group message received from the server.
	HandleGroupMessage(*protocol.GroupMessage)
	// HandleFetchDone is invoked once the fetch channel closes.
	HandleFetchDone()
}
|
||||
|
||||
// Type returns the type string for this channel, e.g. "im.ricochet.server.fetch)
|
||||
|
@ -28,7 +29,7 @@ func (cpfc *CwtchPeerFetchChannel) Type() string {
|
|||
|
||||
// Closed is called when the channel is closed for any reason.
func (cpfc *CwtchPeerFetchChannel) Closed(err error) {
	// A closed fetch channel means the fetch is finished (or aborted); either
	// way, notify the handler. The error value is intentionally not inspected.
	cpfc.Handler.HandleFetchDone()
}
|
||||
|
||||
// OnlyClientCanOpen - for Cwtch server channels only client can open
|
||||
|
@ -97,7 +98,7 @@ func (cpfc *CwtchPeerFetchChannel) Packet(data []byte) {
|
|||
gm := csp.GetGroupMessage()
|
||||
// We create a new go routine here to avoid leaking any information about processing time
|
||||
// TODO Server can probably try to use this to DoS a peer
|
||||
go cpfc.Handler.HandleGroupMessage(gm)
|
||||
cpfc.Handler.HandleGroupMessage(gm)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -11,12 +11,17 @@ import (
|
|||
|
||||
// TestHandler is a minimal CwtchPeerFetchChannelHandler that records whether
// each callback has fired, for use in channel tests.
type TestHandler struct {
	Received bool // set once HandleGroupMessage has been invoked
	Closed   bool // set once HandleFetchDone has been invoked
}

// HandleGroupMessage records that a group message was delivered.
func (th *TestHandler) HandleGroupMessage(m *protocol.GroupMessage) {
	th.Received = true
}

// HandleFetchDone records that the fetch channel closed.
func (th *TestHandler) HandleFetchDone() {
	th.Closed = true
}
|
||||
|
||||
func TestPeerFetchChannelAttributes(t *testing.T) {
|
||||
cssc := new(CwtchPeerFetchChannel)
|
||||
if cssc.Type() != "im.cwtch.server.fetch" {
|
|
@ -78,9 +78,7 @@ func (cplc *CwtchPeerListenChannel) Packet(data []byte) {
|
|||
if err == nil {
|
||||
if csp.GetGroupMessage() != nil {
|
||||
gm := csp.GetGroupMessage()
|
||||
// We create a new go routine here to avoid leaking any information about processing time
|
||||
// TODO Server can probably try to use this to DoS a peer
|
||||
go cplc.Handler.HandleGroupMessage(gm)
|
||||
cplc.Handler.HandleGroupMessage(gm)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,95 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/tapir"
|
||||
"cwtch.im/tapir/applications"
|
||||
"encoding/json"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
)
|
||||
|
||||
// PeerApp encapsulates the behaviour of a Cwtch Peer
type PeerApp struct {
	applications.AuthApp
	connection tapir.Connection
	// MessageHandler receives (hostname, context, payload) for each non-ack message.
	MessageHandler func(string, string, []byte)
	// IsBlocked reports whether the given onion address is on the blocklist.
	IsBlocked func(string) bool
	// OnAcknowledgement receives (hostname, message ID) when the peer acks a message.
	OnAcknowledgement func(string, string)
	// OnAuth is invoked when a connection authenticates successfully.
	OnAuth func(string)
	// OnClose is invoked when a connection is closed or rejected.
	OnClose func(string)
	// OnConnecting is invoked while a connection attempt is in progress.
	OnConnecting func(string)
}
|
||||
|
||||
// PeerMessage is an encapsulation that can be used by higher level applications
type PeerMessage struct {
	ID      string // A unique Message ID (primarily used for acknowledgments)
	Context string // A unique context identifier i.e. im.cwtch.chat
	Data    []byte // The serialized data packet.
}
|
||||
|
||||
// NewInstance should always return a new instantiation of the application.
|
||||
func (pa PeerApp) NewInstance() tapir.Application {
|
||||
newApp := new(PeerApp)
|
||||
newApp.MessageHandler = pa.MessageHandler
|
||||
newApp.IsBlocked = pa.IsBlocked
|
||||
newApp.OnAcknowledgement = pa.OnAcknowledgement
|
||||
newApp.OnAuth = pa.OnAuth
|
||||
newApp.OnClose = pa.OnClose
|
||||
newApp.OnConnecting = pa.OnConnecting
|
||||
return newApp
|
||||
}
|
||||
|
||||
// Init is run when the connection is first started.
func (pa *PeerApp) Init(connection tapir.Connection) {

	// First run the Authentication App
	pa.AuthApp.Init(connection)

	if connection.HasCapability(applications.AuthCapability) {

		pa.connection = connection

		if pa.IsBlocked(connection.Hostname()) {
			// Blocked peers are dropped immediately after auth.
			pa.connection.Close()
			pa.OnClose(connection.Hostname())
		} else {
			pa.OnAuth(connection.Hostname())
			// listen blocks on reads, so it runs in its own goroutine.
			go pa.listen()
		}
	} else {
		// Authentication failed; report the connection as closed.
		pa.OnClose(connection.Hostname())
	}
}
||||
|
||||
// listen reads PeerMessages off the connection until the socket fails.
// Value receiver: pa is a copy, but connection and the callback fields are
// reference-like, so the copy still observes the live connection.
func (pa PeerApp) listen() {
	for {
		message := pa.connection.Expect()
		if len(message) == 0 {
			log.Errorf("0 byte read, socket has likely failed. Closing the listen goroutine")
			pa.OnClose(pa.connection.Hostname())
			return
		}
		var peerMessage PeerMessage
		err := json.Unmarshal(message, &peerMessage)
		if err == nil {
			if peerMessage.Context == event.ContextAck {
				pa.OnAcknowledgement(pa.connection.Hostname(), peerMessage.ID)
			} else {
				pa.MessageHandler(pa.connection.Hostname(), peerMessage.Context, peerMessage.Data)

				// Acknowledge the message
				// TODO Should this be in the ui?
				pa.SendMessage(PeerMessage{peerMessage.ID, event.ContextAck, []byte{}})
			}
		} else {
			// Malformed messages are logged and skipped; the loop keeps reading.
			log.Errorf("Error unmarshalling PeerMessage package: %x %v", message, err)
		}
	}
}
|
||||
|
||||
// SendMessage sends the peer a preformatted message
// NOTE: This is a stub, we will likely want to extend this to better reflect the desired protocol
func (pa PeerApp) SendMessage(message PeerMessage) {
	// The marshal error is ignored: PeerMessage contains only string/[]byte
	// fields, so json.Marshal cannot fail for it in practice.
	serialized, _ := json.Marshal(message)
	pa.connection.Send(serialized)
}
|
|
@ -1,34 +1,39 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/peer/fetch"
|
||||
"cwtch.im/cwtch/peer/listen"
|
||||
"cwtch.im/cwtch/peer/send"
|
||||
"crypto/rand"
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/protocol/connections/fetch"
|
||||
"cwtch.im/cwtch/protocol/connections/listen"
|
||||
"cwtch.im/cwtch/protocol/connections/send"
|
||||
"errors"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connection"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/identity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"log"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PeerServerConnection encapsulates a single Peer->Server connection
|
||||
type PeerServerConnection struct {
|
||||
connection.AutoConnectionHandler
|
||||
Server string
|
||||
state ConnectionState
|
||||
connection *connection.Connection
|
||||
Server string
|
||||
state ConnectionState
|
||||
connection *connection.Connection
|
||||
protocolEngine Engine
|
||||
|
||||
GroupMessageHandler func(string, *protocol.GroupMessage)
|
||||
}
|
||||
|
||||
// NewPeerServerConnection creates a new Peer->Server outbound connection
|
||||
func NewPeerServerConnection(serverhostname string) *PeerServerConnection {
|
||||
func NewPeerServerConnection(engine Engine, serverhostname string) *PeerServerConnection {
|
||||
psc := new(PeerServerConnection)
|
||||
psc.protocolEngine = engine
|
||||
psc.Server = serverhostname
|
||||
psc.setState(DISCONNECTED)
|
||||
psc.Init()
|
||||
return psc
|
||||
}
|
||||
|
@ -38,19 +43,45 @@ func (psc *PeerServerConnection) GetState() ConnectionState {
|
|||
return psc.state
|
||||
}
|
||||
|
||||
// setState transitions the connection to the given state and broadcasts the
// change as a ServerStateChange event so observers can track server connectivity.
func (psc *PeerServerConnection) setState(state ConnectionState) {
	log.Debugf("Setting State to %v for %v\n", ConnectionStateName[state], psc.Server)
	psc.state = state
	psc.protocolEngine.EventManager().Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
		event.GroupServer:     string(psc.Server),
		event.ConnectionState: ConnectionStateName[state],
	}))
}
|
||||
|
||||
// WaitTilSynced waits until the underlying connection reaches the SYNCED state.
// NOTE(review): this polls once per second with no timeout — it blocks forever
// if the connection fails instead of syncing; confirm callers can tolerate that.
func (psc *PeerServerConnection) WaitTilSynced() {
	for {
		if psc.GetState() == SYNCED {
			break
		}
		time.Sleep(time.Second * 1)
	}
}
|
||||
|
||||
// Run manages the setup and teardown of a peer server connection
|
||||
func (psc *PeerServerConnection) Run() error {
|
||||
log.Printf("Connecting to %v", psc.Server)
|
||||
rc, err := goricochet.Open(psc.Server)
|
||||
log.Infof("Connecting to %v", psc.Server)
|
||||
psc.setState(CONNECTING)
|
||||
|
||||
rc, err := goricochet.Open(psc.protocolEngine.ACN(), psc.Server)
|
||||
if err == nil {
|
||||
rc.TraceLog(true)
|
||||
psc.connection = rc
|
||||
psc.state = CONNECTED
|
||||
pk, err := utils.GeneratePrivateKey()
|
||||
if psc.GetState() == KILLED {
|
||||
return nil
|
||||
}
|
||||
psc.setState(CONNECTED)
|
||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err == nil {
|
||||
_, err := connection.HandleOutboundConnection(psc.connection).ProcessAuthAsClient(identity.Initialize("cwtchpeer", pk))
|
||||
_, err := connection.HandleOutboundConnection(psc.connection).ProcessAuthAsV3Client(identity.InitializeV3("cwtchpeer", &priv, &pub))
|
||||
if err == nil {
|
||||
psc.state = AUTHENTICATED
|
||||
if psc.GetState() == KILLED {
|
||||
return nil
|
||||
}
|
||||
psc.setState(AUTHENTICATED)
|
||||
|
||||
go func() {
|
||||
psc.connection.Do(func() error {
|
||||
|
@ -67,7 +98,7 @@ func (psc *PeerServerConnection) Run() error {
|
|||
}
|
||||
}
|
||||
}
|
||||
psc.state = FAILED
|
||||
psc.setState(FAILED)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -78,8 +109,8 @@ func (psc *PeerServerConnection) Break() error {
|
|||
|
||||
// SendGroupMessage sends the given protocol message to the Server.
|
||||
func (psc *PeerServerConnection) SendGroupMessage(gm *protocol.GroupMessage) error {
|
||||
if psc.state != AUTHENTICATED {
|
||||
return errors.New("peer is not yet connected & authenticated to server cannot send message")
|
||||
if psc.state != SYNCED {
|
||||
return errors.New("peer is not yet connected & authenticated & synced to server cannot send message")
|
||||
}
|
||||
|
||||
err := psc.connection.Do(func() error {
|
||||
|
@ -89,7 +120,7 @@ func (psc *PeerServerConnection) SendGroupMessage(gm *protocol.GroupMessage) err
|
|||
|
||||
errCount := 0
|
||||
for errCount < 5 {
|
||||
time.Sleep(time.Second * 1)
|
||||
time.Sleep(time.Second * time.Duration(errCount+1)) // back off retry
|
||||
err = psc.connection.Do(func() error {
|
||||
channel := psc.connection.Channel("im.cwtch.server.send", channels.Outbound)
|
||||
if channel == nil {
|
||||
|
@ -114,12 +145,18 @@ func (psc *PeerServerConnection) SendGroupMessage(gm *protocol.GroupMessage) err
|
|||
|
||||
// Close shuts down the connection (freeing the handler goroutines)
|
||||
func (psc *PeerServerConnection) Close() {
|
||||
psc.state = KILLED
|
||||
psc.connection.Conn.Close()
|
||||
psc.setState(KILLED)
|
||||
if psc.connection != nil {
|
||||
psc.connection.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// HandleGroupMessage passes the given group message back to the profile.
func (psc *PeerServerConnection) HandleGroupMessage(gm *protocol.GroupMessage) {
	// NOTE(review): this logs the full (encrypted) message contents — consider
	// whether that is appropriate for production logging.
	log.Printf("Received Group Message: %v", gm)
	psc.GroupMessageHandler(psc.Server, gm)
}
|
||||
|
||||
// HandleFetchDone calls the supplied callback for when a fetch connection is closed
// A completed fetch means all stored messages were received, so the connection
// transitions to SYNCED (which also permits SendGroupMessage).
func (psc *PeerServerConnection) HandleFetchDone() {
	psc.setState(SYNCED)
}
|
|
@ -1,39 +1,42 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/server/fetch"
|
||||
"cwtch.im/cwtch/server/send"
|
||||
"cwtch.im/tapir/primitives"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connection"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/identity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
identityOld "git.openprivacy.ca/openprivacy/libricochet-go/identity"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func ServerAuthValid(string, rsa.PublicKey) (allowed, known bool) {
|
||||
func ServerAuthValid(hostname string, key ed25519.PublicKey) (allowed, known bool) {
|
||||
return true, true
|
||||
}
|
||||
|
||||
type TestServer struct {
|
||||
connection.AutoConnectionHandler
|
||||
Received bool
|
||||
Received chan bool
|
||||
}
|
||||
|
||||
func (ts *TestServer) HandleGroupMessage(gm *protocol.GroupMessage) {
|
||||
ts.Received = true
|
||||
ts.Received <- true
|
||||
}
|
||||
|
||||
func (ts *TestServer) HandleFetchRequest() []*protocol.GroupMessage {
|
||||
return []*protocol.GroupMessage{{Ciphertext: []byte("hello"), Signature: []byte{}, Spamguard: []byte{}}, {Ciphertext: []byte("hello"), Signature: []byte{}, Spamguard: []byte{}}}
|
||||
}
|
||||
|
||||
func runtestserver(t *testing.T, ts *TestServer, privateKey *rsa.PrivateKey) {
|
||||
func runtestserver(t *testing.T, ts *TestServer, priv ed25519.PrivateKey, identity primitives.Identity, listenChan chan bool) {
|
||||
ln, _ := net.Listen("tcp", "127.0.0.1:5451")
|
||||
listenChan <- true
|
||||
conn, _ := ln.Accept()
|
||||
defer conn.Close()
|
||||
|
||||
|
@ -41,8 +44,9 @@ func runtestserver(t *testing.T, ts *TestServer, privateKey *rsa.PrivateKey) {
|
|||
if err != nil {
|
||||
t.Errorf("Negotiate Version Error: %v", err)
|
||||
}
|
||||
rc.TraceLog(true)
|
||||
err = connection.HandleInboundConnection(rc).ProcessAuthAsServer(identity.Initialize("", privateKey), ServerAuthValid)
|
||||
// TODO switch from old identity to new tapir identity.
|
||||
pub := identity.PublicKey()
|
||||
err = connection.HandleInboundConnection(rc).ProcessAuthAsV3Server(identityOld.InitializeV3("", &priv, &pub), ServerAuthValid)
|
||||
if err != nil {
|
||||
t.Errorf("ServerAuth Error: %v", err)
|
||||
}
|
||||
|
@ -63,19 +67,20 @@ func runtestserver(t *testing.T, ts *TestServer, privateKey *rsa.PrivateKey) {
|
|||
}
|
||||
|
||||
func TestPeerServerConnection(t *testing.T) {
|
||||
privateKey, err := utils.GeneratePrivateKey()
|
||||
if err != nil {
|
||||
t.Errorf("Private Key Error %v", err)
|
||||
}
|
||||
|
||||
identity, priv := primitives.InitializeEphemeralIdentity()
|
||||
t.Logf("Launching Server....\n")
|
||||
ts := new(TestServer)
|
||||
ts.Init()
|
||||
go runtestserver(t, ts, privateKey)
|
||||
onionAddr, err := utils.GetOnionAddress(privateKey)
|
||||
if err != nil {
|
||||
t.Errorf("Error getting onion address: %v", err)
|
||||
}
|
||||
psc := NewPeerServerConnection("127.0.0.1:5451|" + onionAddr)
|
||||
ts.Received = make(chan bool)
|
||||
listenChan := make(chan bool)
|
||||
go runtestserver(t, ts, priv, identity, listenChan)
|
||||
<-listenChan
|
||||
onionAddr := identity.Hostname()
|
||||
|
||||
manager := event.NewEventManager()
|
||||
engine := NewProtocolEngine(identity, priv, connectivity.LocalProvider(), manager, nil)
|
||||
|
||||
psc := NewPeerServerConnection(engine, "127.0.0.1:5451|"+onionAddr)
|
||||
numcalls := 0
|
||||
psc.GroupMessageHandler = func(s string, gm *protocol.GroupMessage) {
|
||||
numcalls++
|
||||
|
@ -86,21 +91,15 @@ func TestPeerServerConnection(t *testing.T) {
|
|||
}
|
||||
time.Sleep(time.Second * 1)
|
||||
go psc.Run()
|
||||
time.Sleep(time.Second * 2)
|
||||
state = psc.GetState()
|
||||
if state != AUTHENTICATED {
|
||||
t.Errorf("connection should now be authed(%v), instead was %v", AUTHENTICATED, state)
|
||||
}
|
||||
psc.WaitTilSynced()
|
||||
|
||||
gm := &protocol.GroupMessage{Ciphertext: []byte("hello"), Signature: []byte{}}
|
||||
psc.SendGroupMessage(gm)
|
||||
time.Sleep(time.Second * 2)
|
||||
if ts.Received == false {
|
||||
t.Errorf("Should have received a group message in test server")
|
||||
}
|
||||
|
||||
// Wait until message is received
|
||||
<-ts.Received
|
||||
|
||||
if numcalls != 2 {
|
||||
t.Errorf("Should have received 2 calls from fetch request, instead received %v", numcalls)
|
||||
t.Errorf("Should have received 2 calls from fetch request, instead received %v", numcalls)
|
||||
}
|
||||
|
||||
}
|
|
@ -2,7 +2,7 @@ package send
|
|||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/protocol/spam"
|
||||
"cwtch.im/cwtch/protocol/connections/spam"
|
||||
"errors"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
|
@ -64,10 +64,9 @@ func (cpsc *CwtchPeerSendChannel) OpenOutbound(channel *channels.Channel) ([]byt
|
|||
func (cpsc *CwtchPeerSendChannel) OpenOutboundResult(err error, crm *Protocol_Data_Control.ChannelResult) {
|
||||
if err == nil {
|
||||
if crm.GetOpened() {
|
||||
cpsc.channel.Pending = false
|
||||
ce, _ := proto.GetExtension(crm, protocol.E_ServerNonce)
|
||||
cpsc.challenge = ce.([]byte)[:]
|
||||
|
||||
cpsc.channel.Pending = false
|
||||
}
|
||||
}
|
||||
}
|
|
@ -2,7 +2,7 @@ package send
|
|||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/protocol/spam"
|
||||
"cwtch.im/cwtch/protocol/connections/spam"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/wire/control"
|
||||
"github.com/golang/protobuf/proto"
|
|
@ -51,7 +51,7 @@ func (sg *Guard) GenerateChallenge(channelID int32) []byte {
|
|||
func (sg *Guard) SolveChallenge(challenge []byte, message []byte) []byte {
|
||||
solved := false
|
||||
var spamguard [24]byte
|
||||
sum := sha256.Sum256([]byte{})
|
||||
var sum [32]byte
|
||||
solve := make([]byte, len(challenge)+len(message)+len(spamguard))
|
||||
for !solved {
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
package connections
|
||||
|
||||
// ConnectionState defines the various states a connection can be in from disconnected to authenticated
type ConnectionState int

// Connection States
// DISCONNECTED - No existing connection has been made, or all attempts have failed
// CONNECTING - We are in the process of attempting to connect to a given endpoint
// CONNECTED - We have connected but not yet authenticated
// AUTHENTICATED - im.ricochet.auth-hidden-server has succeeded on the connection.
// SYNCED - we have pulled all the messages for groups from the server and are ready to send
// FAILED - the connection attempt ended in an error (eligible for retry)
// KILLED - the connection was deliberately closed (not eligible for retry)
const (
	DISCONNECTED ConnectionState = iota
	CONNECTING
	CONNECTED
	AUTHENTICATED
	SYNCED
	FAILED
	KILLED
)

var (
	// ConnectionStateName allows conversion of states to their string representations
	// (indexed by the iota values above — keep both lists in the same order)
	ConnectionStateName = []string{"Disconnected", "Connecting", "Connected", "Authenticated", "Synced", "Failed", "Killed"}

	// ConnectionStateToType allows conversion of strings to their state type
	ConnectionStateToType = map[string]ConnectionState{"Disconnected": DISCONNECTED, "Connecting": CONNECTING,
		"Connected": CONNECTED, "Authenticated": AUTHENTICATED, "Synced": SYNCED, "Failed": FAILED, "Killed": KILLED}
)
|
|
@ -1,6 +1,17 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: cwtch-profile.proto
|
||||
|
||||
/*
|
||||
Package protocol is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
cwtch-profile.proto
|
||||
|
||||
It has these top-level messages:
|
||||
CwtchPeerPacket
|
||||
CwtchIdentity
|
||||
GroupChatInvite
|
||||
*/
|
||||
package protocol
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
|
@ -12,6 +23,12 @@ var _ = proto.Marshal
|
|||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type CwtchPeerPacket struct {
|
||||
CwtchIdentify *CwtchIdentity `protobuf:"bytes,1,opt,name=cwtch_identify,json=cwtchIdentify" json:"cwtch_identify,omitempty"`
|
||||
GroupChatInvite *GroupChatInvite `protobuf:"bytes,2,opt,name=group_chat_invite,json=groupChatInvite" json:"group_chat_invite,omitempty"`
|
||||
|
@ -20,7 +37,7 @@ type CwtchPeerPacket struct {
|
|||
func (m *CwtchPeerPacket) Reset() { *m = CwtchPeerPacket{} }
|
||||
func (m *CwtchPeerPacket) String() string { return proto.CompactTextString(m) }
|
||||
func (*CwtchPeerPacket) ProtoMessage() {}
|
||||
func (*CwtchPeerPacket) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
|
||||
func (*CwtchPeerPacket) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *CwtchPeerPacket) GetCwtchIdentify() *CwtchIdentity {
|
||||
if m != nil {
|
||||
|
@ -44,7 +61,7 @@ type CwtchIdentity struct {
|
|||
func (m *CwtchIdentity) Reset() { *m = CwtchIdentity{} }
|
||||
func (m *CwtchIdentity) String() string { return proto.CompactTextString(m) }
|
||||
func (*CwtchIdentity) ProtoMessage() {}
|
||||
func (*CwtchIdentity) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
|
||||
func (*CwtchIdentity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *CwtchIdentity) GetName() string {
|
||||
if m != nil {
|
||||
|
@ -66,12 +83,13 @@ type GroupChatInvite struct {
|
|||
GroupSharedKey []byte `protobuf:"bytes,2,opt,name=group_shared_key,json=groupSharedKey,proto3" json:"group_shared_key,omitempty"`
|
||||
ServerHost string `protobuf:"bytes,3,opt,name=server_host,json=serverHost" json:"server_host,omitempty"`
|
||||
SignedGroupId []byte `protobuf:"bytes,4,opt,name=signed_group_id,json=signedGroupId,proto3" json:"signed_group_id,omitempty"`
|
||||
InitialMessage []byte `protobuf:"bytes,5,opt,name=initial_message,json=initialMessage,proto3" json:"initial_message,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GroupChatInvite) Reset() { *m = GroupChatInvite{} }
|
||||
func (m *GroupChatInvite) String() string { return proto.CompactTextString(m) }
|
||||
func (*GroupChatInvite) ProtoMessage() {}
|
||||
func (*GroupChatInvite) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
|
||||
func (*GroupChatInvite) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *GroupChatInvite) GetGroupName() string {
|
||||
if m != nil {
|
||||
|
@ -101,33 +119,17 @@ func (m *GroupChatInvite) GetSignedGroupId() []byte {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *GroupChatInvite) GetInitialMessage() []byte {
|
||||
if m != nil {
|
||||
return m.InitialMessage
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*CwtchPeerPacket)(nil), "protocol.CwtchPeerPacket")
|
||||
proto.RegisterType((*CwtchIdentity)(nil), "protocol.CwtchIdentity")
|
||||
proto.RegisterType((*GroupChatInvite)(nil), "protocol.GroupChatInvite")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("cwtch-profile.proto", fileDescriptor1) }
|
||||
|
||||
var fileDescriptor1 = []byte{
|
||||
// 299 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0xc1, 0x4a, 0xf3, 0x40,
|
||||
0x14, 0x85, 0xc9, 0xff, 0x17, 0xb1, 0xb7, 0xb6, 0xa9, 0xe3, 0xc2, 0xb8, 0x10, 0xa5, 0x0b, 0xe9,
|
||||
0x42, 0x0b, 0x56, 0xba, 0x70, 0xe3, 0xa6, 0x88, 0x86, 0x82, 0xc4, 0xf8, 0x00, 0x43, 0x3a, 0x73,
|
||||
0x93, 0x0c, 0x8d, 0x99, 0x30, 0x99, 0x56, 0xe6, 0x4d, 0xdc, 0xfb, 0xa2, 0x92, 0x1b, 0xa5, 0xad,
|
||||
0xab, 0x99, 0x39, 0xe7, 0xde, 0xef, 0x1c, 0x06, 0x4e, 0xc4, 0x87, 0x15, 0xf9, 0x4d, 0x65, 0x74,
|
||||
0xaa, 0x0a, 0x9c, 0x54, 0x46, 0x5b, 0xcd, 0x0e, 0xe9, 0x10, 0xba, 0x18, 0x7d, 0x7a, 0xe0, 0xcf,
|
||||
0x9b, 0x89, 0x08, 0xd1, 0x44, 0x89, 0x58, 0xa1, 0x65, 0x0f, 0x30, 0xa0, 0x25, 0xae, 0x24, 0x96,
|
||||
0x56, 0xa5, 0x2e, 0xf0, 0x2e, 0xbd, 0x71, 0x6f, 0x7a, 0x3a, 0xf9, 0x5d, 0x9b, 0xd0, 0x4a, 0x48,
|
||||
0xb6, 0x75, 0x71, 0x5f, 0x6c, 0x9f, 0xa9, 0x63, 0x8f, 0x70, 0x9c, 0x19, 0xbd, 0xae, 0xb8, 0xc8,
|
||||
0x13, 0xcb, 0x55, 0xb9, 0x51, 0x16, 0x83, 0x7f, 0x84, 0x38, 0xdb, 0x22, 0x9e, 0x9a, 0x91, 0x79,
|
||||
0x9e, 0xd8, 0x90, 0x06, 0x62, 0x3f, 0xdb, 0x17, 0x46, 0xaf, 0xd0, 0xdf, 0x8b, 0x61, 0x0c, 0x3a,
|
||||
0x65, 0xf2, 0x8e, 0xd4, 0xa6, 0x1b, 0xd3, 0x9d, 0x5d, 0x03, 0x43, 0x39, 0x9d, 0xcd, 0x6e, 0xef,
|
||||
0x79, 0xb5, 0x5e, 0x16, 0x4a, 0xf0, 0x15, 0x3a, 0x0a, 0x3b, 0x8a, 0x87, 0x3f, 0x4e, 0x44, 0xc6,
|
||||
0x02, 0xdd, 0xe8, 0xcb, 0x03, 0xff, 0x4f, 0x2e, 0x3b, 0x07, 0x68, 0xdb, 0xee, 0xb0, 0xbb, 0xa4,
|
||||
0xbc, 0x34, 0x01, 0x63, 0x18, 0xb6, 0x76, 0x9d, 0x27, 0x06, 0xe5, 0x0e, 0x7e, 0x40, 0xfa, 0x1b,
|
||||
0xc9, 0x0b, 0x74, 0xec, 0x02, 0x7a, 0x35, 0x9a, 0x0d, 0x1a, 0x9e, 0xeb, 0xda, 0x06, 0xff, 0x89,
|
||||
0x04, 0xad, 0xf4, 0xac, 0x6b, 0xcb, 0xae, 0xc0, 0xaf, 0x55, 0x56, 0xa2, 0xe4, 0x2d, 0x51, 0xc9,
|
||||
0xa0, 0x43, 0xa4, 0x7e, 0x2b, 0x53, 0xb3, 0x50, 0x2e, 0x0f, 0xe8, 0x8f, 0xee, 0xbe, 0x03, 0x00,
|
||||
0x00, 0xff, 0xff, 0x62, 0x61, 0x2d, 0x00, 0xbb, 0x01, 0x00, 0x00,
|
||||
}
|
||||
func init() { proto.RegisterFile("cwtch-profile.proto", fileDescriptor0) }
|
||||
|
|
|
@ -17,4 +17,5 @@ message GroupChatInvite {
|
|||
bytes group_shared_key = 2;
|
||||
string server_host = 3;
|
||||
bytes signed_group_id = 4;
|
||||
bytes initial_message = 5;
|
||||
}
|
||||
|
|
|
@ -2,7 +2,10 @@ package main
|
|||
|
||||
import (
|
||||
cwtchserver "cwtch.im/cwtch/server"
|
||||
"log"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -10,11 +13,23 @@ const (
|
|||
)
|
||||
|
||||
func main() {
|
||||
serverConfig := cwtchserver.LoadConfig(serverConfigFile)
|
||||
log.AddEverythingFromPattern("server/app/main")
|
||||
log.AddEverythingFromPattern("server/server")
|
||||
configDir := os.Getenv("CWTCH_CONFIG_DIR")
|
||||
|
||||
serverConfig := cwtchserver.LoadConfig(configDir, serverConfigFile)
|
||||
|
||||
acn, err := connectivity.StartTor(path.Join(configDir, "tor"), "")
|
||||
if err != nil {
|
||||
log.Errorf("\nError connecting to Tor: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer acn.Close()
|
||||
|
||||
server := new(cwtchserver.Server)
|
||||
log.Printf("starting cwtch server...")
|
||||
log.Infoln("starting cwtch server...")
|
||||
|
||||
// TODO load params from .cwtch/server.conf or command line flag
|
||||
server.Run(serverConfig)
|
||||
// TODO: respond to HUP so t.Close is gracefully called
|
||||
server.Run(acn, serverConfig)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
#!/bin/sh
|
||||
set -o errexit
|
||||
|
||||
chmod_files() { find $2 -type f -exec chmod -v $1 {} \;
|
||||
}
|
||||
chmod_dirs() { find $2 -type d -exec chmod -v $1 {} \;
|
||||
}
|
||||
|
||||
chown ${TOR_USER}:${TOR_USER} /run/tor/
|
||||
chmod 770 /run/tor
|
||||
|
||||
chown -Rv ${TOR_USER}:${TOR_USER} /var/lib/tor
|
||||
chmod_dirs 700 /var/lib/tor
|
||||
chmod_files 600 /var/lib/tor
|
||||
|
||||
echo -e "\n========================================================"
|
||||
# Display OS version, Tor version & torrc in log
|
||||
echo -e "Alpine Version: \c" && cat /etc/alpine-release
|
||||
tor --version
|
||||
#cat /etc/tor/torrc
|
||||
echo -e "========================================================\n"
|
||||
|
||||
tor -f /etc/tor/torrc
|
||||
|
||||
#Cwtch will crash and burn if 9051 isn't ready
|
||||
sleep 15
|
||||
|
||||
if [ -z "${CWTCH_CONFIG_DIR}" ]; then
|
||||
CWTCH_CONFIG_DIR=/etc/cwtch/
|
||||
fi
|
||||
|
||||
#Run cwtch (or whatever the user passed)
|
||||
CWTCH_CONFIG_DIR=$CWTCH_CONFIG_DIR exec "$@"
|
|
@ -0,0 +1,27 @@
|
|||
User _tor
|
||||
DataDirectory /var/lib/tor
|
||||
|
||||
ORPort 0
|
||||
ExitRelay 0
|
||||
IPv6Exit 0
|
||||
|
||||
#We need this running in the background as the server doesn't launch it itself
|
||||
RunAsDaemon 1
|
||||
|
||||
ClientOnly 1
|
||||
SocksPort 9050
|
||||
|
||||
ControlPort 9051
|
||||
ControlSocket /run/tor/control
|
||||
ControlSocketsGroupWritable 1
|
||||
CookieAuthentication 1
|
||||
CookieAuthFile /run/tor/control.authcookie
|
||||
CookieAuthFileGroupReadable 1
|
||||
#HashedControlPassword 16:B4C8EE980C085EE460AEA9094350DAA9C2B5F841400E9BBA247368400A
|
||||
|
||||
# Run as a relay only (change policy to enable exit node)
|
||||
ExitPolicy reject *:* # no exits allowed
|
||||
ExitPolicy reject6 *:*
|
||||
|
||||
# Additional config built by the entrypoint will go here
|
||||
|
|
@ -4,9 +4,10 @@ import (
|
|||
"bufio"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/application"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"github.com/struCoder/pidusage"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -24,11 +25,13 @@ type Monitors struct {
|
|||
starttime time.Time
|
||||
breakChannel chan bool
|
||||
log bool
|
||||
configDir string
|
||||
}
|
||||
|
||||
// Start initializes a Monitors's monitors
|
||||
func (mp *Monitors) Start(ra *application.RicochetApplication, log bool) {
|
||||
func (mp *Monitors) Start(ra *application.RicochetApplication, configDir string, log bool) {
|
||||
mp.log = log
|
||||
mp.configDir = configDir
|
||||
mp.starttime = time.Now()
|
||||
mp.breakChannel = make(chan bool)
|
||||
mp.MessageCounter = NewCounter()
|
||||
|
@ -54,9 +57,9 @@ func (mp *Monitors) run() {
|
|||
}
|
||||
|
||||
func (mp *Monitors) report() {
|
||||
f, err := os.Create(reportFile)
|
||||
f, err := os.Create(path.Join(mp.configDir, reportFile))
|
||||
if err != nil {
|
||||
log.Println("ERROR: Could not open monitor reporting file: ", err)
|
||||
log.Errorf("Could not open monitor reporting file: %v", err)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
@ -65,7 +68,7 @@ func (mp *Monitors) report() {
|
|||
|
||||
fmt.Fprintf(w, "Uptime: %v\n\n", time.Now().Sub(mp.starttime))
|
||||
|
||||
fmt.Fprintln(w, "Messages:")
|
||||
fmt.Fprintln(w, "messages:")
|
||||
mp.Messages.Report(w)
|
||||
|
||||
fmt.Fprintln(w, "\nClient Connections:")
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
{"ciphertext":"SGVsbG8gdGhpcyBpcyBhIGZhaXJseSBhdmVyYWdlIGxlbmd0aCBtZXNzYWdlIHRoYXQgd2UgYXJlIHdyaXRpbmcgaGVyZS4="}
|
||||
{"ciphertext":"SGVsbG8gdGhpcyBpcyBhIGZhaXJseSBhdmVyYWdlIGxlbmd0aCBtZXNzYWdlIHRoYXQgd2UgYXJlIHdyaXRpbmcgaGVyZS4="}
|
|
@ -2,12 +2,12 @@ package send
|
|||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/protocol/spam"
|
||||
"cwtch.im/cwtch/protocol/connections/spam"
|
||||
"errors"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/wire/control"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"log"
|
||||
)
|
||||
|
||||
// CwtchServerSendChannel implements the ChannelHandler interface for a channel of
|
||||
|
@ -89,11 +89,11 @@ func (cc *CwtchServerSendChannel) Packet(data []byte) {
|
|||
if ok {
|
||||
cc.Handler.HandleGroupMessage(gm)
|
||||
} else {
|
||||
log.Printf("[ERROR] Failed to validate spamguard %v\n", gm)
|
||||
log.Errorf("Failed to validate spamguard %v\n", gm)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Printf("[ERROR] Failed to decode packet on SEND channel %v\n", err)
|
||||
log.Errorf("Failed to decode packet on SEND channel %v\n", err)
|
||||
}
|
||||
cc.channel.CloseChannel()
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@ package send
|
|||
|
||||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/protocol/spam"
|
||||
"cwtch.im/cwtch/protocol/connections/spam"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/wire/control"
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
|
|
@ -5,44 +5,53 @@ import (
|
|||
"cwtch.im/cwtch/server/listen"
|
||||
"cwtch.im/cwtch/server/metrics"
|
||||
"cwtch.im/cwtch/server/send"
|
||||
"cwtch.im/cwtch/storage"
|
||||
"cwtch.im/cwtch/server/storage"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/application"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
"log"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Server encapsulates a complete, compliant Cwtch server.
|
||||
type Server struct {
|
||||
app *application.RicochetApplication
|
||||
config *Config
|
||||
config Config
|
||||
metricsPack metrics.Monitors
|
||||
closed bool
|
||||
}
|
||||
|
||||
// Run starts a server with the given privateKey
|
||||
// TODO: surface errors
|
||||
func (s *Server) Run(serverConfig *Config) {
|
||||
// TODO: handle HUP/KILL signals to exit and close Tor gracefully
|
||||
// TODO: handle user input to exit
|
||||
func (s *Server) Run(acn connectivity.ACN, serverConfig Config) {
|
||||
s.closed = false
|
||||
s.config = serverConfig
|
||||
cwtchserver := new(application.RicochetApplication)
|
||||
s.metricsPack.Start(cwtchserver, s.config.ServerReporting.LogMetricsToFile)
|
||||
s.metricsPack.Start(cwtchserver, serverConfig.ConfigDir, s.config.ServerReporting.LogMetricsToFile)
|
||||
|
||||
l, err := application.SetupOnion("127.0.0.1:9051", "tcp4", "", s.config.PrivateKey(), 9878)
|
||||
|
||||
if err != nil {
|
||||
log.Fatalf("error setting up onion service: %v", err)
|
||||
}
|
||||
|
||||
af := application.ApplicationInstanceFactory{}
|
||||
af := application.InstanceFactory{}
|
||||
af.Init()
|
||||
ms := new(storage.MessageStore)
|
||||
ms.Init("cwtch.messages", s.config.MaxBufferLines, s.metricsPack.MessageCounter)
|
||||
af.AddHandler("im.cwtch.server.listen", func(rai *application.ApplicationInstance) func() channels.Handler {
|
||||
err := ms.Init(serverConfig.ConfigDir, s.config.MaxBufferLines, s.metricsPack.MessageCounter)
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
acn.Close()
|
||||
os.Exit(1)
|
||||
}
|
||||
af.AddHandler("im.cwtch.server.listen", func(rai *application.Instance) func() channels.Handler {
|
||||
return func() channels.Handler {
|
||||
cslc := new(listen.CwtchServerListenChannel)
|
||||
return cslc
|
||||
}
|
||||
})
|
||||
|
||||
af.AddHandler("im.cwtch.server.fetch", func(rai *application.ApplicationInstance) func() channels.Handler {
|
||||
af.AddHandler("im.cwtch.server.fetch", func(rai *application.Instance) func() channels.Handler {
|
||||
si := new(Instance)
|
||||
si.Init(rai, cwtchserver, ms)
|
||||
return func() channels.Handler {
|
||||
|
@ -52,7 +61,7 @@ func (s *Server) Run(serverConfig *Config) {
|
|||
}
|
||||
})
|
||||
|
||||
af.AddHandler("im.cwtch.server.send", func(rai *application.ApplicationInstance) func() channels.Handler {
|
||||
af.AddHandler("im.cwtch.server.send", func(rai *application.Instance) func() channels.Handler {
|
||||
si := new(Instance)
|
||||
si.Init(rai, cwtchserver, ms)
|
||||
return func() channels.Handler {
|
||||
|
@ -62,14 +71,30 @@ func (s *Server) Run(serverConfig *Config) {
|
|||
}
|
||||
})
|
||||
|
||||
cwtchserver.Init("cwtch server for "+l.Addr().String()[0:16], s.config.PrivateKey(), af, new(application.AcceptAllContactManager))
|
||||
log.Printf("cwtch server running on cwtch:%s", l.Addr().String()[0:16])
|
||||
addressIdentity := utils.GetTorV3Hostname(s.config.PublicKey)
|
||||
cwtchserver.Init(acn, "cwtch server for "+addressIdentity, s.config.Identity(), af, new(application.AcceptAllContactManager))
|
||||
port := strconv.Itoa(application.RicochetPort)
|
||||
log.Infof("cwtch server running on cwtch:%s\n", addressIdentity+".onion:"+port)
|
||||
|
||||
s.app = cwtchserver
|
||||
s.app.Run(l)
|
||||
|
||||
for true {
|
||||
listenService, err := acn.Listen(s.config.PrivateKey, application.RicochetPort)
|
||||
if err != nil {
|
||||
log.Errorf("Listen() error setting up onion service: %v\n", err)
|
||||
} else {
|
||||
s.app.Run(listenService)
|
||||
}
|
||||
if s.closed {
|
||||
return
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown kills the app closing all connections and freeing all goroutines
|
||||
func (s *Server) Shutdown() {
|
||||
s.closed = true
|
||||
s.app.Shutdown()
|
||||
s.metricsPack.Stop()
|
||||
}
|
||||
|
|
|
@ -1,102 +1,64 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/identity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"sync"
|
||||
"path"
|
||||
)
|
||||
|
||||
// Reporting is a struct for storing a the config a server needs to be a peer, and connect to a group to report
|
||||
type Reporting struct {
|
||||
LogMetricsToFile bool `json:"logMetricsToFile"`
|
||||
PeerPrivateKey string `json:"privateKey"`
|
||||
ReportingGroupID string `json:"reportingGroupId"`
|
||||
ReportingServerAddr string `json:"reportingServerAddr"`
|
||||
}
|
||||
|
||||
// Config is a struct for storing basic server configuration
|
||||
type Config struct {
|
||||
MaxBufferLines int `json:"maxBufferLines"`
|
||||
PrivateKeyBytes string `json:"privateKey"`
|
||||
ServerReporting Reporting `json:"serverReporting"`
|
||||
lock sync.Mutex
|
||||
ConfigDir string `json:"-"`
|
||||
MaxBufferLines int `json:"maxBufferLines"`
|
||||
PublicKey ed25519.PublicKey `json:"publicKey"`
|
||||
PrivateKey ed25519.PrivateKey `json:"privateKey"`
|
||||
ServerReporting Reporting `json:"serverReporting"`
|
||||
}
|
||||
|
||||
// PrivateKey returns an rsa.PrivateKey generated from the config's PrivateKeyBytes
|
||||
func (config *Config) PrivateKey() *rsa.PrivateKey {
|
||||
pk, err := utils.ParsePrivateKey([]byte(config.PrivateKeyBytes))
|
||||
if err != nil {
|
||||
log.Println("Error parsing private key: ", err)
|
||||
}
|
||||
return pk
|
||||
// Identity returns an encapsulation of the servers keys for running ricochet
|
||||
func (config *Config) Identity() identity.Identity {
|
||||
return identity.InitializeV3("", &config.PrivateKey, &config.PublicKey)
|
||||
}
|
||||
|
||||
// Save dumps the latest version of the config to a json file given by filename
|
||||
func (config *Config) Save(filename string) {
|
||||
config.lock.Lock()
|
||||
defer config.lock.Unlock()
|
||||
func (config *Config) Save(dir, filename string) {
|
||||
log.Infof("Saving config to %s\n", path.Join(dir, filename))
|
||||
bytes, _ := json.MarshalIndent(config, "", "\t")
|
||||
ioutil.WriteFile(filename, bytes, 0600)
|
||||
}
|
||||
|
||||
// newConfig generates a simple config with defaults. Unmarshal will return them if they aren't specified
|
||||
func newConfig() *Config {
|
||||
config := Config{}
|
||||
config.MaxBufferLines = 100000
|
||||
config.ServerReporting.LogMetricsToFile = false
|
||||
|
||||
return &config
|
||||
ioutil.WriteFile(path.Join(dir, filename), bytes, 0600)
|
||||
}
|
||||
|
||||
// LoadConfig loads a Config from a json file specified by filename
|
||||
func LoadConfig(filename string) *Config {
|
||||
config := newConfig()
|
||||
|
||||
raw, err := ioutil.ReadFile(filename)
|
||||
func LoadConfig(configDir, filename string) Config {
|
||||
log.Infof("Loading config from %s\n", path.Join(configDir, filename))
|
||||
config := Config{}
|
||||
config.ConfigDir = configDir
|
||||
config.MaxBufferLines = 100000
|
||||
config.ServerReporting.LogMetricsToFile = false
|
||||
raw, err := ioutil.ReadFile(path.Join(configDir, filename))
|
||||
if err == nil {
|
||||
err = json.Unmarshal(raw, &config)
|
||||
|
||||
if err != nil {
|
||||
log.Println("Error reading config: ", err)
|
||||
log.Errorf("reading config: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
configAutoPopulate(config)
|
||||
if config.PrivateKey == nil {
|
||||
config.PublicKey, config.PrivateKey, _ = ed25519.GenerateKey(rand.Reader)
|
||||
}
|
||||
|
||||
// Always save (first time generation, new version with new variables populated)
|
||||
config.Save(filename)
|
||||
config.Save(configDir, filename)
|
||||
return config
|
||||
}
|
||||
|
||||
// Auto populate required values if missing and save
|
||||
func configAutoPopulate(config *Config) {
|
||||
if config.PrivateKeyBytes == "" {
|
||||
config.generatePrivateKey()
|
||||
}
|
||||
|
||||
if config.ServerReporting.PeerPrivateKey == "" {
|
||||
config.generatePeerPrivateKey()
|
||||
}
|
||||
}
|
||||
|
||||
func (config *Config) generatePrivateKey() {
|
||||
pk, err := utils.GeneratePrivateKey()
|
||||
if err != nil {
|
||||
log.Fatalf("error generating new private key: %v\n", err)
|
||||
}
|
||||
config.lock.Lock()
|
||||
config.PrivateKeyBytes = utils.PrivateKeyToString(pk)
|
||||
config.lock.Unlock()
|
||||
}
|
||||
|
||||
func (config *Config) generatePeerPrivateKey() {
|
||||
pk, err := utils.GeneratePrivateKey()
|
||||
if err != nil {
|
||||
log.Fatalf("error generating new peer private key: %v\n", err)
|
||||
}
|
||||
config.lock.Lock()
|
||||
config.ServerReporting.PeerPrivateKey = utils.PrivateKeyToString(pk)
|
||||
config.lock.Unlock()
|
||||
}
|
||||
|
|
|
@ -3,20 +3,20 @@ package server
|
|||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/server/listen"
|
||||
"cwtch.im/cwtch/storage"
|
||||
"cwtch.im/cwtch/server/storage"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/application"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/channels"
|
||||
)
|
||||
|
||||
// Instance encapsulates the Ricochet application.
|
||||
type Instance struct {
|
||||
rai *application.ApplicationInstance
|
||||
rai *application.Instance
|
||||
ra *application.RicochetApplication
|
||||
msi storage.MessageStoreInterface
|
||||
}
|
||||
|
||||
// Init sets up a Server Instance
|
||||
func (si *Instance) Init(rai *application.ApplicationInstance, ra *application.RicochetApplication, msi storage.MessageStoreInterface) {
|
||||
func (si *Instance) Init(rai *application.Instance, ra *application.RicochetApplication, msi storage.MessageStoreInterface) {
|
||||
si.rai = rai
|
||||
si.ra = ra
|
||||
si.msi = msi
|
||||
|
@ -30,7 +30,7 @@ func (si *Instance) HandleFetchRequest() []*protocol.GroupMessage {
|
|||
// HandleGroupMessage takes in a group message and distributes it to all listening peers
|
||||
func (si *Instance) HandleGroupMessage(gm *protocol.GroupMessage) {
|
||||
si.msi.AddMessage(*gm)
|
||||
go si.ra.Broadcast(func(rai *application.ApplicationInstance) {
|
||||
go si.ra.Broadcast(func(rai *application.Instance) {
|
||||
rai.Connection.Do(func() error {
|
||||
channel := rai.Connection.Channel("im.cwtch.server.listen", channels.Inbound)
|
||||
if channel != nil {
|
||||
|
|
|
@ -3,7 +3,7 @@ package server
|
|||
import (
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/server/metrics"
|
||||
"cwtch.im/cwtch/storage"
|
||||
"cwtch.im/cwtch/server/storage"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/application"
|
||||
"os"
|
||||
"testing"
|
||||
|
@ -12,11 +12,11 @@ import (
|
|||
|
||||
func TestServerInstance(t *testing.T) {
|
||||
si := new(Instance)
|
||||
ai := new(application.ApplicationInstance)
|
||||
ai := new(application.Instance)
|
||||
ra := new(application.RicochetApplication)
|
||||
msi := new(storage.MessageStore)
|
||||
os.Remove("ms.test")
|
||||
msi.Init("ms.test", 5, metrics.NewCounter())
|
||||
os.RemoveAll("messages")
|
||||
msi.Init(".", 5, metrics.NewCounter())
|
||||
gm := protocol.GroupMessage{
|
||||
Ciphertext: []byte("Hello this is a fairly average length message that we are writing here."),
|
||||
Spamguard: []byte{},
|
||||
|
@ -27,7 +27,7 @@ func TestServerInstance(t *testing.T) {
|
|||
res := si.HandleFetchRequest()
|
||||
|
||||
if len(res) != 1 {
|
||||
t.Errorf("Expected 1 Group Messages Instead got %v", res)
|
||||
t.Errorf("Expected 1 Group messages Instead got %v", res)
|
||||
}
|
||||
|
||||
// ra.HandleApplicationInstance(ai)
|
||||
|
|
|
@ -0,0 +1,152 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/server/metrics"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
fileStorePartitions = 10
|
||||
fileStoreFilename = "cwtch.messages"
|
||||
directory = "messages"
|
||||
)
|
||||
|
||||
// MessageStoreInterface defines an interface to interact with a store of cwtch messages.
|
||||
type MessageStoreInterface interface {
|
||||
AddMessage(protocol.GroupMessage)
|
||||
FetchMessages() []*protocol.GroupMessage
|
||||
}
|
||||
|
||||
// MessageStore is a file-backed implementation of MessageStoreInterface
|
||||
type MessageStore struct {
|
||||
activeLogFile *os.File
|
||||
filePos int
|
||||
storeDirectory string
|
||||
lock sync.Mutex
|
||||
messages []*protocol.GroupMessage
|
||||
messageCounter metrics.Counter
|
||||
maxBufferLines int
|
||||
bufferPos int
|
||||
bufferRotated bool
|
||||
}
|
||||
|
||||
// Close closes the message store and underlying resources.
|
||||
func (ms *MessageStore) Close() {
|
||||
ms.lock.Lock()
|
||||
ms.messages = nil
|
||||
ms.activeLogFile.Close()
|
||||
ms.lock.Unlock()
|
||||
}
|
||||
|
||||
func (ms *MessageStore) updateBuffer(gm *protocol.GroupMessage) {
|
||||
ms.messages[ms.bufferPos] = gm
|
||||
ms.bufferPos++
|
||||
if ms.bufferPos == ms.maxBufferLines {
|
||||
ms.bufferPos = 0
|
||||
ms.bufferRotated = true
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *MessageStore) initAndLoadFiles() error {
|
||||
ms.activeLogFile = nil
|
||||
for i := fileStorePartitions - 1; i >= 0; i-- {
|
||||
ms.filePos = 0
|
||||
filename := path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, i))
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
log.Errorf("MessageStore could not open: %v: %v", filename, err)
|
||||
continue
|
||||
}
|
||||
ms.activeLogFile = f
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
gms := scanner.Text()
|
||||
ms.filePos++
|
||||
gm := &protocol.GroupMessage{}
|
||||
err := json.Unmarshal([]byte(gms), gm)
|
||||
if err == nil {
|
||||
ms.updateBuffer(gm)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ms.activeLogFile == nil {
|
||||
return fmt.Errorf("Could not create log file to write to in %s", ms.storeDirectory)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageStore) updateFile(gm *protocol.GroupMessage) {
|
||||
s, err := json.Marshal(gm)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to unmarshal group message %v\n", err)
|
||||
}
|
||||
fmt.Fprintf(ms.activeLogFile, "%s\n", s)
|
||||
ms.filePos++
|
||||
if ms.filePos >= ms.maxBufferLines/fileStorePartitions {
|
||||
ms.rotateFileStore()
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *MessageStore) rotateFileStore() {
|
||||
ms.activeLogFile.Close()
|
||||
os.Remove(path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, fileStorePartitions-1)))
|
||||
|
||||
for i := fileStorePartitions - 2; i >= 0; i-- {
|
||||
os.Rename(path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, i)), path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, i+1)))
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, 0)), os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
log.Errorf("Could not open new message store file in: %s", ms.storeDirectory)
|
||||
}
|
||||
ms.filePos = 0
|
||||
ms.activeLogFile = f
|
||||
}
|
||||
|
||||
// Init sets up a MessageStore of size maxBufferLines (# of messages) backed by filename
|
||||
func (ms *MessageStore) Init(appDirectory string, maxBufferLines int, messageCounter metrics.Counter) error {
|
||||
ms.storeDirectory = path.Join(appDirectory, directory)
|
||||
os.Mkdir(ms.storeDirectory, 0700)
|
||||
|
||||
ms.bufferPos = 0
|
||||
ms.maxBufferLines = maxBufferLines
|
||||
ms.messages = make([]*protocol.GroupMessage, maxBufferLines)
|
||||
ms.bufferRotated = false
|
||||
ms.messageCounter = messageCounter
|
||||
|
||||
err := ms.initAndLoadFiles()
|
||||
return err
|
||||
}
|
||||
|
||||
// FetchMessages returns all messages from the backing file.
|
||||
func (ms *MessageStore) FetchMessages() (messages []*protocol.GroupMessage) {
|
||||
ms.lock.Lock()
|
||||
if !ms.bufferRotated {
|
||||
messages = make([]*protocol.GroupMessage, ms.bufferPos)
|
||||
copy(messages, ms.messages[0:ms.bufferPos])
|
||||
} else {
|
||||
messages = make([]*protocol.GroupMessage, ms.maxBufferLines)
|
||||
copy(messages, ms.messages[ms.bufferPos:ms.maxBufferLines])
|
||||
copy(messages[ms.bufferPos:], ms.messages[0:ms.bufferPos])
|
||||
}
|
||||
ms.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// AddMessage adds a GroupMessage to the store
|
||||
func (ms *MessageStore) AddMessage(gm protocol.GroupMessage) {
|
||||
ms.messageCounter.Add(1)
|
||||
ms.lock.Lock()
|
||||
ms.updateBuffer(&gm)
|
||||
ms.updateFile(&gm)
|
||||
|
||||
ms.lock.Unlock()
|
||||
}
|
|
@ -12,27 +12,27 @@ func TestMessageStore(t *testing.T) {
|
|||
os.Remove("ms.test")
|
||||
ms := new(MessageStore)
|
||||
counter := metrics.NewCounter()
|
||||
ms.Init("ms.test", 100000, counter)
|
||||
for i := 0; i < 50000; i++ {
|
||||
ms.Init("./", 1000, counter)
|
||||
for i := 0; i < 499; i++ {
|
||||
gm := protocol.GroupMessage{
|
||||
Ciphertext: []byte("Hello this is a fairly average length message that we are writing here. " + strconv.Itoa(i)),
|
||||
Spamguard: []byte{},
|
||||
}
|
||||
ms.AddMessage(gm)
|
||||
}
|
||||
if counter.Count() != 50000 {
|
||||
t.Errorf("Counter should be at 50000 was %v", counter.Count())
|
||||
if counter.Count() != 499 {
|
||||
t.Errorf("Counter should be at 499 was %v", counter.Count())
|
||||
}
|
||||
ms.Close()
|
||||
ms.Init("ms.test", 100000, counter)
|
||||
ms.Init("./", 1000, counter)
|
||||
m := ms.FetchMessages()
|
||||
if len(m) != 50000 {
|
||||
t.Errorf("Should have been 50000 was %v", len(m))
|
||||
if len(m) != 499 {
|
||||
t.Errorf("Should have been 499 was %v", len(m))
|
||||
}
|
||||
|
||||
counter.Reset()
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
for i := 0; i < 1000; i++ {
|
||||
gm := protocol.GroupMessage{
|
||||
Ciphertext: []byte("Hello this is a fairly average length message that we are writing here. " + strconv.Itoa(i)),
|
||||
Spamguard: []byte{},
|
||||
|
@ -41,10 +41,16 @@ func TestMessageStore(t *testing.T) {
|
|||
}
|
||||
|
||||
m = ms.FetchMessages()
|
||||
if len(m) != 100000 {
|
||||
t.Errorf("Should have been 100000 was %v", len(m))
|
||||
if len(m) != 1000 {
|
||||
t.Errorf("Should have been 1000 was %v", len(m))
|
||||
}
|
||||
|
||||
ms.Close()
|
||||
os.Remove("ms.test")
|
||||
ms.Init("./", 1000, counter)
|
||||
m = ms.FetchMessages()
|
||||
if len(m) != 999 {
|
||||
t.Errorf("Should have been 999 was %v", len(m))
|
||||
}
|
||||
ms.Close()
|
||||
|
||||
os.RemoveAll("./messages")
|
||||
}
|
|
@ -0,0 +1,70 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"golang.org/x/crypto/sha3"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
)
|
||||
|
||||
// createKey derives a key from a password
|
||||
func createKey(password string) ([32]byte, [128]byte, error) {
|
||||
var salt [128]byte
|
||||
if _, err := io.ReadFull(rand.Reader, salt[:]); err != nil {
|
||||
log.Errorf("Cannot read from random: %v\n", err)
|
||||
return [32]byte{}, salt, err
|
||||
}
|
||||
dk := pbkdf2.Key([]byte(password), salt[:], 4096, 32, sha3.New512)
|
||||
|
||||
var dkr [32]byte
|
||||
copy(dkr[:], dk)
|
||||
return dkr, salt, nil
|
||||
}
|
||||
|
||||
//encryptFileData encrypts the cwtchPeer via the specified key.
|
||||
func encryptFileData(data []byte, key [32]byte) ([]byte, error) {
|
||||
var nonce [24]byte
|
||||
|
||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||
log.Errorf("Cannot read from random: %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encrypted := secretbox.Seal(nonce[:], data, &nonce, &key)
|
||||
return encrypted, nil
|
||||
}
|
||||
|
||||
//decryptFile decrypts the passed ciphertext into a cwtchPeer via the specified key.
|
||||
func decryptFile(ciphertext []byte, key [32]byte) ([]byte, error) {
|
||||
var decryptNonce [24]byte
|
||||
copy(decryptNonce[:], ciphertext[:24])
|
||||
decrypted, ok := secretbox.Open(nil, ciphertext[24:], &decryptNonce, &key)
|
||||
if ok {
|
||||
return decrypted, nil
|
||||
}
|
||||
return nil, errors.New("Failed to decrypt")
|
||||
}
|
||||
|
||||
// Load instantiates a cwtchPeer from the file store
|
||||
func readEncryptedFile(directory, filename, password string) ([]byte, error) {
|
||||
encryptedbytes, err := ioutil.ReadFile(path.Join(directory, filename))
|
||||
if err == nil && len(encryptedbytes) > 128 {
|
||||
var dkr [32]byte
|
||||
//Separate the salt from the encrypted bytes, then generate the derived key
|
||||
salt, encryptedbytes := encryptedbytes[0:128], encryptedbytes[128:]
|
||||
dk := pbkdf2.Key([]byte(password), salt, 4096, 32, sha3.New512)
|
||||
copy(dkr[:], dk)
|
||||
|
||||
data, err := decryptFile(encryptedbytes, dkr)
|
||||
if err == nil {
|
||||
return data, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path"
|
||||
)
|
||||
|
||||
// fileStore stores a cwtchPeer in an encrypted file
|
||||
type fileStore struct {
|
||||
directory string
|
||||
filename string
|
||||
password string
|
||||
}
|
||||
|
||||
// FileStore is a primitive around storing encrypted files
|
||||
type FileStore interface {
|
||||
Save([]byte) error
|
||||
Load() ([]byte, error)
|
||||
}
|
||||
|
||||
// NewFileStore instantiates a fileStore given a filename and a password
|
||||
func NewFileStore(directory string, filename string, password string) FileStore {
|
||||
filestore := new(fileStore)
|
||||
filestore.password = password
|
||||
filestore.filename = filename
|
||||
filestore.directory = directory
|
||||
return filestore
|
||||
}
|
||||
|
||||
// save serializes a cwtchPeer to a file
|
||||
func (fps *fileStore) Save(data []byte) error {
|
||||
key, salt, _ := createKey(fps.password)
|
||||
encryptedbytes, err := encryptFileData(data, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// the salt for the derived key is appended to the front of the file
|
||||
encryptedbytes = append(salt[:], encryptedbytes...)
|
||||
err = ioutil.WriteFile(path.Join(fps.directory, fps.filename), encryptedbytes, 0600)
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (fps *fileStore) Load() ([]byte, error) {
|
||||
return readEncryptedFile(fps.directory, fps.filename, fps.password)
|
||||
}
|
|
@ -1,104 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"cwtch.im/cwtch/server/metrics"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MessageStoreInterface defines an interface to interact with a store of cwtch messages.
|
||||
type MessageStoreInterface interface {
|
||||
AddMessage(protocol.GroupMessage)
|
||||
FetchMessages() []*protocol.GroupMessage
|
||||
}
|
||||
|
||||
// MessageStore is a file-backed implementation of MessageStoreInterface
|
||||
type MessageStore struct {
|
||||
file *os.File
|
||||
lock sync.Mutex
|
||||
messages []*protocol.GroupMessage
|
||||
messageCounter metrics.Counter
|
||||
bufferSize int
|
||||
pos int
|
||||
rotated bool
|
||||
}
|
||||
|
||||
// Close closes the message store and underlying resources.
|
||||
func (ms *MessageStore) Close() {
|
||||
ms.lock.Lock()
|
||||
ms.messages = nil
|
||||
ms.file.Close()
|
||||
ms.lock.Unlock()
|
||||
}
|
||||
|
||||
func (ms *MessageStore) updateBuffer(gm *protocol.GroupMessage) {
|
||||
ms.messages[ms.pos] = gm
|
||||
ms.pos++
|
||||
if ms.pos == ms.bufferSize {
|
||||
ms.pos = 0
|
||||
ms.rotated = true
|
||||
}
|
||||
}
|
||||
|
||||
// Init sets up a MessageStore of size bufferSize backed by filename
|
||||
func (ms *MessageStore) Init(filename string, bufferSize int, messageCounter metrics.Counter) {
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ms.file = f
|
||||
ms.pos = 0
|
||||
ms.bufferSize = bufferSize
|
||||
ms.messages = make([]*protocol.GroupMessage, bufferSize)
|
||||
ms.rotated = false
|
||||
ms.messageCounter = messageCounter
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
gms := scanner.Text()
|
||||
gm := &protocol.GroupMessage{}
|
||||
err := json.Unmarshal([]byte(gms), gm)
|
||||
if err == nil {
|
||||
ms.updateBuffer(gm)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// FetchMessages returns all messages from the backing file.
|
||||
func (ms *MessageStore) FetchMessages() (messages []*protocol.GroupMessage) {
|
||||
ms.lock.Lock()
|
||||
if !ms.rotated {
|
||||
messages = make([]*protocol.GroupMessage, ms.pos)
|
||||
copy(messages, ms.messages[0:ms.pos])
|
||||
} else {
|
||||
messages = make([]*protocol.GroupMessage, ms.bufferSize)
|
||||
copy(messages, ms.messages)
|
||||
}
|
||||
ms.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// AddMessage adds a GroupMessage to the store
|
||||
func (ms *MessageStore) AddMessage(gm protocol.GroupMessage) {
|
||||
ms.messageCounter.Add(1)
|
||||
ms.lock.Lock()
|
||||
ms.updateBuffer(&gm)
|
||||
s, err := json.Marshal(gm)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Failed to unmarshal group message %v\n", err)
|
||||
}
|
||||
fmt.Fprintf(ms.file, "%s\n", s)
|
||||
ms.lock.Unlock()
|
||||
}
|
|
@ -0,0 +1,270 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"encoding/json"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
const profileFilename = "profile"
|
||||
|
||||
type profileStore struct {
|
||||
fs FileStore
|
||||
streamStores map[string]StreamStore
|
||||
directory string
|
||||
password string
|
||||
profile *model.Profile
|
||||
eventManager event.Manager
|
||||
queue event.Queue
|
||||
writer bool
|
||||
}
|
||||
|
||||
// ProfileStore is an interface to managing the storage of Cwtch Profiles
|
||||
type ProfileStore interface {
|
||||
Load() error
|
||||
Shutdown()
|
||||
GetProfileCopy(timeline bool) *model.Profile
|
||||
GetNewPeerMessage() *event.Event
|
||||
GetStatusMessages() []*event.Event
|
||||
}
|
||||
|
||||
// NewProfileWriterStore returns a profile store backed by a filestore listening for events and saving them
|
||||
// directory should be $appDir/profiles/$rand
|
||||
func NewProfileWriterStore(eventManager event.Manager, directory, password string, profile *model.Profile) ProfileStore {
|
||||
os.Mkdir(directory, 0700)
|
||||
ps := &profileStore{fs: NewFileStore(directory, profileFilename, password), password: password, directory: directory, profile: profile, eventManager: eventManager, streamStores: map[string]StreamStore{}, writer: true}
|
||||
//ps.queue = event.NewQueue(100)
|
||||
ps.queue = event.NewQueue()
|
||||
if profile != nil {
|
||||
ps.save()
|
||||
}
|
||||
go ps.eventHandler()
|
||||
|
||||
ps.eventManager.Subscribe(event.BlockPeer, ps.queue)
|
||||
ps.eventManager.Subscribe(event.UnblockPeer, ps.queue)
|
||||
ps.eventManager.Subscribe(event.PeerCreated, ps.queue)
|
||||
ps.eventManager.Subscribe(event.GroupCreated, ps.queue)
|
||||
ps.eventManager.Subscribe(event.SetProfileName, ps.queue)
|
||||
ps.eventManager.Subscribe(event.SetAttribute, ps.queue)
|
||||
ps.eventManager.Subscribe(event.SetPeerAttribute, ps.queue)
|
||||
ps.eventManager.Subscribe(event.SetGroupAttribute, ps.queue)
|
||||
ps.eventManager.Subscribe(event.AcceptGroupInvite, ps.queue)
|
||||
ps.eventManager.Subscribe(event.NewGroupInvite, ps.queue)
|
||||
ps.eventManager.Subscribe(event.NewMessageFromGroup, ps.queue)
|
||||
ps.eventManager.Subscribe(event.PeerStateChange, ps.queue)
|
||||
ps.eventManager.Subscribe(event.ServerStateChange, ps.queue)
|
||||
ps.eventManager.Subscribe(event.DeleteContact, ps.queue)
|
||||
ps.eventManager.Subscribe(event.DeleteGroup, ps.queue)
|
||||
|
||||
return ps
|
||||
}
|
||||
|
||||
// ReadProfile reads a profile from storqage and returns the profile
|
||||
// directory should be $appDir/profiles/$rand
|
||||
func ReadProfile(directory, password string) (*model.Profile, error) {
|
||||
os.Mkdir(directory, 0700)
|
||||
ps := &profileStore{fs: NewFileStore(directory, profileFilename, password), password: password, directory: directory, profile: nil, eventManager: nil, streamStores: map[string]StreamStore{}, writer: true}
|
||||
|
||||
err := ps.Load()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profile := ps.GetProfileCopy(true)
|
||||
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
// NewProfile creates a new profile for use in the profile store.
|
||||
func NewProfile(name string) *model.Profile {
|
||||
profile := model.GenerateNewProfile(name)
|
||||
return profile
|
||||
}
|
||||
|
||||
// GetNewPeerMessage is for AppService to call on Reload events, to reseed the AppClient with the loaded peers
|
||||
func (ps *profileStore) GetNewPeerMessage() *event.Event {
|
||||
message := event.NewEventList(event.NewPeer, event.Identity, ps.profile.LocalID, event.Password, ps.password, event.Status, "running")
|
||||
return &message
|
||||
}
|
||||
|
||||
func (ps *profileStore) GetStatusMessages() []*event.Event {
|
||||
messages := []*event.Event{}
|
||||
for _, contact := range ps.profile.Contacts {
|
||||
message := event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||
event.RemotePeer: string(contact.Onion),
|
||||
event.ConnectionState: contact.State,
|
||||
})
|
||||
messages = append(messages, &message)
|
||||
}
|
||||
|
||||
doneServers := make(map[string]bool)
|
||||
for _, group := range ps.profile.Groups {
|
||||
if _, exists := doneServers[group.GroupServer]; !exists {
|
||||
message := event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
||||
event.GroupServer: string(group.GroupServer),
|
||||
event.ConnectionState: group.State,
|
||||
})
|
||||
messages = append(messages, &message)
|
||||
doneServers[group.GroupServer] = true
|
||||
}
|
||||
}
|
||||
|
||||
return messages
|
||||
}
|
||||
|
||||
func (ps *profileStore) save() error {
|
||||
if ps.writer {
|
||||
bytes, _ := json.Marshal(ps.profile)
|
||||
return ps.fs.Save(bytes)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load instantiates a cwtchPeer from the file store
|
||||
func (ps *profileStore) Load() error {
|
||||
decrypted, err := ps.fs.Load()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp := new(model.Profile)
|
||||
err = json.Unmarshal(decrypted, &cp)
|
||||
if err == nil {
|
||||
ps.profile = cp
|
||||
|
||||
for gid, group := range cp.Groups {
|
||||
ss := NewStreamStore(ps.directory, group.LocalID, ps.password)
|
||||
cp.Groups[gid].Timeline.SetMessages(ss.Read())
|
||||
ps.streamStores[group.GroupID] = ss
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ps *profileStore) GetProfileCopy(timeline bool) *model.Profile {
|
||||
return ps.profile.GetCopy(timeline)
|
||||
}
|
||||
|
||||
func (ps *profileStore) eventHandler() {
|
||||
for {
|
||||
ev := ps.queue.Next()
|
||||
switch ev.EventType {
|
||||
case event.BlockPeer:
|
||||
contact, exists := ps.profile.GetContact(ev.Data[event.RemotePeer])
|
||||
if exists {
|
||||
contact.Blocked = true
|
||||
ps.save()
|
||||
}
|
||||
case event.UnblockPeer:
|
||||
contact, exists := ps.profile.GetContact(ev.Data[event.RemotePeer])
|
||||
if exists {
|
||||
contact.Blocked = false
|
||||
ps.save()
|
||||
}
|
||||
case event.PeerCreated:
|
||||
var pp *model.PublicProfile
|
||||
json.Unmarshal([]byte(ev.Data[event.Data]), &pp)
|
||||
ps.profile.AddContact(ev.Data[event.RemotePeer], pp)
|
||||
// TODO: configure - allow peers to be configured to turn on limited storage
|
||||
/*ss := NewStreamStore(ps.directory, pp.LocalID, ps.password)
|
||||
pp.Timeline.SetMessages(ss.Read())
|
||||
ps.streamStores[pp.Onion] = ss
|
||||
ps.save()*/
|
||||
case event.GroupCreated:
|
||||
var group *model.Group
|
||||
json.Unmarshal([]byte(ev.Data[event.Data]), &group)
|
||||
ps.profile.AddGroup(group)
|
||||
ps.streamStores[group.GroupID] = NewStreamStore(ps.directory, group.LocalID, ps.password)
|
||||
ps.save()
|
||||
case event.SetProfileName:
|
||||
ps.profile.Name = ev.Data[event.ProfileName]
|
||||
ps.profile.SetAttribute("name", ev.Data[event.ProfileName])
|
||||
ps.save()
|
||||
case event.SetAttribute:
|
||||
ps.profile.SetAttribute(ev.Data[event.Key], ev.Data[event.Data])
|
||||
ps.save()
|
||||
case event.SetPeerAttribute:
|
||||
contact, exists := ps.profile.GetContact(ev.Data[event.RemotePeer])
|
||||
if exists {
|
||||
contact.SetAttribute(ev.Data[event.Key], ev.Data[event.Data])
|
||||
ps.save()
|
||||
} else {
|
||||
log.Errorf("error setting attribute on peer %v peer does not exist", ev)
|
||||
}
|
||||
case event.SetGroupAttribute:
|
||||
group := ps.profile.GetGroupByGroupID(ev.Data[event.GroupID])
|
||||
if group != nil {
|
||||
group.SetAttribute(ev.Data[event.Key], ev.Data[event.Data])
|
||||
ps.save()
|
||||
} else {
|
||||
log.Errorf("error setting attribute on group %v group does not exist", ev)
|
||||
}
|
||||
case event.AcceptGroupInvite:
|
||||
err := ps.profile.AcceptInvite(ev.Data[event.GroupID])
|
||||
if err == nil {
|
||||
ps.save()
|
||||
} else {
|
||||
log.Errorf("error accepting group invite")
|
||||
}
|
||||
case event.NewGroupInvite:
|
||||
var gci protocol.GroupChatInvite
|
||||
err := proto.Unmarshal([]byte(ev.Data[event.GroupInvite]), &gci)
|
||||
if err == nil {
|
||||
ps.profile.ProcessInvite(&gci, ev.Data[event.RemotePeer])
|
||||
ps.save()
|
||||
group := ps.profile.Groups[gci.GetGroupName()]
|
||||
ps.streamStores[group.GroupID] = NewStreamStore(ps.directory, group.LocalID, ps.password)
|
||||
} else {
|
||||
log.Errorf("error storing new group invite: %v %v", ev, err)
|
||||
}
|
||||
case event.NewMessageFromGroup:
|
||||
groupid := ev.Data[event.GroupID]
|
||||
received, _ := time.Parse(time.RFC3339Nano, ev.Data[event.TimestampReceived])
|
||||
sent, _ := time.Parse(time.RFC3339Nano, ev.Data[event.TimestampSent])
|
||||
message := model.Message{Received: received, Timestamp: sent, Message: ev.Data[event.Data], PeerID: ev.Data[event.RemotePeer], Signature: []byte(ev.Data[event.Signature]), PreviousMessageSig: []byte(ev.Data[event.PreviousSignature])}
|
||||
ss, exists := ps.streamStores[groupid]
|
||||
if exists {
|
||||
ss.Write(message)
|
||||
} else {
|
||||
log.Errorf("error storing new group message: %v stream store does not exist", ev)
|
||||
}
|
||||
case event.PeerStateChange:
|
||||
if _, exists := ps.profile.Contacts[ev.Data[event.RemotePeer]]; exists {
|
||||
ps.profile.Contacts[ev.Data[event.RemotePeer]].State = ev.Data[event.ConnectionState]
|
||||
}
|
||||
case event.ServerStateChange:
|
||||
for _, group := range ps.profile.Groups {
|
||||
if group.GroupServer == ev.Data[event.GroupServer] {
|
||||
group.State = ev.Data[event.ConnectionState]
|
||||
}
|
||||
}
|
||||
case event.DeleteContact:
|
||||
onion := ev.Data[event.RemotePeer]
|
||||
ps.profile.DeleteContact(onion)
|
||||
ps.save()
|
||||
case event.DeleteGroup:
|
||||
groupID := ev.Data[event.GroupID]
|
||||
ps.profile.DeleteGroup(groupID)
|
||||
ps.save()
|
||||
ss, exists := ps.streamStores[groupID]
|
||||
if exists {
|
||||
ss.Delete()
|
||||
delete(ps.streamStores, groupID)
|
||||
}
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *profileStore) Shutdown() {
|
||||
if ps.queue != nil {
|
||||
ps.queue.Shutdown()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/protocol"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const testProfileName = "Alice"
|
||||
const testKey = "key"
|
||||
const testVal = "value"
|
||||
const testInitialMessage = "howdy"
|
||||
const testMessage = "Hello from storage"
|
||||
|
||||
func TestProfileStoreWriteRead(t *testing.T) {
|
||||
os.RemoveAll(testingDir)
|
||||
eventBus := event.NewEventManager()
|
||||
profile := NewProfile(testProfileName)
|
||||
ps1 := NewProfileWriterStore(eventBus, testingDir, password, profile)
|
||||
|
||||
eventBus.Publish(event.NewEvent(event.SetAttribute, map[event.Field]string{event.Key: testKey, event.Data: testVal}))
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
groupid, invite, err := profile.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||
if err != nil {
|
||||
t.Errorf("Creating group: %v\n", err)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("Creating group invite: %v\n", err)
|
||||
}
|
||||
|
||||
packet := protocol.CwtchPeerPacket{}
|
||||
proto.Unmarshal(invite, &packet)
|
||||
invite, _ = proto.Marshal(packet.GetGroupChatInvite())
|
||||
eventBus.Publish(event.NewEvent(event.NewGroupInvite, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: ps1.GetProfileCopy(true).Onion, event.GroupInvite: string(invite)}))
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
eventBus.Publish(event.NewEvent(event.NewMessageFromGroup, map[event.Field]string{
|
||||
event.GroupID: groupid,
|
||||
event.TimestampSent: time.Now().Format(time.RFC3339Nano),
|
||||
event.TimestampReceived: time.Now().Format(time.RFC3339Nano),
|
||||
event.RemotePeer: ps1.GetProfileCopy(true).Onion,
|
||||
event.Data: testMessage,
|
||||
}))
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
ps1.Shutdown()
|
||||
|
||||
ps2 := NewProfileWriterStore(eventBus, testingDir, password, nil)
|
||||
err = ps2.Load()
|
||||
if err != nil {
|
||||
t.Errorf("Error createing profileStore: %v\n", err)
|
||||
}
|
||||
|
||||
profile = ps2.GetProfileCopy(true)
|
||||
if profile.Name != testProfileName {
|
||||
t.Errorf("Profile name from loaded profile incorrect. Expected: '%v' Actual: '%v'\n", testProfileName, profile.Name)
|
||||
}
|
||||
|
||||
v, _ := profile.GetAttribute(testKey)
|
||||
if v != testVal {
|
||||
t.Errorf("Profile attribute '%v' inccorect. Expected: '%v' Actual: '%v'\n", testKey, testVal, v)
|
||||
}
|
||||
|
||||
group2 := ps2.GetProfileCopy(true).Groups[groupid]
|
||||
if group2 == nil {
|
||||
t.Errorf("Group not loaded\n")
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,147 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/model"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
fileStorePartitions = 16
|
||||
bytesPerFile = 15 * 1024
|
||||
)
|
||||
|
||||
// streamStore is a file-backed implementation of StreamStore using an in memory buffer of ~16KB and a rotating set of files
|
||||
type streamStore struct {
|
||||
password string
|
||||
|
||||
storeDirectory string
|
||||
filenameBase string
|
||||
|
||||
// Buffer is used just for current file to write to
|
||||
messages []model.Message
|
||||
bufferByteCount int
|
||||
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// StreamStore provides a stream like interface to encrypted storage
|
||||
type StreamStore interface {
|
||||
Write(message model.Message)
|
||||
Read() []model.Message
|
||||
Delete()
|
||||
}
|
||||
|
||||
// NewStreamStore returns an initialized StreamStore ready for reading and writing
|
||||
func NewStreamStore(directory string, filenameBase string, password string) (store StreamStore) {
|
||||
ss := &streamStore{storeDirectory: directory, filenameBase: filenameBase, password: password}
|
||||
os.Mkdir(ss.storeDirectory, 0700)
|
||||
|
||||
ss.initBuffer()
|
||||
ss.initBufferFromStorage()
|
||||
|
||||
return ss
|
||||
}
|
||||
|
||||
func (ss *streamStore) initBuffer() {
|
||||
ss.messages = []model.Message{}
|
||||
ss.bufferByteCount = 0
|
||||
}
|
||||
|
||||
func (ss *streamStore) initBufferFromStorage() error {
|
||||
filename := fmt.Sprintf("%s.%d", ss.filenameBase, 0)
|
||||
|
||||
bytes, _ := readEncryptedFile(ss.storeDirectory, filename, ss.password)
|
||||
|
||||
msgs := []model.Message{}
|
||||
err := json.Unmarshal([]byte(bytes), &msgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, message := range msgs {
|
||||
ss.updateBuffer(message)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *streamStore) updateBuffer(m model.Message) {
|
||||
ss.messages = append(ss.messages, m)
|
||||
ss.bufferByteCount += (model.MessageBaseSize * 1.5) + len(m.Message)
|
||||
}
|
||||
|
||||
func (ss *streamStore) updateFile() error {
|
||||
msgs, err := json.Marshal(ss.messages)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to marshal group messages %v\n", err)
|
||||
}
|
||||
|
||||
// ENCRYPT
|
||||
key, salt, _ := createKey(ss.password)
|
||||
encryptedMsgs, err := encryptFileData(msgs, key)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to encrypt messages: %v\n", err)
|
||||
return err
|
||||
}
|
||||
encryptedMsgs = append(salt[:], encryptedMsgs...)
|
||||
|
||||
ioutil.WriteFile(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, 0)), encryptedMsgs, 0700)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *streamStore) rotateFileStore() {
|
||||
os.Remove(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, fileStorePartitions-1)))
|
||||
|
||||
for i := fileStorePartitions - 2; i >= 0; i-- {
|
||||
os.Rename(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, i)), path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, i+1)))
|
||||
}
|
||||
}
|
||||
|
||||
// Delete deletes all the files associated with this streamStore
|
||||
func (ss *streamStore) Delete() {
|
||||
for i := fileStorePartitions - 1; i >= 0; i-- {
|
||||
os.Remove(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, i)))
|
||||
}
|
||||
}
|
||||
|
||||
// Read returns all messages from the backing file (not the buffer, which is jsut for writing to the current file)
|
||||
func (ss *streamStore) Read() (messages []model.Message) {
|
||||
ss.lock.Lock()
|
||||
defer ss.lock.Unlock()
|
||||
|
||||
resp := []model.Message{}
|
||||
|
||||
for i := fileStorePartitions - 1; i >= 0; i-- {
|
||||
filename := fmt.Sprintf("%s.%d", ss.filenameBase, i)
|
||||
|
||||
bytes, err := readEncryptedFile(ss.storeDirectory, filename, ss.password)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
msgs := []model.Message{}
|
||||
json.Unmarshal([]byte(bytes), &msgs)
|
||||
resp = append(resp, msgs...)
|
||||
}
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// AddMessage adds a GroupMessage to the store
|
||||
func (ss *streamStore) Write(m model.Message) {
|
||||
ss.lock.Lock()
|
||||
defer ss.lock.Unlock()
|
||||
ss.updateBuffer(m)
|
||||
ss.updateFile()
|
||||
|
||||
if ss.bufferByteCount > bytesPerFile {
|
||||
log.Debugf("rotating log file")
|
||||
ss.rotateFileStore()
|
||||
ss.initBuffer()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,56 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/model"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const testingDir = "./testing"
|
||||
const filenameBase = "testStream"
|
||||
const password = "asdfqwer"
|
||||
const line1 = "Hello from storage!"
|
||||
|
||||
func TestStreamStoreWriteRead(t *testing.T) {
|
||||
|
||||
log.SetLevel(log.LevelDebug)
|
||||
|
||||
os.Remove(".test.json")
|
||||
os.RemoveAll(testingDir)
|
||||
os.Mkdir(testingDir, 0777)
|
||||
ss1 := NewStreamStore(testingDir, filenameBase, password)
|
||||
m := model.Message{Message: line1}
|
||||
ss1.Write(m)
|
||||
|
||||
ss2 := NewStreamStore(testingDir, filenameBase, password)
|
||||
messages := ss2.Read()
|
||||
if len(messages) != 1 {
|
||||
t.Errorf("Read messages has wrong length. Expected: 1 Actual: %d\n", len(messages))
|
||||
}
|
||||
if messages[0].Message != line1 {
|
||||
t.Errorf("Read message has wrong content. Expected: '%v' Actual: '%v'\n", line1, messages[0].Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamStoreWriteReadRotate(t *testing.T) {
|
||||
|
||||
log.SetLevel(log.LevelDebug)
|
||||
os.Remove(".test.json")
|
||||
os.RemoveAll(testingDir)
|
||||
os.Mkdir(testingDir, 0777)
|
||||
ss1 := NewStreamStore(testingDir, filenameBase, password)
|
||||
m := model.Message{Message: line1}
|
||||
for i := 0; i < 400; i++ {
|
||||
ss1.Write(m)
|
||||
}
|
||||
|
||||
ss2 := NewStreamStore(testingDir, filenameBase, password)
|
||||
messages := ss2.Read()
|
||||
if len(messages) != 400 {
|
||||
t.Errorf("Read messages has wrong length. Expected: 400 Actual: %d\n", len(messages))
|
||||
}
|
||||
if messages[0].Message != line1 {
|
||||
t.Errorf("Read message has wrong content. Expected: '%v' Actual: '%v'\n", line1, messages[0].Message)
|
||||
}
|
||||
}
|
|
@ -1,18 +1,20 @@
|
|||
package testing
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
app2 "cwtch.im/cwtch/app"
|
||||
"cwtch.im/cwtch/app/utils"
|
||||
"cwtch.im/cwtch/event/bridge"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/peer"
|
||||
"cwtch.im/cwtch/peer/connections"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
cwtchserver "cwtch.im/cwtch/server"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/utils"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/libricochet-go/log"
|
||||
"golang.org/x/net/proxy"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
@ -28,40 +30,11 @@ var (
|
|||
carolLines = []string{"Howdy, thanks!"}
|
||||
)
|
||||
|
||||
// TODO: fix to load private key from server/app/serverConfig.json
|
||||
func loadPrivateKey(t *testing.T) *rsa.PrivateKey {
|
||||
if _, err := os.Stat(serverKeyfile); os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Println("Found server key " + serverKeyfile + ", loading...")
|
||||
pk, err := utils.LoadPrivateKeyFromFile(serverKeyfile)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not load server's key from %v", serverKeyfile)
|
||||
}
|
||||
return pk
|
||||
}
|
||||
|
||||
func genPrivateKey(t *testing.T) *rsa.PrivateKey {
|
||||
fmt.Println("generating new private key...")
|
||||
pk, err := utils.GeneratePrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("error generating new private key: %v\n", err)
|
||||
}
|
||||
err = ioutil.WriteFile(localKeyfile, []byte(utils.PrivateKeyToString(pk)), 0600)
|
||||
if err != nil {
|
||||
t.Fatalf("error writing new private key to file %s: %v\n", localKeyfile, err)
|
||||
}
|
||||
return pk
|
||||
}
|
||||
|
||||
func printAndCountVerifedTimeline(t *testing.T, timeline []model.Message) int {
|
||||
numVerified := 0
|
||||
for _, message := range timeline {
|
||||
fmt.Printf("%v %v> %s [%t]\n", message.Timestamp, message.PeerID, message.Message, message.Verified)
|
||||
if message.Verified {
|
||||
numVerified++
|
||||
}
|
||||
fmt.Printf("%v %v> %s\n", message.Timestamp, message.PeerID, message.Message)
|
||||
numVerified++
|
||||
}
|
||||
return numVerified
|
||||
}
|
||||
|
@ -84,53 +57,81 @@ func serverCheck(t *testing.T, serverAddr string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func waitForPeerConnection(t *testing.T, peer peer.CwtchPeerInterface, server string) {
|
||||
func waitForPeerGroupConnection(t *testing.T, peer peer.CwtchPeer, groupID string) {
|
||||
for {
|
||||
servers := peer.GetServers()
|
||||
state, ok := servers[server]
|
||||
_, ok := peer.GetProfile().Groups[groupID]
|
||||
if ok {
|
||||
state := peer.GetGroupState(groupID)
|
||||
//log.Infof("Waiting for Peer %v to join group %v - state: %v\n", peer.GetProfile().Name, groupID, state)
|
||||
if state == connections.FAILED {
|
||||
t.Fatalf("%v could not connect to %v", peer.GetProfile().Onion, server)
|
||||
t.Fatalf("%v could not connect to %v", peer.GetProfile().Onion, groupID)
|
||||
}
|
||||
if state != connections.SYNCED {
|
||||
fmt.Printf("peer %v waiting connect to group %v, currently: %v\n", peer.GetProfile().Onion, groupID, connections.ConnectionStateName[state])
|
||||
time.Sleep(time.Second * 5)
|
||||
continue
|
||||
} else {
|
||||
break
|
||||
}
|
||||
} // It might take a second for the server to show up as it is now going through the event bus
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func waitForPeerPeerConnection(t *testing.T, peera peer.CwtchPeer, peerb peer.CwtchPeer) {
|
||||
for {
|
||||
//peers := peera.GetPeers()
|
||||
_, ok := peera.GetProfile().Contacts[peerb.GetProfile().Onion]
|
||||
if ok {
|
||||
state := peera.GetPeerState(peerb.GetProfile().Onion)
|
||||
//log.Infof("Waiting for Peer %v to peer with peer: %v - state: %v\n", peera.GetProfile().Name, peerb.GetProfile().Name, state)
|
||||
if state == connections.FAILED {
|
||||
t.Fatalf("%v could not connect to %v", peera.GetProfile().Onion, peerb.GetProfile().Onion)
|
||||
}
|
||||
if state != connections.AUTHENTICATED {
|
||||
time.Sleep(time.Second * 10)
|
||||
fmt.Printf("peer% v waiting connect to peer %v, currently: %v\n", peera.GetProfile().Onion, peerb.GetProfile().Onion, connections.ConnectionStateName[state])
|
||||
time.Sleep(time.Second * 5)
|
||||
continue
|
||||
} else {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("peer server connectiond %v should have entry for server %v", servers, server)
|
||||
}
|
||||
break
|
||||
} // It might take a second for the peer to show up as it is now going through the event bus
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestCwtchPeerIntegration(t *testing.T) {
|
||||
// Hide logging "noise"
|
||||
log.SetOutput(ioutil.Discard)
|
||||
numGoRoutinesStart := runtime.NumGoroutine()
|
||||
|
||||
// ***** Cwtch Server managment *****
|
||||
log.AddEverythingFromPattern("connectivity")
|
||||
log.SetLevel(log.LevelDebug)
|
||||
log.ExcludeFromPattern("connection/connection")
|
||||
log.ExcludeFromPattern("outbound/3dhauthchannel")
|
||||
log.ExcludeFromPattern("event/eventmanager")
|
||||
log.ExcludeFromPattern("pipeBridge")
|
||||
acn, err := connectivity.StartTor(".", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Could not start Tor: %v", err)
|
||||
}
|
||||
|
||||
// ***** Cwtch Server management *****
|
||||
var server *cwtchserver.Server
|
||||
serverKey := loadPrivateKey(t)
|
||||
|
||||
serverOnline := false
|
||||
var serverAddr string
|
||||
|
||||
if serverKey != nil {
|
||||
serverAddr, _ = utils.GetOnionAddress(serverKey)
|
||||
fmt.Printf("Checking if test server %v is online...\n", serverAddr)
|
||||
serverOnline = serverCheck(t, serverAddr)
|
||||
}
|
||||
|
||||
if !serverOnline {
|
||||
// launch app with new key
|
||||
fmt.Println("No server found!")
|
||||
serverKey = genPrivateKey(t)
|
||||
serverAddr, _ = utils.GetOnionAddress(serverKey)
|
||||
server = new(cwtchserver.Server)
|
||||
fmt.Println("Starting cwtch server...")
|
||||
config := cwtchserver.Config{PrivateKeyBytes: utils.PrivateKeyToString(serverKey), MaxBufferLines: 100, ServerReporting: cwtchserver.Reporting{}}
|
||||
go server.Run(&config)
|
||||
os.Remove("server-test.json")
|
||||
config := cwtchserver.LoadConfig(".", "server-test.json")
|
||||
identity := config.Identity()
|
||||
serverAddr = identity.Hostname()
|
||||
go server.Run(acn, config)
|
||||
|
||||
// let tor get established
|
||||
fmt.Printf("Establishing Tor hidden service: %v...\n", serverAddr)
|
||||
|
@ -140,25 +141,40 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
|
||||
numGoRoutinesPostServer := runtime.NumGoroutine()
|
||||
|
||||
// ***** Peer setup *****
|
||||
app := app2.NewApp(acn, "./storage")
|
||||
|
||||
bridgeClient := bridge.NewPipeBridgeClient("./clientPipe", "./servicePipe")
|
||||
bridgeService := bridge.NewPipeBridgeService("./servicePipe", "./clientPipe")
|
||||
appClient := app2.NewAppClient("./storage", bridgeClient)
|
||||
appService := app2.NewAppService(acn, "./storage", bridgeService)
|
||||
|
||||
numGoRoutinesPostAppStart := runtime.NumGoroutine()
|
||||
|
||||
// ***** cwtchPeer setup *****
|
||||
|
||||
fmt.Println("Creating Alice...")
|
||||
alice := peer.NewCwtchPeer("Alice", "alicepass")
|
||||
go alice.Listen()
|
||||
fmt.Println("Alice created:", alice.GetProfile().Onion)
|
||||
app.CreatePeer("alice", "asdfasdf")
|
||||
|
||||
fmt.Println("Creating Bob...")
|
||||
bob := peer.NewCwtchPeer("Bob", "bobpass")
|
||||
go bob.Listen()
|
||||
fmt.Println("Bob created:", bob.GetProfile().Onion)
|
||||
app.CreatePeer("bob", "asdfasdf")
|
||||
|
||||
fmt.Println("Creating Carol...")
|
||||
carol := peer.NewCwtchPeer("Carol", "carolpass")
|
||||
go carol.Listen()
|
||||
appClient.CreatePeer("carol", "asdfasdf")
|
||||
|
||||
alice := utils.WaitGetPeer(app, "alice")
|
||||
fmt.Println("Alice created:", alice.GetProfile().Onion)
|
||||
|
||||
bob := utils.WaitGetPeer(app, "bob")
|
||||
fmt.Println("Bob created:", bob.GetProfile().Onion)
|
||||
|
||||
carol := utils.WaitGetPeer(appClient, "carol")
|
||||
fmt.Println("Carol created:", carol.GetProfile().Onion)
|
||||
|
||||
fmt.Println("Waiting for Alice, Bob, and Carol to connection with onion network...")
|
||||
time.Sleep(time.Second * 70)
|
||||
app.LaunchPeers()
|
||||
appClient.LaunchPeers()
|
||||
|
||||
fmt.Println("Waiting for Alice, Bob, and Carol to connect with onion network...")
|
||||
time.Sleep(time.Second * 90)
|
||||
numGoRoutinesPostPeerStart := runtime.NumGoroutine()
|
||||
|
||||
// ***** Peering, server joining, group creation / invite *****
|
||||
|
@ -172,8 +188,10 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
}
|
||||
|
||||
fmt.Println("Alice peering with Bob...")
|
||||
alice.AddContact("Bob", bob.GetProfile().Onion, false) // Add contact so we can track connection state
|
||||
alice.PeerWithOnion(bob.GetProfile().Onion)
|
||||
fmt.Println("Alice peering with Carol...")
|
||||
alice.AddContact("Carol", carol.GetProfile().Onion, false)
|
||||
alice.PeerWithOnion(carol.GetProfile().Onion)
|
||||
|
||||
fmt.Println("Alice joining server...")
|
||||
|
@ -181,8 +199,17 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
fmt.Println("Bob joining server...")
|
||||
bob.JoinServer(serverAddr)
|
||||
|
||||
fmt.Println("Waiting for peerings and server joins...")
|
||||
time.Sleep(time.Second * 120)
|
||||
fmt.Println("Waiting for alice to join server...")
|
||||
waitForPeerGroupConnection(t, alice, groupID)
|
||||
|
||||
//fmt.Println("Waiting for bob to join server...")
|
||||
//waitForPeerGroupConnection(t, bob, groupID)
|
||||
|
||||
fmt.Println("Waiting for alice and Bob to peer...")
|
||||
waitForPeerPeerConnection(t, alice, bob)
|
||||
|
||||
/*fmt.Println("Waiting for peerings and server joins...")
|
||||
time.Sleep(time.Second * 240)*/
|
||||
|
||||
fmt.Println("Alice inviting Bob to group...")
|
||||
err = alice.InviteOnionToGroup(bob.GetProfile().Onion, groupID)
|
||||
|
@ -205,35 +232,9 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
|
||||
numGoRoutinesPostServerConnect := runtime.NumGoroutine()
|
||||
|
||||
// ***** Fill up message history of server ******
|
||||
|
||||
/*
|
||||
// filler group will be used to fill up the servers message history a bit to stress test fetch later for carol
|
||||
fillerGroupId, _, err := alice.Profile.StartGroup(serverAddr)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to init filler group: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Alice filling message history of server...")
|
||||
for i := 0; i < 100; i++ {
|
||||
|
||||
go func (x int) {
|
||||
time.Sleep(time.Second * time.Duration(x))
|
||||
err := alice.SendMessageToGroup(fillerGroupId, aliceLines[0])
|
||||
if err != nil {
|
||||
fmt.Println("SEND", x, "ERROR:", err)
|
||||
} else {
|
||||
fmt.Println("SEND", x, " SUCCESS!")
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
time.Sleep(time.Second * 110)
|
||||
*/
|
||||
// Wait for them to join the server
|
||||
waitForPeerConnection(t, alice, serverAddr)
|
||||
waitForPeerConnection(t, bob, serverAddr)
|
||||
waitForPeerGroupConnection(t, alice, groupID)
|
||||
waitForPeerGroupConnection(t, bob, groupID)
|
||||
//numGouRoutinesPostServerConnect := runtime.NumGoroutine()
|
||||
|
||||
// ***** Conversation *****
|
||||
|
@ -279,13 +280,13 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
}
|
||||
|
||||
fmt.Println("Shutting down Alice...")
|
||||
alice.Shutdown()
|
||||
app.ShutdownPeer(alice.GetProfile().Onion)
|
||||
time.Sleep(time.Second * 5)
|
||||
numGoRoutinesPostAlice := runtime.NumGoroutine()
|
||||
|
||||
fmt.Println("Carol joining server...")
|
||||
carol.JoinServer(serverAddr)
|
||||
waitForPeerConnection(t, carol, serverAddr)
|
||||
waitForPeerGroupConnection(t, carol, groupID)
|
||||
numGoRotinesPostCarolConnect := runtime.NumGoroutine()
|
||||
|
||||
fmt.Println("Bob> ", bobLines[2])
|
||||
|
@ -320,14 +321,14 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
}
|
||||
fmt.Printf("Bob's TimeLine:\n")
|
||||
bobVerified := printAndCountVerifedTimeline(t, bobsGroup.GetTimeline())
|
||||
if bobVerified != 5 {
|
||||
if bobVerified != 6 {
|
||||
t.Errorf("Bob did not have 5 verified messages")
|
||||
}
|
||||
|
||||
carolsGroup := carol.GetGroup(groupID)
|
||||
fmt.Printf("Carol's TimeLine:\n")
|
||||
carolVerified := printAndCountVerifedTimeline(t, carolsGroup.GetTimeline())
|
||||
if carolVerified != 3 {
|
||||
if carolVerified != 6 {
|
||||
t.Errorf("Carol did not have 3 verified messages")
|
||||
}
|
||||
|
||||
|
@ -367,7 +368,7 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
}
|
||||
|
||||
fmt.Println("Shutting down Bob...")
|
||||
bob.Shutdown()
|
||||
app.ShutdownPeer(bob.GetProfile().Onion)
|
||||
time.Sleep(time.Second * 3)
|
||||
numGoRoutinesPostBob := runtime.NumGoroutine()
|
||||
if server != nil {
|
||||
|
@ -377,18 +378,46 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
|||
}
|
||||
numGoRoutinesPostServerShutdown := runtime.NumGoroutine()
|
||||
|
||||
fmt.Println("Shuttind down Carol...")
|
||||
carol.Shutdown()
|
||||
fmt.Println("Shutting down Carol...")
|
||||
appClient.ShutdownPeer(carol.GetProfile().Onion)
|
||||
time.Sleep(time.Second * 3)
|
||||
numGoRoutinesPostCarol := runtime.NumGoroutine()
|
||||
|
||||
fmt.Printf("numGoRoutinesStart: %v\nnumGoRoutinesPostServer: %v\nnumGoRoutinesPostPeerStart: %v\nnumGoRoutinesPostPeerAndServerConnect: %v\n"+
|
||||
"numGoRoutinesPostAlice: %v\nnumGoRotinesPostCarolConnect: %v\nnumGoRoutinesPostBob: %v\nnumGoRoutinesPostServerShutdown: %v\nnumGoRoutinesPostCarol: %v\n",
|
||||
numGoRoutinesStart, numGoRoutinesPostServer, numGoRoutinesPostPeerStart, numGoRoutinesPostServerConnect,
|
||||
numGoRoutinesPostAlice, numGoRotinesPostCarolConnect, numGoRoutinesPostBob, numGoRoutinesPostServerShutdown, numGoRoutinesPostCarol)
|
||||
fmt.Println("Shutting down apps...")
|
||||
fmt.Printf("app Shutdown: %v\n", runtime.NumGoroutine())
|
||||
app.Shutdown()
|
||||
fmt.Printf("appClientShutdown: %v\n", runtime.NumGoroutine())
|
||||
appClient.Shutdown()
|
||||
fmt.Printf("appServiceShutdown: %v\n", runtime.NumGoroutine())
|
||||
appService.Shutdown()
|
||||
|
||||
if numGoRoutinesStart != numGoRoutinesPostCarol {
|
||||
t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostCarol)
|
||||
fmt.Printf("bridgeClientShutdown: %v\n", runtime.NumGoroutine())
|
||||
bridgeClient.Shutdown()
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
fmt.Printf("brideServiceShutdown: %v\n", runtime.NumGoroutine())
|
||||
bridgeService.Shutdown()
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
fmt.Printf("Done shutdown: %v\n", runtime.NumGoroutine())
|
||||
numGoRoutinesPostAppShutdown := runtime.NumGoroutine()
|
||||
|
||||
fmt.Println("Shutting down ACN...")
|
||||
acn.Close()
|
||||
time.Sleep(time.Second * 2) // Server ^^ has a 5 second loop attempting reconnect before exiting
|
||||
numGoRoutinesPostACN := runtime.NumGoroutine()
|
||||
|
||||
// Printing out the current goroutines
|
||||
// Very useful if we are leaking any.
|
||||
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||
|
||||
fmt.Printf("numGoRoutinesStart: %v\nnumGoRoutinesPostServer: %v\nnumGoRoutinesPostAppStart: %v\nnumGoRoutinesPostPeerStart: %v\nnumGoRoutinesPostPeerAndServerConnect: %v\n"+
|
||||
"numGoRoutinesPostAlice: %v\nnumGoRotinesPostCarolConnect: %v\nnumGoRoutinesPostBob: %v\nnumGoRoutinesPostServerShutdown: %v\nnumGoRoutinesPostCarol: %v\nnumGoRoutinesPostAppShutdown: %v\nnumGoRoutinesPostACN: %v\n",
|
||||
numGoRoutinesStart, numGoRoutinesPostServer, numGoRoutinesPostAppStart, numGoRoutinesPostPeerStart, numGoRoutinesPostServerConnect,
|
||||
numGoRoutinesPostAlice, numGoRotinesPostCarolConnect, numGoRoutinesPostBob, numGoRoutinesPostServerShutdown, numGoRoutinesPostCarol, numGoRoutinesPostAppShutdown, numGoRoutinesPostACN)
|
||||
|
||||
if numGoRoutinesStart != numGoRoutinesPostACN {
|
||||
t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostACN)
|
||||
}
|
||||
|
||||
}
|
|
@ -14,3 +14,11 @@ go list ./... | xargs golint
|
|||
|
||||
echo "Time to format"
|
||||
gofmt -l -s -w .
|
||||
|
||||
# ineffassign (https://github.com/gordonklaus/ineffassign)
|
||||
echo "Checking for ineffectual assignment of errors (unchecked errors...)"
|
||||
ineffassign .
|
||||
|
||||
# misspell (https://github.com/client9/misspell/cmd/misspell)
|
||||
echo "Checking for misspelled words..."
|
||||
misspell . | grep -v "vendor/" | grep -v "go.sum" | grep -v ".idea"
|
||||
|
|
|
@ -3,20 +3,19 @@
|
|||
set -e
|
||||
pwd
|
||||
go test ${1} -coverprofile=model.cover.out -v ./model
|
||||
go test ${1} -coverprofile=protocol.spam.cover.out -v ./protocol/spam
|
||||
go test ${1} -coverprofile=event.cover.out -v ./event
|
||||
go test ${1} -coverprofile=storage.cover.out -v ./storage
|
||||
go test ${1} -coverprofile=peer.connections.cover.out -v ./peer/connections
|
||||
go test ${1} -coverprofile=peer.fetch.cover.out -v ./peer/fetch
|
||||
go test ${1} -coverprofile=peer.listen.cover.out -v ./peer/listen
|
||||
go test ${1} -coverprofile=peer.peer.cover.out -v ./peer/peer
|
||||
go test ${1} -coverprofile=peer.send.cover.out -v ./peer/send
|
||||
go test ${1} -coverprofile=peer.connections.cover.out -v ./protocol/connections
|
||||
go test ${1} -coverprofile=protocol.spam.cover.out -v ./protocol/connections/spam
|
||||
go test ${1} -coverprofile=peer.fetch.cover.out -v ./protocol/connections/fetch
|
||||
go test ${1} -coverprofile=peer.listen.cover.out -v ./protocol/connections/listen
|
||||
go test ${1} -coverprofile=peer.send.cover.out -v ./protocol/connections/send
|
||||
go test ${1} -coverprofile=peer.cover.out -v ./peer
|
||||
go test ${1} -coverprofile=server.fetch.cover.out -v ./server/fetch
|
||||
go test ${1} -coverprofile=server.listen.cover.out -v ./server/listen
|
||||
go test ${1} -coverprofile=server.send.cover.out -v ./server/send
|
||||
go test ${1} -coverprofile=server.metrics.cover.out -v ./server/metrics
|
||||
go test ${1} -coverprofile=server.cover.out -v ./server
|
||||
go test ${1} -coverprofile=tor.cover.out -v ./connectivity/tor
|
||||
echo "mode: set" > coverage.out && cat *.cover.out | grep -v mode: | sort -r | \
|
||||
awk '{if($1 != last) {print $0;last=$1}}' >> coverage.out
|
||||
rm -rf *.cover.out
|
||||
|
|
Loading…
Reference in New Issue