Compare commits
284 Commits
Author | SHA1 | Date |
---|---|---|
Sarah Jamie Lewis | a7b885166a | |
Sarah Jamie Lewis | b32b11c711 | |
Sarah Jamie Lewis | 0e96539f22 | |
Sarah Jamie Lewis | e55f342324 | |
Sarah Jamie Lewis | 89aca91b37 | |
Sarah Jamie Lewis | cd918c02ea | |
Sarah Jamie Lewis | 05a198c89f | |
Sarah Jamie Lewis | 1d9202ff93 | |
Sarah Jamie Lewis | 0907af57d5 | |
Sarah Jamie Lewis | 826ac40a5c | |
Sarah Jamie Lewis | 1a034953df | |
Sarah Jamie Lewis | 3124f7b7c4 | |
Sarah Jamie Lewis | 792e79dceb | |
Sarah Jamie Lewis | 3e0680943a | |
Sarah Jamie Lewis | 9cb62d269e | |
Sarah Jamie Lewis | ec71e56d23 | |
Sarah Jamie Lewis | aaabb12b6c | |
Sarah Jamie Lewis | b0a87ee8d0 | |
Sarah Jamie Lewis | d66beb95e5 | |
Sarah Jamie Lewis | 41b3e20aff | |
Sarah Jamie Lewis | 1c7003fb96 | |
Dan Ballard | cb3b0b4c46 | |
Sarah Jamie Lewis | a18c19bbf2 | |
Sarah Jamie Lewis | be4230d16e | |
Sarah Jamie Lewis | 34957f809b | |
Sarah Jamie Lewis | 456a5f5c4d | |
Sarah Jamie Lewis | 657fb76b04 | |
Sarah Jamie Lewis | c0bc3b0803 | |
Sarah Jamie Lewis | 7a962359b3 | |
Sarah Jamie Lewis | 935b4a1103 | |
Sarah Jamie Lewis | 51d146fb5c | |
Sarah Jamie Lewis | 6d9e892408 | |
Sarah Jamie Lewis | 44856003d6 | |
Sarah Jamie Lewis | f16eeb1922 | |
Sarah Jamie Lewis | 13583f3e8c | |
Sarah Jamie Lewis | 58b1008cae | |
Sarah Jamie Lewis | 45d6d76a7d | |
Sarah Jamie Lewis | f42e25e926 | |
Sarah Jamie Lewis | 7538f1a531 | |
Sarah Jamie Lewis | a5cea1ca7b | |
Sarah Jamie Lewis | e311301d72 | |
Sarah Jamie Lewis | 7464e3922d | |
Sarah Jamie Lewis | 298a8d8aea | |
Sarah Jamie Lewis | 75a3c14285 | |
Sarah Jamie Lewis | 407902b8ee | |
Sarah Jamie Lewis | 6d29ca322e | |
Sarah Jamie Lewis | fb164b104b | |
Sarah Jamie Lewis | 048effc91a | |
Sarah Jamie Lewis | ca63205934 | |
Sarah Jamie Lewis | 0997406e51 | |
Sarah Jamie Lewis | 602041d1c2 | |
Sarah Jamie Lewis | 95527f8978 | |
Sarah Jamie Lewis | d5c3795f13 | |
Sarah Jamie Lewis | 51f993973c | |
Sarah Jamie Lewis | 5b2b839865 | |
Sarah Jamie Lewis | 151e25b607 | |
Sarah Jamie Lewis | fac34ad814 | |
Sarah Jamie Lewis | aae8a7fc03 | |
Sarah Jamie Lewis | e1877d69b7 | |
Sarah Jamie Lewis | 066ed86598 | |
Sarah Jamie Lewis | 4db041f850 | |
Sarah Jamie Lewis | 546180d65e | |
Sarah Jamie Lewis | 9dbc398690 | |
Sarah Jamie Lewis | b27229091a | |
Dan Ballard | 1f2617e4ae | |
Sarah Jamie Lewis | 6b212beb00 | |
Sarah Jamie Lewis | f2ad64fe8b | |
Sarah Jamie Lewis | 8d7052bb8d | |
Sarah Jamie Lewis | a47d916eac | |
Sarah Jamie Lewis | 3a7d2fce05 | |
Sarah Jamie Lewis | 3f1e2d7a14 | |
Sarah Jamie Lewis | 1e0cbe1dc6 | |
Sarah Jamie Lewis | 77e4e981e8 | |
Sarah Jamie Lewis | b84de2aa61 | |
Sarah Jamie Lewis | 75eb49d6ee | |
Sarah Jamie Lewis | cfb2335c05 | |
Sarah Jamie Lewis | 31f397e332 | |
Dan Ballard | eb0636a229 | |
Sarah Jamie Lewis | def585b23b | |
Sarah Jamie Lewis | 9605894463 | |
Sarah Jamie Lewis | 2bbe0c48d6 | |
Sarah Jamie Lewis | 655b1cf208 | |
Sarah Jamie Lewis | 86ae2a7c1a | |
Sarah Jamie Lewis | cff2a8cafe | |
Sarah Jamie Lewis | 035c6c669f | |
Sarah Jamie Lewis | 462a294c93 | |
Sarah Jamie Lewis | f982e55c4f | |
Sarah Jamie Lewis | bc522b57c1 | |
Sarah Jamie Lewis | 8fd6d5ead2 | |
Sarah Jamie Lewis | 50cca925de | |
Sarah Jamie Lewis | b81353c128 | |
Sarah Jamie Lewis | 05cc347ba2 | |
Sarah Jamie Lewis | 92eed46c56 | |
Sarah Jamie Lewis | 2abfaf82a1 | |
Sarah Jamie Lewis | f5c397876b | |
Sarah Jamie Lewis | 3b822393cd | |
Dan Ballard | 7053f4a31b | |
Dan Ballard | e9e2a18678 | |
Dan Ballard | 440b7f422c | |
Dan Ballard | 12b89966de | |
Sarah Jamie Lewis | 70c335df81 | |
Sarah Jamie Lewis | 8ab0e9993a | |
Sarah Jamie Lewis | 48e5f44f84 | |
Sarah Jamie Lewis | 79c51b0e6d | |
Sarah Jamie Lewis | 4e0fbbc1de | |
Sarah Jamie Lewis | d9298f84b2 | |
Sarah Jamie Lewis | 210c91f7f7 | |
Sarah Jamie Lewis | 746bfffb7c | |
Sarah Jamie Lewis | 93c9813d96 | |
Sarah Jamie Lewis | 7255a6c71e | |
Sarah Jamie Lewis | 5f448ac2c2 | |
Sarah Jamie Lewis | 02fe9323c4 | |
Sarah Jamie Lewis | af0914103d | |
Sarah Jamie Lewis | 3967cceb83 | |
Sarah Jamie Lewis | 221c55868e | |
Sarah Jamie Lewis | cbfead7455 | |
Sarah Jamie Lewis | c4460b67a1 | |
Sarah Jamie Lewis | dbac41d949 | |
Sarah Jamie Lewis | f3296ffdd9 | |
Sarah Jamie Lewis | 28ddbcc132 | |
Sarah Jamie Lewis | cccb97d5f0 | |
Sarah Jamie Lewis | 2e59cc43ab | |
Sarah Jamie Lewis | 51f85ea619 | |
Sarah Jamie Lewis | 7107ad1eaa | |
Sarah Jamie Lewis | 4d81529ce2 | |
Sarah Jamie Lewis | 4588cbc604 | |
Sarah Jamie Lewis | e94964c583 | |
Sarah Jamie Lewis | 08c6cdd858 | |
Sarah Jamie Lewis | b02d9f7fb9 | |
Sarah Jamie Lewis | 264b8b9363 | |
Sarah Jamie Lewis | fcb07042d7 | |
Sarah Jamie Lewis | de32ae240a | |
Sarah Jamie Lewis | 186a33deb6 | |
Sarah Jamie Lewis | 0139f7a5a9 | |
Sarah Jamie Lewis | d50f210e35 | |
Sarah Jamie Lewis | 7bb75e4365 | |
Sarah Jamie Lewis | 0ea5cbba31 | |
Sarah Jamie Lewis | 456afb0262 | |
Sarah Jamie Lewis | 243b827522 | |
Sarah Jamie Lewis | a6a196a1c1 | |
Sarah Jamie Lewis | 14962e2428 | |
Sarah Jamie Lewis | aceb4adeb1 | |
Sarah Jamie Lewis | 848d5971b6 | |
Sarah Jamie Lewis | 9abece0f50 | |
Sarah Jamie Lewis | 05e77604d2 | |
Sarah Jamie Lewis | 195e048410 | |
Sarah Jamie Lewis | 0e49d70d65 | |
Sarah Jamie Lewis | 861390b11d | |
Sarah Jamie Lewis | f246ea1e40 | |
Sarah Jamie Lewis | 26c5c11216 | |
Sarah Jamie Lewis | 697b3df54c | |
Sarah Jamie Lewis | a698f34bfa | |
Sarah Jamie Lewis | c946ff5574 | |
Dan Ballard | 3bb2b0988e | |
Dan Ballard | 2876fdf7f4 | |
Sarah Jamie Lewis | ea3ef33ac5 | |
Sarah Jamie Lewis | 4e2000cae4 | |
Dan Ballard | 32a02b68dc | |
Sarah Jamie Lewis | 667fc15294 | |
Sarah Jamie Lewis | 5ef2f6f94c | |
Sarah Jamie Lewis | 06a2539502 | |
Sarah Jamie Lewis | bfe8b1e51f | |
Sarah Jamie Lewis | 7de9c21f7b | |
Dan Ballard | 491ff6e710 | |
Dan Ballard | 6eef88fc2d | |
Sarah Jamie Lewis | f630dedab6 | |
Dan Ballard | ca309096eb | |
Dan Ballard | 58921e381b | |
Dan Ballard | 530f2d9773 | |
Dan Ballard | bdb4b93f59 | |
Dan Ballard | 06d402c4d7 | |
Sarah Jamie Lewis | 321b08bfd3 | |
Dan Ballard | c8a6a1b079 | |
Dan Ballard | 5658e9aa9f | |
Dan Ballard | 2a877ff408 | |
Dan Ballard | 726fe28498 | |
Dan Ballard | ad72ce6e7a | |
Dan Ballard | 6d8f31773e | |
Sarah Jamie Lewis | 9ef244bc80 | |
Sarah Jamie Lewis | e319976832 | |
Sarah Jamie Lewis | 0ba45cd59a | |
Sarah Jamie Lewis | 4324ffae03 | |
Sarah Jamie Lewis | f2b879a9c4 | |
Sarah Jamie Lewis | c66561d84f | |
Dan Ballard | 8a1f9376e2 | |
Sarah Jamie Lewis | a84d627926 | |
Sarah Jamie Lewis | 120a2136b2 | |
Dan Ballard | b06f32b9e2 | |
Sarah Jamie Lewis | 9c4ed7cc7b | |
Sarah Jamie Lewis | bb0246b8d9 | |
Sarah Jamie Lewis | 7863ed2aef | |
Sarah Jamie Lewis | cf036bdee4 | |
Sarah Jamie Lewis | 9c65ad4af3 | |
Sarah Jamie Lewis | 0e10b47c42 | |
Sarah Jamie Lewis | 0b72a90b1f | |
Sarah Jamie Lewis | 35ca930628 | |
Sarah Jamie Lewis | 8d2134c4db | |
Sarah Jamie Lewis | 0f4c6de2e6 | |
Sarah Jamie Lewis | 27cec93ad7 | |
Sarah Jamie Lewis | d455eb6477 | |
Sarah Jamie Lewis | f52919271c | |
Dan Ballard | c8d7ec80ed | |
Sarah Jamie Lewis | 9554e428d2 | |
Dan Ballard | ab14884bcf | |
Dan Ballard | bdb9ac5db4 | |
Dan Ballard | c8f807ac7d | |
Dan Ballard | cd37f29341 | |
Dan Ballard | 7fe7ba72c7 | |
Sarah Jamie Lewis | f46c717ff9 | |
Sarah Jamie Lewis | 79bf060c2f | |
Sarah Jamie Lewis | 5765cfd6c4 | |
Sarah Jamie Lewis | 15836ad7de | |
Dan Ballard | 41f55451d4 | |
Dan Ballard | 33bcc40206 | |
Sarah Jamie Lewis | e3efdde7b5 | |
Sarah Jamie Lewis | 3d49511c6c | |
Dan Ballard | 82346e399f | |
Sarah Jamie Lewis | 720fb664de | |
Sarah Jamie Lewis | b2efbb8843 | |
Dan Ballard | bd64f708cf | |
Sarah Jamie Lewis | 8b9b0906ec | |
Dan Ballard | 7c753437f9 | |
Dan Ballard | 2183c0b051 | |
Sarah Jamie Lewis | b95c2c12eb | |
Dan Ballard | 60caa08868 | |
Dan Ballard | 3dc5dbb38e | |
Dan Ballard | b64229c8b7 | |
Dan Ballard | 56cf2b7bf6 | |
Dan Ballard | 5b1ac38473 | |
Sarah Jamie Lewis | 4d080a2854 | |
Sarah Jamie Lewis | 803d953778 | |
Sarah Jamie Lewis | 1a24e8d4b1 | |
Sarah Jamie Lewis | fa3358cb89 | |
Sarah Jamie Lewis | eb5a60bbb6 | |
Sarah Jamie Lewis | 02044e10f3 | |
Sarah Jamie Lewis | bc38f4ec0a | |
Sarah Jamie Lewis | 45e9dfe869 | |
Sarah Jamie Lewis | 88ddecae56 | |
Dan Ballard | 191e287d75 | |
Sarah Jamie Lewis | cade5f7793 | |
Sarah Jamie Lewis | 6fa627c1fa | |
Sarah Jamie Lewis | 1300c94d08 | |
Sarah Jamie Lewis | d02feecda0 | |
Sarah Jamie Lewis | b9d0a843fc | |
Sarah Jamie Lewis | 78fab87569 | |
Sarah Jamie Lewis | 7c25ddaf3d | |
Sarah Jamie Lewis | 4334d3ff3f | |
Sarah Jamie Lewis | 75703bf359 | |
Sarah Jamie Lewis | 9896961b40 | |
Dan Ballard | d13dc5529b | |
Dan Ballard | a27fd47755 | |
Dan Ballard | 664a6dc198 | |
Sarah Jamie Lewis | 3fbf88d34b | |
Sarah Jamie Lewis | a39775d56b | |
Sarah Jamie Lewis | 7fd53a3b16 | |
Sarah Jamie Lewis | abfa95cddb | |
Dan Ballard | 0126379436 | |
Dan Ballard | dd8ed97f90 | |
Sarah Jamie Lewis | 8dfe391122 | |
Sarah Jamie Lewis | cd5f461a33 | |
Sarah Jamie Lewis | dae2d358bc | |
Sarah Jamie Lewis | 512a0834e0 | |
Dan Ballard | 9e506e5190 | |
Sarah Jamie Lewis | ff91300c39 | |
Sarah Jamie Lewis | bf4cca631c | |
Dan Ballard | b13a56d1db | |
Sarah Jamie Lewis | 5a87f835b4 | |
Sarah Jamie Lewis | 8f138b47b0 | |
Sarah Jamie Lewis | a9ab91688b | |
Dan Ballard | 93e2a25673 | |
erinn | d6a34258be | |
Sarah Jamie Lewis | c24bb95af5 | |
erinn | d9eefd4be5 | |
Sarah Jamie Lewis | 1345cf519b | |
Sarah Jamie Lewis | 7d2d3979c1 | |
Sarah Jamie Lewis | 9621e294c2 | |
Sarah Jamie Lewis | dec2b7182c | |
Sarah Jamie Lewis | f3ac8c0098 | |
erinn | 5dc0579075 | |
Sarah Jamie Lewis | ec6e025284 | |
Sarah Jamie Lewis | a088e588b1 | |
Sarah Jamie Lewis | ea9cf5ca87 | |
Sarah Jamie Lewis | ff4249e2bc | |
Sarah Jamie Lewis | 6bb510e39e |
133
.drone.yml
133
.drone.yml
|
@ -1,72 +1,89 @@
|
||||||
workspace:
|
---
|
||||||
base: /go
|
kind: pipeline
|
||||||
path: src/cwtch.im/cwtch
|
type: docker
|
||||||
|
name: linux-test
|
||||||
|
|
||||||
pipeline:
|
steps:
|
||||||
fetch:
|
- name: fetch
|
||||||
image: golang
|
image: golang:1.21.5
|
||||||
when:
|
volumes:
|
||||||
repo: cwtch.im/cwtch
|
- name: deps
|
||||||
branch: master
|
path: /go
|
||||||
event: [ push, pull_request ]
|
|
||||||
commands:
|
commands:
|
||||||
- go install honnef.co/go/tools/cmd/staticcheck@latest
|
- go install honnef.co/go/tools/cmd/staticcheck@latest
|
||||||
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/master/tor/tor
|
- go install go.uber.org/nilaway/cmd/nilaway@latest
|
||||||
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/master/tor/torrc
|
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/branch/master/tor/tor-0.4.8.9-linux-x86_64.tar.gz -O tor.tar.gz
|
||||||
- chmod a+x tor
|
- tar -xzf tor.tar.gz
|
||||||
- go get -u golang.org/x/lint/golint
|
- chmod a+x Tor/tor
|
||||||
|
- export PATH=$PWD/Tor/:$PATH
|
||||||
|
- export LD_LIBRARY_PATH=$PWD/Tor/
|
||||||
|
- tor --version
|
||||||
- export GO111MODULE=on
|
- export GO111MODULE=on
|
||||||
- go mod vendor
|
- name: quality
|
||||||
quality:
|
image: golang:1.21.5
|
||||||
image: golang
|
volumes:
|
||||||
when:
|
- name: deps
|
||||||
repo: cwtch.im/cwtch
|
path: /go
|
||||||
branch: master
|
|
||||||
event: [ push, pull_request ]
|
|
||||||
commands:
|
commands:
|
||||||
- staticcheck ./...
|
- ./testing/quality.sh
|
||||||
units-tests:
|
- name: units-tests
|
||||||
image: golang
|
image: golang:1.21.5
|
||||||
when:
|
volumes:
|
||||||
repo: cwtch.im/cwtch
|
- name: deps
|
||||||
branch: master
|
path: /go
|
||||||
event: [ push, pull_request ]
|
|
||||||
commands:
|
commands:
|
||||||
- export PATH=$PATH:/go/src/cwtch.im/cwtch
|
- export PATH=`pwd`:$PATH
|
||||||
- sh testing/tests.sh
|
- sh testing/tests.sh
|
||||||
integ-test:
|
- name: integ-test
|
||||||
image: golang
|
image: golang:1.21.5
|
||||||
when:
|
volumes:
|
||||||
repo: cwtch.im/cwtch
|
- name: deps
|
||||||
branch: master
|
path: /go
|
||||||
event: [ push, pull_request ]
|
|
||||||
commands:
|
commands:
|
||||||
- go test -race -v cwtch.im/cwtch/testing/
|
- export PATH=$PWD/Tor/:$PATH
|
||||||
filesharing-integ-test:
|
- export LD_LIBRARY_PATH=$PWD/Tor/
|
||||||
image: golang
|
- tor --version
|
||||||
when:
|
- go test -timeout=30m -race -v cwtch.im/cwtch/testing/
|
||||||
repo: cwtch.im/cwtch
|
- name: filesharing-integ-test
|
||||||
branch: master
|
image: golang:1.21.5
|
||||||
event: [ push, pull_request ]
|
volumes:
|
||||||
|
- name: deps
|
||||||
|
path: /go
|
||||||
commands:
|
commands:
|
||||||
- go test -race -v cwtch.im/cwtch/testing/filesharing
|
- export PATH=$PWD/Tor/:$PATH
|
||||||
notify-email:
|
- export LD_LIBRARY_PATH=$PWD/Tor/
|
||||||
image: drillster/drone-email
|
- go test -timeout=20m -race -v cwtch.im/cwtch/testing/filesharing
|
||||||
host: build.openprivacy.ca
|
- name: filesharing-autodownload-integ-test
|
||||||
port: 25
|
image: golang:1.21.5
|
||||||
skip_verify: true
|
volumes:
|
||||||
from: drone@openprivacy.ca
|
- name: deps
|
||||||
when:
|
path: /go
|
||||||
repo: cwtch.im/cwtch
|
commands:
|
||||||
branch: master
|
- export PATH=$PWD/Tor/:$PATH
|
||||||
event: [ push, pull_request ]
|
- export LD_LIBRARY_PATH=$PWD/Tor/
|
||||||
status: [ failure ]
|
- go test -timeout=20m -race -v cwtch.im/cwtch/testing/autodownload
|
||||||
notify-gogs:
|
- name: notify-gogs
|
||||||
image: openpriv/drone-gogs
|
image: openpriv/drone-gogs
|
||||||
|
pull: if-not-exists
|
||||||
when:
|
when:
|
||||||
repo: cwtch.im/cwtch
|
|
||||||
branch: master
|
|
||||||
event: pull_request
|
event: pull_request
|
||||||
status: [ success, changed, failure ]
|
status: [ success, changed, failure ]
|
||||||
secrets: [gogs_account_token]
|
environment:
|
||||||
|
GOGS_ACCOUNT_TOKEN:
|
||||||
|
from_secret: gogs_account_token
|
||||||
|
settings:
|
||||||
gogs_url: https://git.openprivacy.ca
|
gogs_url: https://git.openprivacy.ca
|
||||||
|
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
# gopath where bin and pkg lives to persist across steps
|
||||||
|
- name: deps
|
||||||
|
temp: {}
|
||||||
|
|
||||||
|
trigger:
|
||||||
|
repo: cwtch.im/cwtch
|
||||||
|
branch: master
|
||||||
|
event:
|
||||||
|
- push
|
||||||
|
- pull_request
|
||||||
|
- tag
|
||||||
|
|
|
@ -28,3 +28,11 @@ tokens1.db
|
||||||
arch/
|
arch/
|
||||||
testing/encryptedstorage/encrypted_storage_profiles
|
testing/encryptedstorage/encrypted_storage_profiles
|
||||||
testing/encryptedstorage/tordir
|
testing/encryptedstorage/tordir
|
||||||
|
*.tar.gz
|
||||||
|
data-dir-cwtchtool/
|
||||||
|
tokens
|
||||||
|
tordir/
|
||||||
|
testing/autodownload/download_dir
|
||||||
|
testing/autodownload/storage
|
||||||
|
*.swp
|
||||||
|
testing/managerstorage/*
|
457
app/app.go
457
app/app.go
|
@ -3,15 +3,18 @@ package app
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/app/plugins"
|
"cwtch.im/cwtch/app/plugins"
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/extensions"
|
||||||
|
"cwtch.im/cwtch/functionality/filesharing"
|
||||||
|
"cwtch.im/cwtch/functionality/servers"
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"cwtch.im/cwtch/model/attr"
|
"cwtch.im/cwtch/model/attr"
|
||||||
"cwtch.im/cwtch/model/constants"
|
"cwtch.im/cwtch/model/constants"
|
||||||
"cwtch.im/cwtch/peer"
|
"cwtch.im/cwtch/peer"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
"cwtch.im/cwtch/storage"
|
"cwtch.im/cwtch/storage"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
path "path/filepath"
|
path "path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -21,28 +24,53 @@ import (
|
||||||
type application struct {
|
type application struct {
|
||||||
eventBuses map[string]event.Manager
|
eventBuses map[string]event.Manager
|
||||||
directory string
|
directory string
|
||||||
coremutex sync.Mutex
|
|
||||||
appletPeers
|
peers map[string]peer.CwtchPeer
|
||||||
appletACN
|
acn connectivity.ACN
|
||||||
appletPlugins
|
plugins sync.Map //map[string] []plugins.Plugin
|
||||||
|
|
||||||
engines map[string]connections.Engine
|
engines map[string]connections.Engine
|
||||||
appBus event.Manager
|
appBus event.Manager
|
||||||
|
eventQueue event.Queue
|
||||||
appmutex sync.Mutex
|
appmutex sync.Mutex
|
||||||
|
engineHooks connections.EngineHooks
|
||||||
|
|
||||||
|
settings *settings.GlobalSettingsFile
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) IsFeatureEnabled(experiment string) bool {
|
||||||
|
globalSettings := app.ReadSettings()
|
||||||
|
if globalSettings.ExperimentsEnabled {
|
||||||
|
if status, exists := globalSettings.Experiments[experiment]; exists {
|
||||||
|
return status
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Application is a full cwtch peer application. It allows management, usage and storage of multiple peers
|
// Application is a full cwtch peer application. It allows management, usage and storage of multiple peers
|
||||||
type Application interface {
|
type Application interface {
|
||||||
LoadProfiles(password string)
|
LoadProfiles(password string)
|
||||||
CreateTaggedPeer(name string, password string, tag string)
|
CreateProfile(name string, password string, autostart bool)
|
||||||
DeletePeer(onion string, currentPassword string)
|
InstallEngineHooks(engineHooks connections.EngineHooks)
|
||||||
|
ImportProfile(exportedCwtchFile string, password string) (peer.CwtchPeer, error)
|
||||||
|
EnhancedImportProfile(exportedCwtchFile string, password string) string
|
||||||
|
DeleteProfile(onion string, currentPassword string)
|
||||||
AddPeerPlugin(onion string, pluginID plugins.PluginID)
|
AddPeerPlugin(onion string, pluginID plugins.PluginID)
|
||||||
LaunchPeers()
|
|
||||||
|
|
||||||
GetPrimaryBus() event.Manager
|
GetPrimaryBus() event.Manager
|
||||||
GetEventBus(onion string) event.Manager
|
GetEventBus(onion string) event.Manager
|
||||||
QueryACNStatus()
|
QueryACNStatus()
|
||||||
QueryACNVersion()
|
QueryACNVersion()
|
||||||
|
|
||||||
|
ConfigureConnections(onion string, doListn, doPeers, doServers bool)
|
||||||
|
ActivatePeerEngine(onion string)
|
||||||
|
DeactivatePeerEngine(onion string)
|
||||||
|
|
||||||
|
ReadSettings() settings.GlobalSettings
|
||||||
|
UpdateSettings(settings settings.GlobalSettings)
|
||||||
|
IsFeatureEnabled(experiment string) bool
|
||||||
|
|
||||||
ShutdownPeer(string)
|
ShutdownPeer(string)
|
||||||
Shutdown()
|
Shutdown()
|
||||||
|
|
||||||
|
@ -53,19 +81,156 @@ type Application interface {
|
||||||
// LoadProfileFn is the function signature for a function in an app that loads a profile
|
// LoadProfileFn is the function signature for a function in an app that loads a profile
|
||||||
type LoadProfileFn func(profile peer.CwtchPeer)
|
type LoadProfileFn func(profile peer.CwtchPeer)
|
||||||
|
|
||||||
// NewApp creates a new app with some environment awareness and initializes a Tor Manager
|
func LoadAppSettings(appDirectory string) *settings.GlobalSettingsFile {
|
||||||
func NewApp(acn connectivity.ACN, appDirectory string) Application {
|
|
||||||
log.Debugf("NewApp(%v)\n", appDirectory)
|
log.Debugf("NewApp(%v)\n", appDirectory)
|
||||||
os.MkdirAll(path.Join(appDirectory, "profiles"), 0700)
|
os.MkdirAll(path.Join(appDirectory, "profiles"), 0700)
|
||||||
|
|
||||||
app := &application{engines: make(map[string]connections.Engine), eventBuses: make(map[string]event.Manager), directory: appDirectory, appBus: event.NewEventManager()}
|
// Note: we basically presume this doesn't fail. If the file doesn't exist we create it, and as such the
|
||||||
app.appletPeers.init()
|
// only plausible error conditions are related to file create e.g. low disk space. If that is the case then
|
||||||
|
// many other parts of Cwtch are likely to fail also.
|
||||||
|
globalSettingsFile, err := settings.InitGlobalSettingsFile(appDirectory, DefactoPasswordForUnencryptedProfiles)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error initializing global globalSettingsFile file %s. Global globalSettingsFile might not be loaded or saved", err)
|
||||||
|
}
|
||||||
|
return globalSettingsFile
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewApp creates a new app with some environment awareness and initializes a Tor Manager
|
||||||
|
func NewApp(acn connectivity.ACN, appDirectory string, settings *settings.GlobalSettingsFile) Application {
|
||||||
|
|
||||||
|
app := &application{engines: make(map[string]connections.Engine), eventBuses: make(map[string]event.Manager), directory: appDirectory, appBus: event.NewEventManager(), settings: settings, eventQueue: event.NewQueue()}
|
||||||
|
app.peers = make(map[string]peer.CwtchPeer)
|
||||||
|
app.engineHooks = connections.DefaultEngineHooks{}
|
||||||
|
app.acn = acn
|
||||||
|
statusHandler := app.getACNStatusHandler()
|
||||||
|
acn.SetStatusCallback(statusHandler)
|
||||||
|
acn.SetVersionCallback(app.getACNVersionHandler())
|
||||||
|
prog, status := acn.GetBootstrapStatus()
|
||||||
|
statusHandler(prog, status)
|
||||||
|
|
||||||
|
app.GetPrimaryBus().Subscribe(event.ACNStatus, app.eventQueue)
|
||||||
|
go app.eventHandler()
|
||||||
|
|
||||||
app.appletACN.init(acn, app.getACNStatusHandler())
|
|
||||||
return app
|
return app
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) CreateTaggedPeer(name string, password string, tag string) {
|
func (app *application) InstallEngineHooks(engineHooks connections.EngineHooks) {
|
||||||
|
app.appmutex.Lock()
|
||||||
|
defer app.appmutex.Unlock()
|
||||||
|
app.engineHooks = engineHooks
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) ReadSettings() settings.GlobalSettings {
|
||||||
|
app.appmutex.Lock()
|
||||||
|
defer app.appmutex.Unlock()
|
||||||
|
return app.settings.ReadGlobalSettings()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) UpdateSettings(settings settings.GlobalSettings) {
|
||||||
|
// don't allow any other application changes while settings update
|
||||||
|
app.appmutex.Lock()
|
||||||
|
defer app.appmutex.Unlock()
|
||||||
|
app.settings.WriteGlobalSettings(settings)
|
||||||
|
|
||||||
|
for _, profile := range app.peers {
|
||||||
|
profile.UpdateExperiments(settings.ExperimentsEnabled, settings.Experiments)
|
||||||
|
|
||||||
|
// Explicitly toggle blocking/unblocking of unknown connections for profiles
|
||||||
|
// that have been loaded.
|
||||||
|
if settings.BlockUnknownConnections {
|
||||||
|
profile.BlockUnknownConnections()
|
||||||
|
} else {
|
||||||
|
profile.AllowUnknownConnections()
|
||||||
|
}
|
||||||
|
|
||||||
|
profile.NotifySettingsUpdate(settings)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListProfiles returns a map of onions to their profile's Name
|
||||||
|
func (app *application) ListProfiles() []string {
|
||||||
|
var keys []string
|
||||||
|
|
||||||
|
app.appmutex.Lock()
|
||||||
|
defer app.appmutex.Unlock()
|
||||||
|
for handle := range app.peers {
|
||||||
|
keys = append(keys, handle)
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPeer returns a cwtchPeer for a given onion address
|
||||||
|
func (app *application) GetPeer(onion string) peer.CwtchPeer {
|
||||||
|
app.appmutex.Lock()
|
||||||
|
defer app.appmutex.Unlock()
|
||||||
|
if profile, ok := app.peers[onion]; ok {
|
||||||
|
return profile
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) AddPlugin(peerid string, id plugins.PluginID, bus event.Manager, acn connectivity.ACN) {
|
||||||
|
if _, exists := app.plugins.Load(peerid); !exists {
|
||||||
|
app.plugins.Store(peerid, []plugins.Plugin{})
|
||||||
|
}
|
||||||
|
|
||||||
|
pluginsinf, _ := app.plugins.Load(peerid)
|
||||||
|
peerPlugins := pluginsinf.([]plugins.Plugin)
|
||||||
|
|
||||||
|
for _, plugin := range peerPlugins {
|
||||||
|
if plugin.Id() == id {
|
||||||
|
log.Errorf("trying to add second instance of plugin %v to peer %v", id, peerid)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
newp, err := plugins.Get(id, bus, acn, peerid)
|
||||||
|
if err == nil {
|
||||||
|
newp.Start()
|
||||||
|
peerPlugins = append(peerPlugins, newp)
|
||||||
|
log.Debugf("storing plugin for %v %v", peerid, peerPlugins)
|
||||||
|
app.plugins.Store(peerid, peerPlugins)
|
||||||
|
} else {
|
||||||
|
log.Errorf("error adding plugin: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) CreateProfile(name string, password string, autostart bool) {
|
||||||
|
autostartVal := constants.True
|
||||||
|
if !autostart {
|
||||||
|
autostartVal = constants.False
|
||||||
|
}
|
||||||
|
tagVal := constants.ProfileTypeV1Password
|
||||||
|
if password == DefactoPasswordForUnencryptedProfiles {
|
||||||
|
tagVal = constants.ProfileTypeV1DefaultPassword
|
||||||
|
}
|
||||||
|
|
||||||
|
app.CreatePeer(name, password, map[attr.ZonedPath]string{
|
||||||
|
attr.ProfileZone.ConstructZonedPath(constants.Tag): tagVal,
|
||||||
|
attr.ProfileZone.ConstructZonedPath(constants.PeerAutostart): autostartVal,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) setupPeer(profile peer.CwtchPeer) {
|
||||||
|
eventBus := event.NewEventManager()
|
||||||
|
app.eventBuses[profile.GetOnion()] = eventBus
|
||||||
|
|
||||||
|
// Initialize the Peer with the Given Event Bus
|
||||||
|
app.peers[profile.GetOnion()] = profile
|
||||||
|
profile.Init(eventBus)
|
||||||
|
|
||||||
|
// Update the Peer with the Most Recent Experiment State...
|
||||||
|
globalSettings := app.settings.ReadGlobalSettings()
|
||||||
|
profile.UpdateExperiments(globalSettings.ExperimentsEnabled, globalSettings.Experiments)
|
||||||
|
app.registerHooks(profile)
|
||||||
|
|
||||||
|
// Register the Peer With Application Plugins..
|
||||||
|
app.AddPeerPlugin(profile.GetOnion(), plugins.CONNECTIONRETRY) // Now Mandatory
|
||||||
|
app.AddPeerPlugin(profile.GetOnion(), plugins.HEARTBEAT) // Now Mandatory
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) CreatePeer(name string, password string, attributes map[attr.ZonedPath]string) {
|
||||||
app.appmutex.Lock()
|
app.appmutex.Lock()
|
||||||
defer app.appmutex.Unlock()
|
defer app.appmutex.Unlock()
|
||||||
|
|
||||||
|
@ -78,42 +243,42 @@ func (app *application) CreateTaggedPeer(name string, password string, tag strin
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
eventBus := event.NewEventManager()
|
app.setupPeer(profile)
|
||||||
app.eventBuses[profile.GetOnion()] = eventBus
|
|
||||||
profile.Init(app.eventBuses[profile.GetOnion()])
|
|
||||||
app.peers[profile.GetOnion()] = profile
|
|
||||||
app.engines[profile.GetOnion()], _ = profile.GenerateProtocolEngine(app.acn, app.eventBuses[profile.GetOnion()])
|
|
||||||
|
|
||||||
if tag != "" {
|
for zp, val := range attributes {
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Tag, tag)
|
zone, key := attr.ParseZone(zp.ToString())
|
||||||
|
profile.SetScopedZonedAttribute(attr.LocalScope, zone, key, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.True}))
|
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.True}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) DeletePeer(onion string, password string) {
|
func (app *application) DeleteProfile(onion string, password string) {
|
||||||
log.Infof("DeletePeer called on %v\n", onion)
|
log.Debugf("DeleteProfile called on %v\n", onion)
|
||||||
app.appmutex.Lock()
|
app.appmutex.Lock()
|
||||||
defer app.appmutex.Unlock()
|
defer app.appmutex.Unlock()
|
||||||
|
|
||||||
if app.peers[onion].CheckPassword(password) {
|
// short circuit to prevent nil-pointer panic if this function is called twice (or incorrectly)
|
||||||
app.appletPlugins.ShutdownPeer(onion)
|
peer := app.peers[onion]
|
||||||
app.plugins.Delete(onion)
|
if peer == nil {
|
||||||
|
log.Errorf("shutdownPeer called with invalid onion %v", onion)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// allow a blank password to delete "unencrypted" accounts...
|
||||||
|
if password == "" {
|
||||||
|
password = DefactoPasswordForUnencryptedProfiles
|
||||||
|
}
|
||||||
|
|
||||||
|
if peer.CheckPassword(password) {
|
||||||
|
// soft-shutdown
|
||||||
|
peer.Shutdown()
|
||||||
|
// delete the underlying storage
|
||||||
|
peer.Delete()
|
||||||
|
// hard shutdown / remove from app
|
||||||
|
app.shutdownPeer(onion)
|
||||||
|
|
||||||
// Shutdown and Remove the Engine
|
// Shutdown and Remove the Engine
|
||||||
app.engines[onion].Shutdown()
|
|
||||||
delete(app.engines, onion)
|
|
||||||
|
|
||||||
app.peers[onion].Shutdown()
|
|
||||||
app.peers[onion].Delete()
|
|
||||||
delete(app.peers, onion)
|
|
||||||
app.eventBuses[onion].Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion))
|
|
||||||
|
|
||||||
app.coremutex.Lock()
|
|
||||||
defer app.coremutex.Unlock()
|
|
||||||
app.eventBuses[onion].Shutdown()
|
|
||||||
delete(app.eventBuses, onion)
|
|
||||||
|
|
||||||
log.Debugf("Delete peer for %v Done\n", onion)
|
log.Debugf("Delete peer for %v Done\n", onion)
|
||||||
app.appBus.Publish(event.NewEventList(event.PeerDeleted, event.Identity, onion))
|
app.appBus.Publish(event.NewEventList(event.PeerDeleted, event.Identity, onion))
|
||||||
return
|
return
|
||||||
|
@ -125,12 +290,29 @@ func (app *application) AddPeerPlugin(onion string, pluginID plugins.PluginID) {
|
||||||
app.AddPlugin(onion, pluginID, app.eventBuses[onion], app.acn)
|
app.AddPlugin(onion, pluginID, app.eventBuses[onion], app.acn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *application) ImportProfile(exportedCwtchFile string, password string) (peer.CwtchPeer, error) {
|
||||||
|
profileDirectory := path.Join(app.directory, "profiles")
|
||||||
|
profile, err := peer.ImportProfile(exportedCwtchFile, profileDirectory, password)
|
||||||
|
if profile != nil || err == nil {
|
||||||
|
app.installProfile(profile)
|
||||||
|
}
|
||||||
|
return profile, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) EnhancedImportProfile(exportedCwtchFile string, password string) string {
|
||||||
|
_, err := app.ImportProfile(exportedCwtchFile, password)
|
||||||
|
if err == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
|
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
|
||||||
func (app *application) LoadProfiles(password string) {
|
func (app *application) LoadProfiles(password string) {
|
||||||
count := 0
|
count := 0
|
||||||
migrating := false
|
migrating := false
|
||||||
|
|
||||||
files, err := ioutil.ReadDir(path.Join(app.directory, "profiles"))
|
files, err := os.ReadDir(path.Join(app.directory, "profiles"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error: cannot read profiles directory: %v", err)
|
log.Errorf("error: cannot read profiles directory: %v", err)
|
||||||
return
|
return
|
||||||
|
@ -160,6 +342,7 @@ func (app *application) LoadProfiles(password string) {
|
||||||
cps, err := peer.CreateEncryptedStore(profileDirectory, password)
|
cps, err := peer.CreateEncryptedStore(profileDirectory, password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error creating encrypted store: %v", err)
|
log.Errorf("error creating encrypted store: %v", err)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
profile := peer.ImportLegacyProfile(legacyProfile, cps)
|
profile := peer.ImportLegacyProfile(legacyProfile, cps)
|
||||||
loaded = app.installProfile(profile)
|
loaded = app.installProfile(profile)
|
||||||
|
@ -177,6 +360,17 @@ func (app *application) LoadProfiles(password string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *application) registerHooks(profile peer.CwtchPeer) {
|
||||||
|
// Register Hooks
|
||||||
|
profile.RegisterHook(extensions.ProfileValueExtension{})
|
||||||
|
profile.RegisterHook(extensions.SendWhenOnlineExtension{})
|
||||||
|
profile.RegisterHook(new(filesharing.Functionality))
|
||||||
|
profile.RegisterHook(new(filesharing.ImagePreviewsFunctionality))
|
||||||
|
profile.RegisterHook(new(servers.Functionality))
|
||||||
|
// Ensure that Profiles have the Most Up to Date Settings...
|
||||||
|
profile.NotifySettingsUpdate(app.settings.ReadGlobalSettings())
|
||||||
|
}
|
||||||
|
|
||||||
// installProfile takes a profile and if it isn't loaded in the app, installs it and returns true
|
// installProfile takes a profile and if it isn't loaded in the app, installs it and returns true
|
||||||
func (app *application) installProfile(profile peer.CwtchPeer) bool {
|
func (app *application) installProfile(profile peer.CwtchPeer) bool {
|
||||||
app.appmutex.Lock()
|
app.appmutex.Lock()
|
||||||
|
@ -184,11 +378,8 @@ func (app *application) installProfile(profile peer.CwtchPeer) bool {
|
||||||
|
|
||||||
// Only attempt to finalize the profile if we don't have one loaded...
|
// Only attempt to finalize the profile if we don't have one loaded...
|
||||||
if app.peers[profile.GetOnion()] == nil {
|
if app.peers[profile.GetOnion()] == nil {
|
||||||
eventBus := event.NewEventManager()
|
app.setupPeer(profile)
|
||||||
app.eventBuses[profile.GetOnion()] = eventBus
|
// Finalize the Creation of Peer / Notify any Interfaces..
|
||||||
profile.Init(app.eventBuses[profile.GetOnion()])
|
|
||||||
app.peers[profile.GetOnion()] = profile
|
|
||||||
app.engines[profile.GetOnion()], _ = profile.GenerateProtocolEngine(app.acn, app.eventBuses[profile.GetOnion()])
|
|
||||||
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.False}))
|
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.False}))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -197,6 +388,68 @@ func (app *application) installProfile(profile peer.CwtchPeer) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ActivatePeerEngine creates a peer engine for use with an ACN, should be called once the underlying ACN is online
|
||||||
|
func (app *application) ActivatePeerEngine(onion string) {
|
||||||
|
profile := app.GetPeer(onion)
|
||||||
|
if profile != nil {
|
||||||
|
if _, exists := app.engines[onion]; !exists {
|
||||||
|
eventBus, exists := app.eventBuses[profile.GetOnion()]
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
// todo handle this case?
|
||||||
|
log.Errorf("cannot activate peer engine without an event bus")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
engine, err := profile.GenerateProtocolEngine(app.acn, eventBus, app.engineHooks)
|
||||||
|
if err == nil {
|
||||||
|
log.Debugf("restartFlow: Creating a New Protocol Engine...")
|
||||||
|
app.engines[profile.GetOnion()] = engine
|
||||||
|
eventBus.Publish(event.NewEventList(event.ProtocolEngineCreated))
|
||||||
|
app.QueryACNStatus()
|
||||||
|
} else {
|
||||||
|
log.Errorf("corrupted profile detected for %v", onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigureConnections autostarts the given kinds of connections.
|
||||||
|
func (app *application) ConfigureConnections(onion string, listen bool, peers bool, servers bool) {
|
||||||
|
profile := app.GetPeer(onion)
|
||||||
|
if profile != nil {
|
||||||
|
|
||||||
|
profileBus, exists := app.eventBuses[profile.GetOnion()]
|
||||||
|
if exists {
|
||||||
|
// if we are making a decision to ignore
|
||||||
|
if !peers || !servers {
|
||||||
|
profileBus.Publish(event.NewEventList(event.PurgeRetries))
|
||||||
|
}
|
||||||
|
|
||||||
|
// enable the engine if it doesn't exist...
|
||||||
|
// note: this function is idempotent
|
||||||
|
app.ActivatePeerEngine(onion)
|
||||||
|
if listen {
|
||||||
|
profile.Listen()
|
||||||
|
}
|
||||||
|
|
||||||
|
profileBus.Publish(event.NewEventList(event.ResumeRetries))
|
||||||
|
// do this in the background, for large contact lists it can take a long time...
|
||||||
|
go profile.StartConnections(peers, servers)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("profile does not exist %v", onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeactivatePeerEngine shutsdown and cleans up a peer engine, should be called when an underlying ACN goes offline
|
||||||
|
func (app *application) DeactivatePeerEngine(onion string) {
|
||||||
|
if engine, exists := app.engines[onion]; exists {
|
||||||
|
engine.Shutdown()
|
||||||
|
delete(app.engines, onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
|
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
|
||||||
func (app *application) GetPrimaryBus() event.Manager {
|
func (app *application) GetPrimaryBus() event.Manager {
|
||||||
return app.appBus
|
return app.appBus
|
||||||
|
@ -222,6 +475,14 @@ func (app *application) getACNStatusHandler() func(int, string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *application) getACNVersionHandler() func(string) {
|
||||||
|
return func(version string) {
|
||||||
|
app.appmutex.Lock()
|
||||||
|
defer app.appmutex.Unlock()
|
||||||
|
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (app *application) QueryACNStatus() {
|
func (app *application) QueryACNStatus() {
|
||||||
prog, status := app.acn.GetBootstrapStatus()
|
prog, status := app.acn.GetBootstrapStatus()
|
||||||
app.getACNStatusHandler()(prog, status)
|
app.getACNStatusHandler()(prog, status)
|
||||||
|
@ -232,31 +493,107 @@ func (app *application) QueryACNVersion() {
|
||||||
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
|
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *application) eventHandler() {
|
||||||
|
acnStatus := -1
|
||||||
|
for {
|
||||||
|
e := app.eventQueue.Next()
|
||||||
|
switch e.EventType {
|
||||||
|
case event.ACNStatus:
|
||||||
|
newAcnStatus, err := strconv.Atoi(e.Data[event.Progress])
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if newAcnStatus == 100 {
|
||||||
|
if acnStatus != 100 {
|
||||||
|
for _, onion := range app.ListProfiles() {
|
||||||
|
profile := app.GetPeer(onion)
|
||||||
|
if profile != nil {
|
||||||
|
autostart, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.PeerAutostart)
|
||||||
|
appearOffline, appearOfflineExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.PeerAppearOffline)
|
||||||
|
if !exists || autostart == "true" {
|
||||||
|
if appearOfflineExists && appearOffline == "true" {
|
||||||
|
// don't configure any connections...
|
||||||
|
log.Infof("peer appearing offline, not launching listen threads or connecting jobs")
|
||||||
|
app.ConfigureConnections(onion, false, false, false)
|
||||||
|
} else {
|
||||||
|
app.ConfigureConnections(onion, true, true, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if acnStatus == 100 {
|
||||||
|
// just fell offline
|
||||||
|
for _, onion := range app.ListProfiles() {
|
||||||
|
app.DeactivatePeerEngine(onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
acnStatus = newAcnStatus
|
||||||
|
|
||||||
|
default:
|
||||||
|
// invalid event, signifies shutdown
|
||||||
|
if e.EventType == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ShutdownPeer shuts down a peer and removes it from the app's management
|
// ShutdownPeer shuts down a peer and removes it from the app's management
|
||||||
func (app *application) ShutdownPeer(onion string) {
|
func (app *application) ShutdownPeer(onion string) {
|
||||||
app.appmutex.Lock()
|
app.appmutex.Lock()
|
||||||
defer app.appmutex.Unlock()
|
defer app.appmutex.Unlock()
|
||||||
app.eventBuses[onion].Shutdown()
|
app.shutdownPeer(onion)
|
||||||
delete(app.eventBuses, onion)
|
|
||||||
app.peers[onion].Shutdown()
|
|
||||||
delete(app.peers, onion)
|
|
||||||
app.engines[onion].Shutdown()
|
|
||||||
delete(app.engines, onion)
|
|
||||||
app.appletPlugins.Shutdown()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown shutsdown all peers of an app and then the tormanager
|
// shutdownPeer mutex unlocked helper shutdown peer
|
||||||
|
//
|
||||||
|
//nolint:nilaway
|
||||||
|
func (app *application) shutdownPeer(onion string) {
|
||||||
|
|
||||||
|
// short circuit to prevent nil-pointer panic if this function is called twice (or incorrectly)
|
||||||
|
onionEventBus := app.eventBuses[onion]
|
||||||
|
onionPeer := app.peers[onion]
|
||||||
|
if onionEventBus == nil || onionPeer == nil {
|
||||||
|
log.Errorf("shutdownPeer called with invalid onion %v", onion)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// we are an internal locked method, app.eventBuses[onion] cannot fail...
|
||||||
|
onionEventBus.Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion))
|
||||||
|
onionEventBus.Shutdown()
|
||||||
|
|
||||||
|
delete(app.eventBuses, onion)
|
||||||
|
onionPeer.Shutdown()
|
||||||
|
delete(app.peers, onion)
|
||||||
|
if onionEngine, ok := app.engines[onion]; ok {
|
||||||
|
onionEngine.Shutdown()
|
||||||
|
delete(app.engines, onion)
|
||||||
|
}
|
||||||
|
log.Debugf("shutting down plugins for %v", onion)
|
||||||
|
pluginsI, ok := app.plugins.Load(onion)
|
||||||
|
if ok {
|
||||||
|
appPlugins := pluginsI.([]plugins.Plugin)
|
||||||
|
for _, plugin := range appPlugins {
|
||||||
|
plugin.Shutdown()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
app.plugins.Delete(onion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shutsdown all peers of an app
|
||||||
func (app *application) Shutdown() {
|
func (app *application) Shutdown() {
|
||||||
for id, peer := range app.peers {
|
app.appmutex.Lock()
|
||||||
peer.Shutdown()
|
defer app.appmutex.Unlock()
|
||||||
|
for id := range app.peers {
|
||||||
log.Debugf("Shutting Down Peer %v", id)
|
log.Debugf("Shutting Down Peer %v", id)
|
||||||
app.appletPlugins.ShutdownPeer(id)
|
app.shutdownPeer(id)
|
||||||
log.Debugf("Shutting Down Engines for %v", id)
|
|
||||||
app.engines[id].Shutdown()
|
|
||||||
log.Debugf("Shutting Down Bus for %v", id)
|
|
||||||
app.eventBuses[id].Shutdown()
|
|
||||||
}
|
}
|
||||||
log.Debugf("Shutting Down App")
|
log.Debugf("Shutting Down App")
|
||||||
|
app.eventQueue.Shutdown()
|
||||||
app.appBus.Shutdown()
|
app.appBus.Shutdown()
|
||||||
log.Debugf("Shut Down Complete")
|
log.Debugf("Shut Down Complete")
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,6 @@
|
||||||
|
package app
|
||||||
|
|
||||||
|
// DefactoPasswordForUnencryptedProfiles is used to offer "un-passworded" profiles. Our storage encrypts everything with a password. We need an agreed upon
|
||||||
|
// password to use in that case, that the app case use behind the scenes to password and unlock with
|
||||||
|
// https://docs.openprivacy.ca/cwtch-security-handbook/profile_encryption_and_storage.html
|
||||||
|
const DefactoPasswordForUnencryptedProfiles = "be gay do crime"
|
125
app/applets.go
125
app/applets.go
|
@ -1,125 +0,0 @@
|
||||||
package app
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"cwtch.im/cwtch/app/plugins"
|
|
||||||
"cwtch.im/cwtch/peer"
|
|
||||||
)
|
|
||||||
|
|
||||||
type appletPeers struct {
|
|
||||||
peerLock sync.Mutex
|
|
||||||
peers map[string]peer.CwtchPeer
|
|
||||||
launched bool // bit hacky, place holder while we transition to full multi peer support and a better api
|
|
||||||
}
|
|
||||||
|
|
||||||
type appletACN struct {
|
|
||||||
acn connectivity.ACN
|
|
||||||
}
|
|
||||||
|
|
||||||
type appletPlugins struct {
|
|
||||||
plugins sync.Map //map[string] []plugins.Plugin
|
|
||||||
}
|
|
||||||
|
|
||||||
// ***** applet ACN
|
|
||||||
|
|
||||||
func (a *appletACN) init(acn connectivity.ACN, publish func(int, string)) {
|
|
||||||
a.acn = acn
|
|
||||||
acn.SetStatusCallback(publish)
|
|
||||||
prog, status := acn.GetBootstrapStatus()
|
|
||||||
publish(prog, status)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *appletACN) Shutdown() {
|
|
||||||
a.acn.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ***** appletPeers
|
|
||||||
|
|
||||||
func (ap *appletPeers) init() {
|
|
||||||
ap.peers = make(map[string]peer.CwtchPeer)
|
|
||||||
ap.launched = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// LaunchPeers starts each peer Listening and connecting to peers and groups
|
|
||||||
func (ap *appletPeers) LaunchPeers() {
|
|
||||||
log.Debugf("appletPeers LaunchPeers\n")
|
|
||||||
ap.peerLock.Lock()
|
|
||||||
defer ap.peerLock.Unlock()
|
|
||||||
if ap.launched {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for pid, p := range ap.peers {
|
|
||||||
log.Debugf("Launching %v\n", pid)
|
|
||||||
p.Listen()
|
|
||||||
log.Debugf("done Listen() for %v\n", pid)
|
|
||||||
p.StartPeersConnections()
|
|
||||||
log.Debugf("done StartPeersConnections() for %v\n", pid)
|
|
||||||
}
|
|
||||||
ap.launched = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListProfiles returns a map of onions to their profile's Name
|
|
||||||
func (ap *appletPeers) ListProfiles() []string {
|
|
||||||
var keys []string
|
|
||||||
|
|
||||||
ap.peerLock.Lock()
|
|
||||||
defer ap.peerLock.Unlock()
|
|
||||||
for handle := range ap.peers {
|
|
||||||
keys = append(keys, handle)
|
|
||||||
}
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPeer returns a cwtchPeer for a given onion address
|
|
||||||
func (ap *appletPeers) GetPeer(onion string) peer.CwtchPeer {
|
|
||||||
if peer, ok := ap.peers[onion]; ok {
|
|
||||||
return peer
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ***** applet Plugins
|
|
||||||
|
|
||||||
func (ap *appletPlugins) Shutdown() {
|
|
||||||
log.Debugf("shutting down applet plugins...")
|
|
||||||
ap.plugins.Range(func(k, v interface{}) bool {
|
|
||||||
log.Debugf("shutting down plugins for %v", k)
|
|
||||||
ap.ShutdownPeer(k.(string))
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ap *appletPlugins) ShutdownPeer(peerid string) {
|
|
||||||
log.Debugf("shutting down plugins for %v", peerid)
|
|
||||||
pluginsI, ok := ap.plugins.Load(peerid)
|
|
||||||
if ok {
|
|
||||||
plugins := pluginsI.([]plugins.Plugin)
|
|
||||||
for _, plugin := range plugins {
|
|
||||||
log.Debugf("shutting down plugin: %v", plugin)
|
|
||||||
plugin.Shutdown()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ap *appletPlugins) AddPlugin(peerid string, id plugins.PluginID, bus event.Manager, acn connectivity.ACN) {
|
|
||||||
if _, exists := ap.plugins.Load(peerid); !exists {
|
|
||||||
ap.plugins.Store(peerid, []plugins.Plugin{})
|
|
||||||
}
|
|
||||||
|
|
||||||
pluginsinf, _ := ap.plugins.Load(peerid)
|
|
||||||
peerPlugins := pluginsinf.([]plugins.Plugin)
|
|
||||||
|
|
||||||
newp, err := plugins.Get(id, bus, acn, peerid)
|
|
||||||
if err == nil {
|
|
||||||
newp.Start()
|
|
||||||
peerPlugins = append(peerPlugins, newp)
|
|
||||||
log.Debugf("storing plugin for %v %v", peerid, peerPlugins)
|
|
||||||
ap.plugins.Store(peerid, peerPlugins)
|
|
||||||
} else {
|
|
||||||
log.Errorf("error adding plugin: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,47 @@
|
||||||
|
package plugins
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const antispamTickTime = 30 * time.Second
|
||||||
|
|
||||||
|
type antispam struct {
|
||||||
|
bus event.Manager
|
||||||
|
queue event.Queue
|
||||||
|
breakChan chan bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *antispam) Start() {
|
||||||
|
go a.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *antispam) Id() PluginID {
|
||||||
|
return ANTISPAM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *antispam) Shutdown() {
|
||||||
|
a.breakChan <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *antispam) run() {
|
||||||
|
log.Debugf("running antispam trigger plugin")
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-time.After(antispamTickTime):
|
||||||
|
// no fuss, just trigger the check. Downstream will filter out superfluous actions
|
||||||
|
a.bus.Publish(event.NewEvent(event.TriggerAntispamCheck, map[event.Field]string{}))
|
||||||
|
continue
|
||||||
|
case <-a.breakChan:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAntiSpam returns a Plugin that when started will trigger antispam payments on a regular interval
|
||||||
|
func NewAntiSpam(bus event.Manager) Plugin {
|
||||||
|
cr := &antispam{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
||||||
|
return cr
|
||||||
|
}
|
|
@ -3,13 +3,26 @@ package plugins
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const tickTime = 10 * time.Second
|
// Todo: Move to protocol/connections
|
||||||
const maxBackoff int = 32 // 320 seconds or ~5 min
|
// This Plugin is now required and it makes more sense to run more integrated in engine
|
||||||
|
|
||||||
|
const tickTimeSec = 30
|
||||||
|
const tickTime = tickTimeSec * time.Second
|
||||||
|
|
||||||
|
const circuitTimeoutSecs int = 120
|
||||||
|
|
||||||
|
const MaxBaseTimeoutSec = 5 * 60 // a max base time out of 5 min
|
||||||
|
const maxFailedBackoff = 6 // 2^6 = 64 -> 64 * [2m to 5m] = 2h8m to 5h20m
|
||||||
|
|
||||||
|
const PriorityQueueTimeSinceQualifierHours float64 = 168
|
||||||
|
|
||||||
type connectionType int
|
type connectionType int
|
||||||
|
|
||||||
|
@ -23,28 +36,129 @@ type contact struct {
|
||||||
state connections.ConnectionState
|
state connections.ConnectionState
|
||||||
ctype connectionType
|
ctype connectionType
|
||||||
|
|
||||||
ticks int
|
lastAttempt time.Time
|
||||||
backoff int
|
failedCount int
|
||||||
|
|
||||||
|
lastSeen time.Time
|
||||||
|
queued bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// compare a to b
|
||||||
|
// returns -1 if a < b
|
||||||
|
//
|
||||||
|
// 0 if a == b
|
||||||
|
// +1 if a > b
|
||||||
|
//
|
||||||
|
// algo: sort by failedCount first favouring less attempts, then sort by lastSeen time favouring more recent connections
|
||||||
|
func (a *contact) compare(b *contact) int {
|
||||||
|
if a.failedCount < b.failedCount {
|
||||||
|
return -1
|
||||||
|
} else if a.failedCount > b.failedCount {
|
||||||
|
return +1
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.lastSeen.After(b.lastSeen) {
|
||||||
|
return -1
|
||||||
|
} else if a.lastSeen.Before(b.lastSeen) {
|
||||||
|
return +1
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type connectionQueue struct {
|
||||||
|
queue []*contact
|
||||||
|
}
|
||||||
|
|
||||||
|
func newConnectionQueue() *connectionQueue {
|
||||||
|
return &connectionQueue{queue: []*contact{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cq *connectionQueue) insert(c *contact) {
|
||||||
|
// find loc
|
||||||
|
i := 0
|
||||||
|
var b *contact
|
||||||
|
for i, b = range cq.queue {
|
||||||
|
if c.compare(b) >= 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert
|
||||||
|
if len(cq.queue) == i { // nil or empty slice or after last element
|
||||||
|
cq.queue = append(cq.queue, c)
|
||||||
|
} else {
|
||||||
|
cq.queue = append(cq.queue[:i+1], cq.queue[i:]...) // index < len(a)
|
||||||
|
cq.queue[i] = c
|
||||||
|
}
|
||||||
|
|
||||||
|
c.queued = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cq *connectionQueue) dequeue() *contact {
|
||||||
|
if len(cq.queue) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
c := cq.queue[0]
|
||||||
|
cq.queue = cq.queue[1:]
|
||||||
|
c.queued = false
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cq *connectionQueue) len() int {
|
||||||
|
return len(cq.queue)
|
||||||
}
|
}
|
||||||
|
|
||||||
type contactRetry struct {
|
type contactRetry struct {
|
||||||
bus event.Manager
|
bus event.Manager
|
||||||
queue event.Queue
|
queue event.Queue
|
||||||
networkUp bool
|
ACNUp bool
|
||||||
|
ACNUpTime time.Time
|
||||||
|
protocolEngine bool
|
||||||
running bool
|
running bool
|
||||||
breakChan chan bool
|
breakChan chan bool
|
||||||
onion string
|
onion string
|
||||||
lastCheck time.Time
|
lastCheck time.Time
|
||||||
|
acnProgress int
|
||||||
|
|
||||||
connections sync.Map //[string]*contact
|
connections sync.Map //[string]*contact
|
||||||
|
pendingQueue *connectionQueue
|
||||||
|
priorityQueue *connectionQueue
|
||||||
|
authorizedPeers sync.Map
|
||||||
|
stallRetries bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewConnectionRetry returns a Plugin that when started will retry connecting to contacts with a backoff timing
|
// NewConnectionRetry returns a Plugin that when started will retry connecting to contacts with a failedCount timing
|
||||||
func NewConnectionRetry(bus event.Manager, onion string) Plugin {
|
func NewConnectionRetry(bus event.Manager, onion string) Plugin {
|
||||||
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool), connections: sync.Map{}, networkUp: false, onion: onion}
|
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1), authorizedPeers: sync.Map{}, connections: sync.Map{}, stallRetries: true, ACNUp: false, ACNUpTime: time.Now(), protocolEngine: false, onion: onion, pendingQueue: newConnectionQueue(), priorityQueue: newConnectionQueue()}
|
||||||
return cr
|
return cr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// maxTorCircuitsPending a function to throttle access to tor network during start up
|
||||||
|
func (cr *contactRetry) maxTorCircuitsPending() int {
|
||||||
|
timeSinceStart := time.Since(cr.ACNUpTime)
|
||||||
|
if timeSinceStart < 30*time.Second {
|
||||||
|
return 4
|
||||||
|
} else if timeSinceStart < 4*time.Minute {
|
||||||
|
return 8
|
||||||
|
} else if timeSinceStart < 8*time.Minute {
|
||||||
|
return 16
|
||||||
|
}
|
||||||
|
return connections.TorMaxPendingConns
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *contactRetry) connectingCount() int {
|
||||||
|
connecting := 0
|
||||||
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
|
conn := v.(*contact)
|
||||||
|
if conn.state == connections.CONNECTING {
|
||||||
|
connecting++
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
return connecting
|
||||||
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) Start() {
|
func (cr *contactRetry) Start() {
|
||||||
if !cr.running {
|
if !cr.running {
|
||||||
go cr.run()
|
go cr.run()
|
||||||
|
@ -53,48 +167,173 @@ func (cr *contactRetry) Start() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cr *contactRetry) Id() PluginID {
|
||||||
|
return CONNECTIONRETRY
|
||||||
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) run() {
|
func (cr *contactRetry) run() {
|
||||||
cr.running = true
|
cr.running = true
|
||||||
cr.bus.Subscribe(event.PeerStateChange, cr.queue)
|
cr.bus.Subscribe(event.PeerStateChange, cr.queue)
|
||||||
cr.bus.Subscribe(event.ACNStatus, cr.queue)
|
cr.bus.Subscribe(event.ACNStatus, cr.queue)
|
||||||
cr.bus.Subscribe(event.ServerStateChange, cr.queue)
|
cr.bus.Subscribe(event.ServerStateChange, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.QueuePeerRequest, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.QueueJoinServer, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.DisconnectPeerRequest, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.DisconnectServerRequest, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.ProtocolEngineShutdown, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.ProtocolEngineCreated, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.DeleteContact, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.UpdateConversationAuthorization, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.PurgeRetries, cr.queue)
|
||||||
|
cr.bus.Subscribe(event.ResumeRetries, cr.queue)
|
||||||
for {
|
for {
|
||||||
if time.Since(cr.lastCheck) > tickTime {
|
// Only attempt connection if both the ACN and the Protocol Engines are Online...
|
||||||
cr.retryDisconnected()
|
log.Debugf("restartFlow checking state")
|
||||||
|
if cr.ACNUp && cr.protocolEngine && !cr.stallRetries {
|
||||||
|
log.Debugf("restartFlow time to queue!!")
|
||||||
|
cr.requeueReady()
|
||||||
|
connectingCount := cr.connectingCount()
|
||||||
|
|
||||||
|
// do priority connections first...
|
||||||
|
for connectingCount < cr.maxTorCircuitsPending() && len(cr.priorityQueue.queue) > 0 {
|
||||||
|
contact := cr.priorityQueue.dequeue()
|
||||||
|
if contact == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// could have received incoming connection while in queue, make sure still disconnected before trying
|
||||||
|
if contact.state == connections.DISCONNECTED {
|
||||||
|
cr.publishConnectionRequest(contact)
|
||||||
|
connectingCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for connectingCount < cr.maxTorCircuitsPending() && len(cr.pendingQueue.queue) > 0 {
|
||||||
|
contact := cr.pendingQueue.dequeue()
|
||||||
|
if contact == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// could have received incoming connection while in queue, make sure still disconnected before trying
|
||||||
|
if contact.state == connections.DISCONNECTED {
|
||||||
|
cr.publishConnectionRequest(contact)
|
||||||
|
connectingCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
cr.lastCheck = time.Now()
|
cr.lastCheck = time.Now()
|
||||||
}
|
}
|
||||||
select {
|
// regardless of if we're up, run manual force deconnectiong of timed out connections
|
||||||
case e := <-cr.queue.OutChan():
|
|
||||||
switch e.EventType {
|
|
||||||
case event.PeerStateChange:
|
|
||||||
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
|
||||||
peer := e.Data[event.RemotePeer]
|
|
||||||
cr.handleEvent(peer, state, peerConn)
|
|
||||||
|
|
||||||
case event.ServerStateChange:
|
|
||||||
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
|
||||||
server := e.Data[event.GroupServer]
|
|
||||||
cr.handleEvent(server, state, serverConn)
|
|
||||||
|
|
||||||
case event.ACNStatus:
|
|
||||||
prog := e.Data[event.Progress]
|
|
||||||
if prog == "100" && !cr.networkUp {
|
|
||||||
cr.networkUp = true
|
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
p := v.(*contact)
|
p := v.(*contact)
|
||||||
p.ticks = 0
|
if p.state == connections.CONNECTING && time.Since(p.lastAttempt) > time.Duration(circuitTimeoutSecs)*time.Second*2 {
|
||||||
p.backoff = 1
|
// we have been "connecting" for twice the circuttimeout so it's failed, we just didn't learn about it, manually disconnect
|
||||||
if p.ctype == peerConn {
|
cr.handleEvent(p.id, connections.DISCONNECTED, p.ctype)
|
||||||
cr.bus.Publish(event.NewEvent(event.RetryPeerRequest, map[event.Field]string{event.RemotePeer: p.id}))
|
log.Errorf("had to manually set peer %v of profile %v to DISCONNECTED due to assumed circuit timeout (%v) seconds", p.id, cr.onion, circuitTimeoutSecs*2)
|
||||||
}
|
|
||||||
if p.ctype == serverConn {
|
|
||||||
cr.bus.Publish(event.NewEvent(event.RetryServerRequest, map[event.Field]string{event.GroupServer: p.id}))
|
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
} else if prog != "100" {
|
|
||||||
cr.networkUp = false
|
select {
|
||||||
|
case e := <-cr.queue.OutChan():
|
||||||
|
switch e.EventType {
|
||||||
|
case event.PurgeRetries:
|
||||||
|
// Purge All Authorized Peers
|
||||||
|
cr.authorizedPeers.Range(func(key interface{}, value interface{}) bool {
|
||||||
|
cr.authorizedPeers.Delete(key)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
// Purge All Connection States
|
||||||
|
cr.connections.Range(func(key interface{}, value interface{}) bool {
|
||||||
|
cr.connections.Delete(key)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
case event.ResumeRetries:
|
||||||
|
log.Infof("resuming retries...")
|
||||||
|
cr.stallRetries = false
|
||||||
|
case event.DisconnectPeerRequest:
|
||||||
|
peer := e.Data[event.RemotePeer]
|
||||||
|
cr.authorizedPeers.Delete(peer)
|
||||||
|
case event.DisconnectServerRequest:
|
||||||
|
peer := e.Data[event.GroupServer]
|
||||||
|
cr.authorizedPeers.Delete(peer)
|
||||||
|
case event.DeleteContact:
|
||||||
|
// this case covers both servers and peers (servers are peers, and go through the
|
||||||
|
// same delete conversation flow)
|
||||||
|
peer := e.Data[event.RemotePeer]
|
||||||
|
cr.authorizedPeers.Delete(peer)
|
||||||
|
case event.UpdateConversationAuthorization:
|
||||||
|
// if we update the conversation authorization then we need to check if
|
||||||
|
// we need to remove blocked conversations from the regular flow.
|
||||||
|
peer := e.Data[event.RemotePeer]
|
||||||
|
blocked := e.Data[event.Blocked]
|
||||||
|
if blocked == "true" {
|
||||||
|
cr.authorizedPeers.Delete(peer)
|
||||||
|
}
|
||||||
|
case event.PeerStateChange:
|
||||||
|
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
||||||
|
peer := e.Data[event.RemotePeer]
|
||||||
|
// only handle state change events from pre-authorized peers;
|
||||||
|
if _, exists := cr.authorizedPeers.Load(peer); exists {
|
||||||
|
cr.handleEvent(peer, state, peerConn)
|
||||||
|
}
|
||||||
|
case event.ServerStateChange:
|
||||||
|
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
||||||
|
server := e.Data[event.GroupServer]
|
||||||
|
// only handle state change events from pre-authorized servers;
|
||||||
|
if _, exists := cr.authorizedPeers.Load(server); exists {
|
||||||
|
cr.handleEvent(server, state, serverConn)
|
||||||
|
}
|
||||||
|
case event.QueueJoinServer:
|
||||||
|
fallthrough
|
||||||
|
case event.QueuePeerRequest:
|
||||||
|
lastSeen, err := time.Parse(time.RFC3339Nano, e.Data[event.LastSeen])
|
||||||
|
if err != nil {
|
||||||
|
lastSeen = event.CwtchEpoch
|
||||||
|
}
|
||||||
|
|
||||||
|
id := ""
|
||||||
|
if peer, exists := e.Data[event.RemotePeer]; exists {
|
||||||
|
id = peer
|
||||||
|
cr.addConnection(peer, connections.DISCONNECTED, peerConn, lastSeen)
|
||||||
|
} else if server, exists := e.Data[event.GroupServer]; exists {
|
||||||
|
id = server
|
||||||
|
cr.addConnection(server, connections.DISCONNECTED, serverConn, lastSeen)
|
||||||
|
}
|
||||||
|
// this was an authorized event, and so we store this peer.
|
||||||
|
log.Debugf("authorizing id: %v", id)
|
||||||
|
cr.authorizedPeers.Store(id, true)
|
||||||
|
if c, ok := cr.connections.Load(id); ok {
|
||||||
|
contact := c.(*contact)
|
||||||
|
if contact.state == connections.DISCONNECTED {
|
||||||
|
// prioritize connections made in the last week
|
||||||
|
if time.Since(contact.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
|
||||||
|
cr.priorityQueue.insert(contact)
|
||||||
|
} else {
|
||||||
|
cr.pendingQueue.insert(contact)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case event.ProtocolEngineShutdown:
|
||||||
|
cr.ACNUp = false
|
||||||
|
cr.protocolEngine = false
|
||||||
|
cr.stallRetries = true
|
||||||
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
|
p := v.(*contact)
|
||||||
|
if p.state == connections.AUTHENTICATED || p.state == connections.SYNCED {
|
||||||
|
p.lastSeen = time.Now()
|
||||||
|
}
|
||||||
|
p.state = connections.DISCONNECTED
|
||||||
|
p.failedCount = 0
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
case event.ProtocolEngineCreated:
|
||||||
|
cr.protocolEngine = true
|
||||||
|
cr.processStatus()
|
||||||
|
|
||||||
|
case event.ACNStatus:
|
||||||
|
progData := e.Data[event.Progress]
|
||||||
|
if prog, err := strconv.Atoi(progData); err == nil {
|
||||||
|
cr.acnProgress = prog
|
||||||
|
cr.processStatus()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -108,54 +347,173 @@ func (cr *contactRetry) run() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) retryDisconnected() {
|
func (cr *contactRetry) processStatus() {
|
||||||
|
if !cr.protocolEngine {
|
||||||
|
cr.ACNUp = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if cr.acnProgress == 100 && !cr.ACNUp {
|
||||||
|
// ACN is up...at this point we need to completely reset our state
|
||||||
|
// as there is no guarantee that the tor daemon shares our state anymore...
|
||||||
|
cr.ACNUp = true
|
||||||
|
cr.ACNUpTime = time.Now()
|
||||||
|
|
||||||
|
// reset all of the queues...
|
||||||
|
cr.priorityQueue = newConnectionQueue()
|
||||||
|
cr.pendingQueue = newConnectionQueue()
|
||||||
|
|
||||||
|
// Loop through connections. Reset state, and requeue...
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
p := v.(*contact)
|
p := v.(*contact)
|
||||||
|
|
||||||
if p.state == connections.DISCONNECTED {
|
// only reload connections if they are on the authorized peers list
|
||||||
p.ticks++
|
if _, exists := cr.authorizedPeers.Load(p.id); exists {
|
||||||
if p.ticks >= p.backoff {
|
p.queued = true
|
||||||
p.ticks = 0
|
// prioritize connections made recently...
|
||||||
if cr.networkUp {
|
log.Debugf("adding %v to queue", p.id)
|
||||||
if p.ctype == peerConn {
|
if time.Since(p.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
|
||||||
cr.bus.Publish(event.NewEvent(event.RetryPeerRequest, map[event.Field]string{event.RemotePeer: p.id}))
|
cr.priorityQueue.insert(p)
|
||||||
|
} else {
|
||||||
|
cr.pendingQueue.insert(p)
|
||||||
}
|
}
|
||||||
if p.ctype == serverConn {
|
|
||||||
cr.bus.Publish(event.NewEvent(event.RetryServerRequest, map[event.Field]string{event.GroupServer: p.id}))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
} else if cr.acnProgress != 100 {
|
||||||
|
cr.ACNUp = false
|
||||||
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
|
p := v.(*contact)
|
||||||
|
p.failedCount = 0
|
||||||
|
p.queued = false
|
||||||
|
p.state = connections.DISCONNECTED
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *contactRetry) requeueReady() {
|
||||||
|
if !cr.ACNUp {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var retryable []*contact
|
||||||
|
|
||||||
|
throughPutPerMin := int((float64(cr.maxTorCircuitsPending()) / float64(circuitTimeoutSecs)) * 60.0)
|
||||||
|
queueCount := cr.priorityQueue.len() + cr.pendingQueue.len()
|
||||||
|
// adjustedBaseTimeout = basetimeoust * (queuedItemsCount / throughPutPerMin)
|
||||||
|
// when less items are queued than through put it'll lower adjustedBaseTimeOut, but that'll be reset in the next block
|
||||||
|
// when more items are queued it will increase the timeout, to a max of MaxBaseTimeoutSec (enforced in the next block)
|
||||||
|
adjustedBaseTimeout := circuitTimeoutSecs * (queueCount / throughPutPerMin)
|
||||||
|
|
||||||
|
// circuitTimeoutSecs (120s) < adjustedBaseTimeout < MaxBaseTimeoutSec (300s)
|
||||||
|
if adjustedBaseTimeout < circuitTimeoutSecs {
|
||||||
|
adjustedBaseTimeout = circuitTimeoutSecs
|
||||||
|
} else if adjustedBaseTimeout > MaxBaseTimeoutSec {
|
||||||
|
adjustedBaseTimeout = MaxBaseTimeoutSec
|
||||||
|
}
|
||||||
|
|
||||||
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
|
p := v.(*contact)
|
||||||
|
|
||||||
|
// Don't retry anyone who isn't on the authorized peers list
|
||||||
|
if _, exists := cr.authorizedPeers.Load(p.id); exists {
|
||||||
|
if p.state == connections.DISCONNECTED && !p.queued {
|
||||||
|
timeout := time.Duration((math.Pow(2, float64(p.failedCount)))*float64(adjustedBaseTimeout /*baseTimeoutSec*/)) * time.Second
|
||||||
|
if time.Since(p.lastAttempt) > timeout {
|
||||||
|
retryable = append(retryable, p)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
|
for _, contact := range retryable {
|
||||||
|
if time.Since(contact.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
|
||||||
|
cr.priorityQueue.insert(contact)
|
||||||
|
} else {
|
||||||
|
cr.pendingQueue.insert(contact)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *contactRetry) publishConnectionRequest(contact *contact) {
|
||||||
|
log.Debugf("RestartFlow Publish Connection Request listener %v", contact)
|
||||||
|
if contact.ctype == peerConn {
|
||||||
|
cr.bus.Publish(event.NewEvent(event.PeerRequest, map[event.Field]string{event.RemotePeer: contact.id}))
|
||||||
|
}
|
||||||
|
if contact.ctype == serverConn {
|
||||||
|
cr.bus.Publish(event.NewEvent(event.RetryServerRequest, map[event.Field]string{event.GroupServer: contact.id}))
|
||||||
|
}
|
||||||
|
contact.state = connections.CONNECTING // Hacky but needed so we don't over flood waiting for PeerStateChange from engine
|
||||||
|
contact.lastAttempt = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *contactRetry) addConnection(id string, state connections.ConnectionState, ctype connectionType, lastSeen time.Time) {
|
||||||
|
// don't handle contact retries for ourselves
|
||||||
|
if id == cr.onion {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, exists := cr.connections.Load(id); !exists {
|
||||||
|
p := &contact{id: id, state: state, failedCount: 0, lastAttempt: event.CwtchEpoch, ctype: ctype, lastSeen: lastSeen, queued: false}
|
||||||
|
cr.connections.Store(id, p)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
// we have rerequested this connnection, probably via an explicit ask, update it's state
|
||||||
|
if c, ok := cr.connections.Load(id); ok {
|
||||||
|
contact := c.(*contact)
|
||||||
|
contact.state = state
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) handleEvent(id string, state connections.ConnectionState, ctype connectionType) {
|
func (cr *contactRetry) handleEvent(id string, state connections.ConnectionState, ctype connectionType) {
|
||||||
|
log.Debugf("cr.handleEvent state to %v on id %v", connections.ConnectionStateName[state], id)
|
||||||
|
|
||||||
|
// don't handle contact retries for ourselves
|
||||||
|
if id == cr.onion {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// reject events that contain invalid hostnames...we cannot connect to them
|
||||||
|
// and they could result in spurious connection attempts...
|
||||||
|
if !tor.IsValidHostname(id) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if _, exists := cr.connections.Load(id); !exists {
|
if _, exists := cr.connections.Load(id); !exists {
|
||||||
p := &contact{id: id, state: connections.DISCONNECTED, backoff: 0, ticks: 0, ctype: ctype}
|
// We have an event for something we don't know about...
|
||||||
cr.connections.Store(id, p)
|
// The only reason this should happen is if a *new* Peer/Server connection has changed.
|
||||||
|
// Let's set the timeout to Now() to indicate that this is a fresh connection, and so should likely be prioritized.
|
||||||
|
cr.addConnection(id, state, ctype, time.Now())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
pinf, _ := cr.connections.Load(id)
|
pinf, _ := cr.connections.Load(id)
|
||||||
p := pinf.(*contact)
|
p := pinf.(*contact)
|
||||||
|
log.Debugf(" managing state change for %v %v to %v by self %v", id, connections.ConnectionStateName[p.state], connections.ConnectionStateName[state], cr.onion)
|
||||||
if state == connections.DISCONNECTED || state == connections.FAILED || state == connections.KILLED {
|
if state == connections.DISCONNECTED || state == connections.FAILED || state == connections.KILLED {
|
||||||
p.state = connections.DISCONNECTED
|
if p.state == connections.SYNCED || p.state == connections.AUTHENTICATED {
|
||||||
if p.backoff == 0 {
|
p.lastSeen = time.Now()
|
||||||
p.backoff = 1
|
} else {
|
||||||
} else if p.backoff < maxBackoff {
|
p.failedCount += 1
|
||||||
p.backoff *= 2
|
}
|
||||||
|
p.state = connections.DISCONNECTED
|
||||||
|
p.lastAttempt = time.Now()
|
||||||
|
if p.failedCount > maxFailedBackoff {
|
||||||
|
p.failedCount = maxFailedBackoff
|
||||||
}
|
}
|
||||||
p.ticks = 0
|
|
||||||
} else if state == connections.CONNECTING || state == connections.CONNECTED {
|
} else if state == connections.CONNECTING || state == connections.CONNECTED {
|
||||||
p.state = state
|
p.state = state
|
||||||
} else if state == connections.AUTHENTICATED {
|
} else if state == connections.AUTHENTICATED || state == connections.SYNCED {
|
||||||
p.state = state
|
p.state = state
|
||||||
p.backoff = 0
|
p.lastSeen = time.Now()
|
||||||
|
p.failedCount = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) Shutdown() {
|
func (cr *contactRetry) Shutdown() {
|
||||||
cr.breakChan <- true
|
cr.breakChan <- true
|
||||||
|
cr.queue.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,128 @@
|
||||||
|
package plugins
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestContactRetryQueue simulates some basic connection queueing
|
||||||
|
// NOTE: This whole test is a race condition, and does flag go's detector
|
||||||
|
// We are invasively checking the internal state of the retry plugin and accessing pointers from another
|
||||||
|
// thread.
|
||||||
|
// We could build an entire thread safe monitoring functonality, but that would dramatically expand the scope of this test.
|
||||||
|
|
||||||
|
func TestContactRetryQueue(t *testing.T) {
|
||||||
|
log.SetLevel(log.LevelDebug)
|
||||||
|
bus := event.NewEventManager()
|
||||||
|
cr := NewConnectionRetry(bus, "").(*contactRetry)
|
||||||
|
cr.ACNUp = true // fake an ACN connection...
|
||||||
|
cr.protocolEngine = true // fake protocol engine
|
||||||
|
cr.stallRetries = false // fake not being in offline mode...
|
||||||
|
go cr.run()
|
||||||
|
|
||||||
|
testOnion := "2wgvbza2mbuc72a4u6r6k4hc2blcvrmk4q26bfvlwbqxv2yq5k52fcqd"
|
||||||
|
|
||||||
|
t.Logf("contact plugin up and running..sending peer connection...")
|
||||||
|
// Assert that there is a peer connection identified as "test"
|
||||||
|
bus.Publish(event.NewEvent(event.QueuePeerRequest, map[event.Field]string{event.RemotePeer: testOnion, event.LastSeen: "test"}))
|
||||||
|
|
||||||
|
// Wait until the test actually exists, and is queued
|
||||||
|
// This is the worst part of this test setup. Ideally we would sleep, or some other yielding, but
|
||||||
|
// go test scheduling doesn't like that and even sleeping long periods won't cause the event thread to make
|
||||||
|
// progress...
|
||||||
|
setup := false
|
||||||
|
for !setup {
|
||||||
|
if _, exists := cr.connections.Load(testOnion); exists {
|
||||||
|
if _, exists := cr.authorizedPeers.Load(testOnion); exists {
|
||||||
|
t.Logf("authorized")
|
||||||
|
setup = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We should very quickly become connecting...
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
pinf, _ := cr.connections.Load(testOnion)
|
||||||
|
if pinf.(*contact).state != 1 {
|
||||||
|
t.Fatalf("test connection should be in connecting after update, actually: %v", pinf.(*contact).state)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Asset that "test" is authenticated
|
||||||
|
cr.handleEvent(testOnion, connections.AUTHENTICATED, peerConn)
|
||||||
|
|
||||||
|
// Assert that "test has a valid state"
|
||||||
|
pinf, _ = cr.connections.Load(testOnion)
|
||||||
|
if pinf.(*contact).state != 3 {
|
||||||
|
t.Fatalf("test connection should be in authenticated after update, actually: %v", pinf.(*contact).state)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish an unrelated event to trigger the Plugin to go through a queuing cycle
|
||||||
|
// If we didn't do this we would have to wait 30 seconds for a check-in
|
||||||
|
bus.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{event.RemotePeer: "test2", event.ConnectionState: "Disconnected"}))
|
||||||
|
bus.Publish(event.NewEvent(event.QueuePeerRequest, map[event.Field]string{event.RemotePeer: testOnion, event.LastSeen: time.Now().Format(time.RFC3339Nano)}))
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
pinf, _ = cr.connections.Load(testOnion)
|
||||||
|
if pinf.(*contact).state != 1 {
|
||||||
|
t.Fatalf("test connection should be in connecting after update, actually: %v", pinf.(*contact).state)
|
||||||
|
}
|
||||||
|
|
||||||
|
cr.Shutdown()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Takes around 4 min unless you adjust the consts for tickTimeSec and circuitTimeoutSecs
|
||||||
|
/*
|
||||||
|
func TestRetryEmission(t *testing.T) {
|
||||||
|
log.SetLevel(log.LevelDebug)
|
||||||
|
log.Infof("*** Starting TestRetryEmission! ***")
|
||||||
|
bus := event.NewEventManager()
|
||||||
|
|
||||||
|
testQueue := event.NewQueue()
|
||||||
|
bus.Subscribe(event.PeerRequest, testQueue)
|
||||||
|
|
||||||
|
cr := NewConnectionRetry(bus, "").(*contactRetry)
|
||||||
|
cr.Start()
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
|
bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, "100"))
|
||||||
|
bus.Publish(event.NewEventList(event.ProtocolEngineCreated))
|
||||||
|
|
||||||
|
pub, _, _ := ed25519.GenerateKey(rand.Reader)
|
||||||
|
peerAddr := tor.GetTorV3Hostname(pub)
|
||||||
|
|
||||||
|
bus.Publish(event.NewEventList(event.QueuePeerRequest, event.RemotePeer, peerAddr, event.LastSeen, time.Now().Format(time.RFC3339Nano)))
|
||||||
|
|
||||||
|
log.Infof("Fetching 1st event")
|
||||||
|
ev := testQueue.Next()
|
||||||
|
if ev.EventType != event.PeerRequest {
|
||||||
|
t.Errorf("1st event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
|
||||||
|
}
|
||||||
|
log.Infof("1st event: %v", ev)
|
||||||
|
|
||||||
|
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.DISCONNECTED]))
|
||||||
|
|
||||||
|
log.Infof("fetching 2nd event")
|
||||||
|
ev = testQueue.Next()
|
||||||
|
log.Infof("2nd event: %v", ev)
|
||||||
|
if ev.EventType != event.PeerRequest {
|
||||||
|
t.Errorf("2nd event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
|
||||||
|
}
|
||||||
|
|
||||||
|
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.CONNECTED]))
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.DISCONNECTED]))
|
||||||
|
|
||||||
|
log.Infof("fetching 3rd event")
|
||||||
|
ev = testQueue.Next()
|
||||||
|
log.Infof("3nd event: %v", ev)
|
||||||
|
if ev.EventType != event.PeerRequest {
|
||||||
|
t.Errorf("3nd event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
|
||||||
|
}
|
||||||
|
|
||||||
|
cr.Shutdown()
|
||||||
|
}
|
||||||
|
*/
|
|
@ -0,0 +1,49 @@
|
||||||
|
package plugins
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const heartbeatTickTime = 60 * time.Second
|
||||||
|
|
||||||
|
type heartbeat struct {
|
||||||
|
bus event.Manager
|
||||||
|
queue event.Queue
|
||||||
|
breakChan chan bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hb *heartbeat) Start() {
|
||||||
|
go hb.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hb *heartbeat) Id() PluginID {
|
||||||
|
return HEARTBEAT
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hb *heartbeat) Shutdown() {
|
||||||
|
hb.breakChan <- true
|
||||||
|
hb.queue.Shutdown()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hb *heartbeat) run() {
|
||||||
|
log.Debugf("running heartbeat trigger plugin")
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-time.After(heartbeatTickTime):
|
||||||
|
// no fuss, just trigger the beat.
|
||||||
|
hb.bus.Publish(event.NewEvent(event.Heartbeat, map[event.Field]string{}))
|
||||||
|
continue
|
||||||
|
case <-hb.breakChan:
|
||||||
|
log.Debugf("shutting down heartbeat plugin")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHeartbeat returns a Plugin that when started will trigger heartbeat checks on a regular interval
|
||||||
|
func NewHeartbeat(bus event.Manager) Plugin {
|
||||||
|
cr := &heartbeat{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
||||||
|
return cr
|
||||||
|
}
|
|
@ -3,7 +3,7 @@ package plugins
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
"fmt"
|
"cwtch.im/cwtch/utils"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -16,12 +16,14 @@ const NetworkCheckError = "Error"
|
||||||
// NetworkCheckSuccess is a status for when the NetworkCheck Plugin has had a successful message from a peer, indicating it is online right now
|
// NetworkCheckSuccess is a status for when the NetworkCheck Plugin has had a successful message from a peer, indicating it is online right now
|
||||||
const NetworkCheckSuccess = "Success"
|
const NetworkCheckSuccess = "Success"
|
||||||
|
|
||||||
|
const NetworkCheckPeriod = time.Minute
|
||||||
|
|
||||||
// networkCheck is a convenience plugin for testing high level availability of onion services
|
// networkCheck is a convenience plugin for testing high level availability of onion services
|
||||||
type networkCheck struct {
|
type networkCheck struct {
|
||||||
bus event.Manager
|
bus event.Manager
|
||||||
queue event.Queue
|
queue event.Queue
|
||||||
|
onion string
|
||||||
acn connectivity.ACN
|
acn connectivity.ACN
|
||||||
onionsToCheck sync.Map // onion:string => true:bool
|
|
||||||
breakChan chan bool
|
breakChan chan bool
|
||||||
running bool
|
running bool
|
||||||
offline bool
|
offline bool
|
||||||
|
@ -29,8 +31,8 @@ type networkCheck struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNetworkCheck returns a Plugin that when started will attempt various network tests
|
// NewNetworkCheck returns a Plugin that when started will attempt various network tests
|
||||||
func NewNetworkCheck(bus event.Manager, acn connectivity.ACN) Plugin {
|
func NewNetworkCheck(onion string, bus event.Manager, acn connectivity.ACN) Plugin {
|
||||||
nc := &networkCheck{bus: bus, acn: acn, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
nc := &networkCheck{onion: onion, bus: bus, acn: acn, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
||||||
return nc
|
return nc
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -38,6 +40,10 @@ func (nc *networkCheck) Start() {
|
||||||
go nc.run()
|
go nc.run()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (nc *networkCheck) Id() PluginID {
|
||||||
|
return NETWORKCHECK
|
||||||
|
}
|
||||||
|
|
||||||
func (nc *networkCheck) run() {
|
func (nc *networkCheck) run() {
|
||||||
nc.running = true
|
nc.running = true
|
||||||
nc.offline = true
|
nc.offline = true
|
||||||
|
@ -49,7 +55,7 @@ func (nc *networkCheck) run() {
|
||||||
nc.bus.Subscribe(event.ServerStateChange, nc.queue)
|
nc.bus.Subscribe(event.ServerStateChange, nc.queue)
|
||||||
nc.bus.Subscribe(event.NewGetValMessageFromPeer, nc.queue)
|
nc.bus.Subscribe(event.NewGetValMessageFromPeer, nc.queue)
|
||||||
nc.bus.Subscribe(event.NewRetValMessageFromPeer, nc.queue)
|
nc.bus.Subscribe(event.NewRetValMessageFromPeer, nc.queue)
|
||||||
var lastMessageReceived time.Time
|
var lastMessageReceived = time.Now()
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-nc.breakChan:
|
case <-nc.breakChan:
|
||||||
|
@ -61,12 +67,13 @@ func (nc *networkCheck) run() {
|
||||||
// and then we will wait a minute and check the connection for the first time (the onion should be up)
|
// and then we will wait a minute and check the connection for the first time (the onion should be up)
|
||||||
// under normal operating circumstances
|
// under normal operating circumstances
|
||||||
case event.ProtocolEngineStartListen:
|
case event.ProtocolEngineStartListen:
|
||||||
if _, exists := nc.onionsToCheck.Load(e.Data[event.Onion]); !exists {
|
if nc.onion == (e.Data[event.Onion]) {
|
||||||
log.Debugf("initiating connection check for %v", e.Data[event.Onion])
|
log.Debugf("initiating connection check for %v", e.Data[event.Onion])
|
||||||
nc.onionsToCheck.Store(e.Data[event.Onion], true)
|
|
||||||
if time.Since(lastMessageReceived) > time.Minute {
|
if time.Since(lastMessageReceived) > time.Minute {
|
||||||
nc.selfTest()
|
nc.selfTest()
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("network check plugin received an event for a different profile than it was started with. Internal wiring is probably wrong.")
|
||||||
}
|
}
|
||||||
case event.PeerStateChange:
|
case event.PeerStateChange:
|
||||||
fallthrough
|
fallthrough
|
||||||
|
@ -96,10 +103,11 @@ func (nc *networkCheck) run() {
|
||||||
}
|
}
|
||||||
nc.offlineLock.Unlock()
|
nc.offlineLock.Unlock()
|
||||||
}
|
}
|
||||||
case <-time.After(tickTime):
|
case <-time.After(NetworkCheckPeriod):
|
||||||
// if we haven't received an action in the last minute...kick off a set of testing
|
// if we haven't received an action in the last minute...kick off a set of testing
|
||||||
if time.Since(lastMessageReceived) > time.Minute {
|
if time.Since(lastMessageReceived) > time.Minute {
|
||||||
nc.selfTest()
|
nc.selfTest()
|
||||||
|
lastMessageReceived = time.Now()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -114,26 +122,22 @@ func (nc *networkCheck) Shutdown() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (nc *networkCheck) selfTest() {
|
func (nc *networkCheck) selfTest() {
|
||||||
nc.onionsToCheck.Range(func(key, val interface{}) bool {
|
go nc.checkConnection(nc.onion)
|
||||||
go nc.checkConnection(key.(string))
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
|
||||||
func (nc *networkCheck) checkConnection(onion string) {
|
func (nc *networkCheck) checkConnection(onion string) {
|
||||||
prog, _ := nc.acn.GetBootstrapStatus()
|
progress, _ := nc.acn.GetBootstrapStatus()
|
||||||
if prog != 100 {
|
if progress != 100 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// we want to definitively time these actions out faster than tor will, because these onions should definitely be
|
// we want to definitively time these actions out faster than tor will, because these onions should definitely be
|
||||||
// online
|
// online
|
||||||
ClientTimeout := TimeoutPolicy(time.Second * 60)
|
ClientTimeout := utils.TimeoutPolicy(time.Second * 60)
|
||||||
err := ClientTimeout.ExecuteAction(func() error {
|
err := ClientTimeout.ExecuteAction(func() error {
|
||||||
conn, _, err := nc.acn.Open(onion)
|
conn, _, err := nc.acn.Open(onion)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
conn.Close()
|
_ = conn.Close()
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
@ -150,26 +154,3 @@ func (nc *networkCheck) checkConnection(onion string) {
|
||||||
nc.offline = false
|
nc.offline = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO we might want to reuse this, but for now it is only used by this plugin so it can live here
|
|
||||||
|
|
||||||
// TimeoutPolicy is an interface for enforcing common timeout patterns
|
|
||||||
type TimeoutPolicy time.Duration
|
|
||||||
|
|
||||||
// ExecuteAction runs a function and returns an error if it hasn't returned
|
|
||||||
// by the time specified by TimeoutPolicy
|
|
||||||
func (tp *TimeoutPolicy) ExecuteAction(action func() error) error {
|
|
||||||
|
|
||||||
c := make(chan error)
|
|
||||||
go func() {
|
|
||||||
c <- action()
|
|
||||||
}()
|
|
||||||
|
|
||||||
tick := time.NewTicker(time.Duration(*tp))
|
|
||||||
select {
|
|
||||||
case <-tick.C:
|
|
||||||
return fmt.Errorf("ActionTimedOutError")
|
|
||||||
case err := <-c:
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -13,12 +13,15 @@ type PluginID int
|
||||||
const (
|
const (
|
||||||
CONNECTIONRETRY PluginID = iota
|
CONNECTIONRETRY PluginID = iota
|
||||||
NETWORKCHECK
|
NETWORKCHECK
|
||||||
|
ANTISPAM
|
||||||
|
HEARTBEAT
|
||||||
)
|
)
|
||||||
|
|
||||||
// Plugin is the interface for a plugin
|
// Plugin is the interface for a plugin
|
||||||
type Plugin interface {
|
type Plugin interface {
|
||||||
Start()
|
Start()
|
||||||
Shutdown()
|
Shutdown()
|
||||||
|
Id() PluginID
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get is a plugin factory for the requested plugin
|
// Get is a plugin factory for the requested plugin
|
||||||
|
@ -27,7 +30,11 @@ func Get(id PluginID, bus event.Manager, acn connectivity.ACN, onion string) (Pl
|
||||||
case CONNECTIONRETRY:
|
case CONNECTIONRETRY:
|
||||||
return NewConnectionRetry(bus, onion), nil
|
return NewConnectionRetry(bus, onion), nil
|
||||||
case NETWORKCHECK:
|
case NETWORKCHECK:
|
||||||
return NewNetworkCheck(bus, acn), nil
|
return NewNetworkCheck(onion, bus, acn), nil
|
||||||
|
case ANTISPAM:
|
||||||
|
return NewAntiSpam(bus), nil
|
||||||
|
case HEARTBEAT:
|
||||||
|
return NewHeartbeat(bus), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("plugin not defined %v", id)
|
return nil, fmt.Errorf("plugin not defined %v", id)
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
package utils
|
package app
|
||||||
|
|
||||||
import (
|
import (
|
||||||
app2 "cwtch.im/cwtch/app"
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
"cwtch.im/cwtch/model/attr"
|
||||||
"cwtch.im/cwtch/model/constants"
|
"cwtch.im/cwtch/model/constants"
|
||||||
"cwtch.im/cwtch/peer"
|
"cwtch.im/cwtch/peer"
|
||||||
|
@ -12,10 +11,13 @@ import (
|
||||||
// Proper use of an App is to call CreatePeer and then process the NewPeer event
|
// Proper use of an App is to call CreatePeer and then process the NewPeer event
|
||||||
// however for small utility use, this function which polls the app until the peer is created
|
// however for small utility use, this function which polls the app until the peer is created
|
||||||
// may fill that usecase better
|
// may fill that usecase better
|
||||||
func WaitGetPeer(app app2.Application, name string) peer.CwtchPeer {
|
func WaitGetPeer(app Application, name string) peer.CwtchPeer {
|
||||||
for {
|
for {
|
||||||
for _, handle := range app.ListProfiles() {
|
for _, handle := range app.ListProfiles() {
|
||||||
peer := app.GetPeer(handle)
|
peer := app.GetPeer(handle)
|
||||||
|
if peer == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
localName, _ := peer.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
localName, _ := peer.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
||||||
if localName == name {
|
if localName == name {
|
||||||
return peer
|
return peer
|
|
@ -1,5 +1,9 @@
|
||||||
package event
|
package event
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
var CwtchEpoch = time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)
|
||||||
|
|
||||||
// Type captures the definition of many common Cwtch application events
|
// Type captures the definition of many common Cwtch application events
|
||||||
type Type string
|
type Type string
|
||||||
|
|
||||||
|
@ -13,11 +17,22 @@ const (
|
||||||
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
|
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
|
||||||
PeerRequest = Type("PeerRequest")
|
PeerRequest = Type("PeerRequest")
|
||||||
|
|
||||||
// RetryPeerRequest
|
// QueuePeerRequest
|
||||||
// Identical to PeerRequest, but allows Engine to make decisions regarding blocked peers
|
// When peer has too many peers to try and wants to ease off Tor throttling, use this to notify ContactRetry plugin to schedule a peer for later try
|
||||||
// attributes:
|
// LastSeen: last seen time of the contact
|
||||||
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
|
// And one of
|
||||||
RetryPeerRequest = Type("RetryPeerRequest")
|
// RemotePeer
|
||||||
|
// GroupServer
|
||||||
|
QueuePeerRequest = Type("QueuePeerRequest")
|
||||||
|
|
||||||
|
// Disconnect*Request
|
||||||
|
// Close active connections and prevent new connections
|
||||||
|
DisconnectPeerRequest = Type("DisconnectPeerRequest")
|
||||||
|
DisconnectServerRequest = Type("DisconnectServerRequest")
|
||||||
|
|
||||||
|
// Events to Manage Retry Contacts
|
||||||
|
PurgeRetries = Type("PurgeRetries")
|
||||||
|
ResumeRetries = Type("ResumeRetries")
|
||||||
|
|
||||||
// RetryServerRequest
|
// RetryServerRequest
|
||||||
// Asks CwtchPeer to retry a server connection...
|
// Asks CwtchPeer to retry a server connection...
|
||||||
|
@ -35,11 +50,14 @@ const (
|
||||||
AllowUnknownPeers = Type("AllowUnknownPeers")
|
AllowUnknownPeers = Type("AllowUnknownPeers")
|
||||||
|
|
||||||
// GroupServer
|
// GroupServer
|
||||||
|
QueueJoinServer = Type("QueueJoinServer")
|
||||||
JoinServer = Type("JoinServer")
|
JoinServer = Type("JoinServer")
|
||||||
|
|
||||||
// attributes GroupServer - the onion of the server to leave
|
// attributes GroupServer - the onion of the server to leave
|
||||||
LeaveServer = Type("LeaveServer")
|
LeaveServer = Type("LeaveServer")
|
||||||
|
|
||||||
|
ProtocolEngineCreated = Type("ProtocolEngineCreated")
|
||||||
|
ProtocolEngineShutdown = Type("ProtocolEngineShutdown")
|
||||||
ProtocolEngineStartListen = Type("ProtocolEngineStartListen")
|
ProtocolEngineStartListen = Type("ProtocolEngineStartListen")
|
||||||
ProtocolEngineStopped = Type("ProtocolEngineStopped")
|
ProtocolEngineStopped = Type("ProtocolEngineStopped")
|
||||||
|
|
||||||
|
@ -124,12 +142,6 @@ const (
|
||||||
// Password, NewPassword
|
// Password, NewPassword
|
||||||
ChangePassword = Type("ChangePassword")
|
ChangePassword = Type("ChangePassword")
|
||||||
|
|
||||||
// Error(err), EventID
|
|
||||||
ChangePasswordError = Type("ChangePasswordError")
|
|
||||||
|
|
||||||
// EventID
|
|
||||||
ChangePasswordSuccess = Type("ChangePasswordSuccess")
|
|
||||||
|
|
||||||
// a group has been successfully added or newly created
|
// a group has been successfully added or newly created
|
||||||
// attributes:
|
// attributes:
|
||||||
// Data [serialized *model.Group]
|
// Data [serialized *model.Group]
|
||||||
|
@ -138,9 +150,6 @@ const (
|
||||||
// RemotePeer
|
// RemotePeer
|
||||||
DeleteContact = Type("DeleteContact")
|
DeleteContact = Type("DeleteContact")
|
||||||
|
|
||||||
// GroupID
|
|
||||||
DeleteGroup = Type("DeleteGroup")
|
|
||||||
|
|
||||||
// PeerStateChange servers as a new incoming connection message as well, and can/is consumed by frontends to alert of new p2p connections
|
// PeerStateChange servers as a new incoming connection message as well, and can/is consumed by frontends to alert of new p2p connections
|
||||||
// RemotePeer
|
// RemotePeer
|
||||||
// ConnectionState
|
// ConnectionState
|
||||||
|
@ -192,10 +201,9 @@ const (
|
||||||
Syn = Type("Syn")
|
Syn = Type("Syn")
|
||||||
Ack = Type("Ack")
|
Ack = Type("Ack")
|
||||||
|
|
||||||
// For situations where we want to update $Identity -> $RemotePeer/$GroupID's total message count to be $Data
|
|
||||||
MessageCounterResync = Type("MessageCounterResync")
|
|
||||||
|
|
||||||
// File Handling Events
|
// File Handling Events
|
||||||
|
StopFileShare = Type("StopFileShare")
|
||||||
|
StopAllFileShares = Type("StopAllFileShares")
|
||||||
ShareManifest = Type("ShareManifest")
|
ShareManifest = Type("ShareManifest")
|
||||||
ManifestSizeReceived = Type("ManifestSizeReceived")
|
ManifestSizeReceived = Type("ManifestSizeReceived")
|
||||||
ManifestError = Type("ManifestError")
|
ManifestError = Type("ManifestError")
|
||||||
|
@ -207,9 +215,21 @@ const (
|
||||||
|
|
||||||
// Profile Attribute Event
|
// Profile Attribute Event
|
||||||
UpdatedProfileAttribute = Type("UpdatedProfileAttribute")
|
UpdatedProfileAttribute = Type("UpdatedProfileAttribute")
|
||||||
|
// Conversation Attribute Update...
|
||||||
|
UpdatedConversationAttribute = Type("UpdatedConversationAttribute")
|
||||||
StartingStorageMiragtion = Type("StartingStorageMigration")
|
StartingStorageMiragtion = Type("StartingStorageMigration")
|
||||||
DoneStorageMigration = Type("DoneStorageMigration")
|
DoneStorageMigration = Type("DoneStorageMigration")
|
||||||
|
|
||||||
|
TokenManagerInfo = Type("TokenManagerInfo")
|
||||||
|
TriggerAntispamCheck = Type("TriggerAntispamCheck")
|
||||||
|
MakeAntispamPayment = Type("MakeAntispamPayment")
|
||||||
|
|
||||||
|
// Heartbeat is used to trigger actions that need to happen every so often...
|
||||||
|
Heartbeat = Type("Heartbeat")
|
||||||
|
|
||||||
|
// Conversation Search
|
||||||
|
SearchResult = Type("SearchResult")
|
||||||
|
SearchCancelled = Type("SearchCancelled")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Field defines common event attributes
|
// Field defines common event attributes
|
||||||
|
@ -220,10 +240,13 @@ const (
|
||||||
|
|
||||||
// A peers local onion address
|
// A peers local onion address
|
||||||
Onion = Field("Onion")
|
Onion = Field("Onion")
|
||||||
|
ProfileOnion = Field("ProfileOnion")
|
||||||
|
|
||||||
RemotePeer = Field("RemotePeer")
|
RemotePeer = Field("RemotePeer")
|
||||||
|
LastSeen = Field("LastSeen")
|
||||||
Ciphertext = Field("Ciphertext")
|
Ciphertext = Field("Ciphertext")
|
||||||
Signature = Field("Signature")
|
Signature = Field("Signature")
|
||||||
|
CachedTokens = Field("CachedTokens")
|
||||||
PreviousSignature = Field("PreviousSignature")
|
PreviousSignature = Field("PreviousSignature")
|
||||||
TimestampSent = Field("TimestampSent")
|
TimestampSent = Field("TimestampSent")
|
||||||
TimestampReceived = Field("TimestampReceived")
|
TimestampReceived = Field("TimestampReceived")
|
||||||
|
@ -237,6 +260,7 @@ const (
|
||||||
ServerTokenY = Field("ServerTokenY")
|
ServerTokenY = Field("ServerTokenY")
|
||||||
ServerTokenOnion = Field("ServerTokenOnion")
|
ServerTokenOnion = Field("ServerTokenOnion")
|
||||||
GroupInvite = Field("GroupInvite")
|
GroupInvite = Field("GroupInvite")
|
||||||
|
ServerTokenCount = Field("ServerTokenCount")
|
||||||
|
|
||||||
ProfileName = Field("ProfileName")
|
ProfileName = Field("ProfileName")
|
||||||
Password = Field("Password")
|
Password = Field("Password")
|
||||||
|
@ -261,6 +285,7 @@ const (
|
||||||
EventID = Field("EventID")
|
EventID = Field("EventID")
|
||||||
EventContext = Field("EventContext")
|
EventContext = Field("EventContext")
|
||||||
Index = Field("Index")
|
Index = Field("Index")
|
||||||
|
RowIndex = Field("RowIndex")
|
||||||
ContentHash = Field("ContentHash")
|
ContentHash = Field("ContentHash")
|
||||||
|
|
||||||
// Handle denotes a contact handle of any type.
|
// Handle denotes a contact handle of any type.
|
||||||
|
@ -285,7 +310,10 @@ const (
|
||||||
SerializedManifest = Field("SerializedManifest")
|
SerializedManifest = Field("SerializedManifest")
|
||||||
TempFile = Field("TempFile")
|
TempFile = Field("TempFile")
|
||||||
FilePath = Field("FilePath")
|
FilePath = Field("FilePath")
|
||||||
|
FileDownloadFinished = Field("FileDownloadFinished")
|
||||||
NameSuggestion = Field("NameSuggestion")
|
NameSuggestion = Field("NameSuggestion")
|
||||||
|
|
||||||
|
SearchID = Field("SearchID")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Defining Common errors
|
// Defining Common errors
|
||||||
|
@ -300,6 +328,7 @@ const (
|
||||||
ContextInvite = "im.cwtch.invite"
|
ContextInvite = "im.cwtch.invite"
|
||||||
ContextRaw = "im.cwtch.raw"
|
ContextRaw = "im.cwtch.raw"
|
||||||
ContextGetVal = "im.cwtch.getVal"
|
ContextGetVal = "im.cwtch.getVal"
|
||||||
|
ContextVersion = "im.cwtch.version"
|
||||||
ContextRetVal = "im.cwtch.retVal"
|
ContextRetVal = "im.cwtch.retVal"
|
||||||
ContextRequestManifest = "im.cwtch.file.request.manifest"
|
ContextRequestManifest = "im.cwtch.file.request.manifest"
|
||||||
ContextSendManifest = "im.cwtch.file.send.manifest"
|
ContextSendManifest = "im.cwtch.file.send.manifest"
|
||||||
|
@ -307,19 +336,25 @@ const (
|
||||||
ContextSendFile = "im.cwtch.file.send.chunk"
|
ContextSendFile = "im.cwtch.file.send.chunk"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Define Default Attribute Keys
|
// Define Attribute Keys related to history preservation
|
||||||
const (
|
const (
|
||||||
SaveHistoryKey = "SavePeerHistory"
|
PreserveHistoryDefaultSettingKey = "SaveHistoryDefault" // profile level default
|
||||||
|
SaveHistoryKey = "SavePeerHistory" // peer level setting
|
||||||
)
|
)
|
||||||
|
|
||||||
// Define Default Attribute Values
|
// Define Default Attribute Values
|
||||||
const (
|
const (
|
||||||
// Save History has 3 distinct states. By default we don't save history (DefaultDeleteHistory), if the peer confirms this
|
// Save History has 3 distinct states. By default we refer to the profile level
|
||||||
// we change to DeleteHistoryConfirmed, if they confirm they want to save then this becomes SaveHistoryConfirmed
|
// attribute PreserveHistoryDefaultSettingKey ( default: false i.e. DefaultDeleteHistory),
|
||||||
// We use this distinction between default and confirmed to drive UI
|
// For each contact, if the profile owner confirms deletion we change to DeleteHistoryConfirmed,
|
||||||
DeleteHistoryDefault = "DefaultDeleteHistory"
|
// if the profile owner confirms they want to save history then this becomes SaveHistoryConfirmed
|
||||||
|
// These settings are set at the UI level using Get/SetScopeZoneAttribute with scoped zone: local.profile.*
|
||||||
SaveHistoryConfirmed = "SaveHistory"
|
SaveHistoryConfirmed = "SaveHistory"
|
||||||
DeleteHistoryConfirmed = "DeleteHistoryConfirmed"
|
DeleteHistoryConfirmed = "DeleteHistoryConfirmed"
|
||||||
|
|
||||||
|
// NOTE: While this says "[DeleteHistory]Default", The actual behaviour will now depend on the
|
||||||
|
// global app/profile value of PreserveHistoryDefaultSettingKey
|
||||||
|
DeleteHistoryDefault = "DefaultDeleteHistory"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Bool strings
|
// Bool strings
|
||||||
|
|
|
@ -10,12 +10,6 @@ type queue struct {
|
||||||
closed bool
|
closed bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type simpleQueue struct {
|
|
||||||
eventChannel chan Event
|
|
||||||
lock sync.Mutex
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queue is a wrapper around a channel for handling Events in a consistent way across subsystems.
|
// Queue is a wrapper around a channel for handling Events in a consistent way across subsystems.
|
||||||
// The expectation is that each subsystem in Cwtch will manage a given an event.Queue fed from
|
// The expectation is that each subsystem in Cwtch will manage a given an event.Queue fed from
|
||||||
// the event.Manager.
|
// the event.Manager.
|
||||||
|
@ -33,49 +27,6 @@ func NewQueue() Queue {
|
||||||
return queue
|
return queue
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSimpleQueue initializes an event.Queue of the given buffer size.
|
|
||||||
func NewSimpleQueue(buffer int) Queue {
|
|
||||||
queue := new(simpleQueue)
|
|
||||||
queue.eventChannel = make(chan Event, buffer)
|
|
||||||
return queue
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sq *simpleQueue) inChan() chan<- Event {
|
|
||||||
return sq.eventChannel
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sq *simpleQueue) OutChan() <-chan Event {
|
|
||||||
return sq.eventChannel
|
|
||||||
}
|
|
||||||
|
|
||||||
// Backlog returns the length of the queue backlog
|
|
||||||
func (sq *simpleQueue) Len() int {
|
|
||||||
return len(sq.eventChannel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next available event from the front of the queue
|
|
||||||
func (sq *simpleQueue) Next() Event {
|
|
||||||
event := <-sq.eventChannel
|
|
||||||
return event
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown closes our eventChannel
|
|
||||||
func (sq *simpleQueue) Shutdown() {
|
|
||||||
sq.lock.Lock()
|
|
||||||
sq.closed = true
|
|
||||||
close(sq.eventChannel)
|
|
||||||
sq.lock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown closes our eventChannel
|
|
||||||
func (sq *simpleQueue) Publish(event Event) {
|
|
||||||
sq.lock.Lock()
|
|
||||||
if !sq.closed {
|
|
||||||
sq.inChan() <- event
|
|
||||||
}
|
|
||||||
sq.lock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (iq *queue) inChan() chan<- Event {
|
func (iq *queue) inChan() chan<- Event {
|
||||||
return iq.infChan.In()
|
return iq.infChan.In()
|
||||||
}
|
}
|
||||||
|
@ -84,7 +35,7 @@ func (iq *queue) OutChan() <-chan Event {
|
||||||
return iq.infChan.Out()
|
return iq.infChan.Out()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Out returns the next available event from the front of the queue
|
// Next returns the next available event from the front of the queue
|
||||||
func (iq *queue) Next() Event {
|
func (iq *queue) Next() Event {
|
||||||
event := <-iq.infChan.Out()
|
event := <-iq.infChan.Out()
|
||||||
return event
|
return event
|
||||||
|
@ -97,8 +48,10 @@ func (iq *queue) Len() int {
|
||||||
// Shutdown closes our eventChannel
|
// Shutdown closes our eventChannel
|
||||||
func (iq *queue) Shutdown() {
|
func (iq *queue) Shutdown() {
|
||||||
iq.lock.Lock()
|
iq.lock.Lock()
|
||||||
|
if !iq.closed {
|
||||||
iq.closed = true
|
iq.closed = true
|
||||||
iq.infChan.Close()
|
iq.infChan.Close()
|
||||||
|
}
|
||||||
iq.lock.Unlock()
|
iq.lock.Unlock()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,7 +22,7 @@ type Event struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRandNumber is a helper function which returns a random integer, this is
|
// GetRandNumber is a helper function which returns a random integer, this is
|
||||||
// currently mostly used to generate messageids
|
// currently mostly used to generate message IDs
|
||||||
func GetRandNumber() *big.Int {
|
func GetRandNumber() *big.Int {
|
||||||
num, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
|
num, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
|
||||||
// If we can't generate random numbers then panicking is probably
|
// If we can't generate random numbers then panicking is probably
|
||||||
|
@ -46,6 +46,8 @@ func NewEventList(eventType Type, args ...interface{}) Event {
|
||||||
val, vok := args[i+1].(string)
|
val, vok := args[i+1].(string)
|
||||||
if kok && vok {
|
if kok && vok {
|
||||||
data[key] = val
|
data[key] = val
|
||||||
|
} else {
|
||||||
|
log.Errorf("attempted to send a field that could not be parsed to a string: %v %v", args[i], args[i+1])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return Event{EventType: eventType, EventID: GetRandNumber().String(), Data: data}
|
return Event{EventType: eventType, EventID: GetRandNumber().String(), Data: data}
|
||||||
|
@ -56,6 +58,7 @@ type manager struct {
|
||||||
subscribers map[Type][]Queue
|
subscribers map[Type][]Queue
|
||||||
events chan []byte
|
events chan []byte
|
||||||
mapMutex sync.Mutex
|
mapMutex sync.Mutex
|
||||||
|
chanMutex sync.Mutex
|
||||||
internal chan bool
|
internal chan bool
|
||||||
closed bool
|
closed bool
|
||||||
trace bool
|
trace bool
|
||||||
|
@ -92,11 +95,18 @@ func (em *manager) initialize() {
|
||||||
func (em *manager) Subscribe(eventType Type, queue Queue) {
|
func (em *manager) Subscribe(eventType Type, queue Queue) {
|
||||||
em.mapMutex.Lock()
|
em.mapMutex.Lock()
|
||||||
defer em.mapMutex.Unlock()
|
defer em.mapMutex.Unlock()
|
||||||
|
for _, sub := range em.subscribers[eventType] {
|
||||||
|
if sub == queue {
|
||||||
|
return // don't add the same queue for the same event twice...
|
||||||
|
}
|
||||||
|
}
|
||||||
em.subscribers[eventType] = append(em.subscribers[eventType], queue)
|
em.subscribers[eventType] = append(em.subscribers[eventType], queue)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Publish takes an Event and sends it to the internal eventBus where it is distributed to all Subscribers
|
// Publish takes an Event and sends it to the internal eventBus where it is distributed to all Subscribers
|
||||||
func (em *manager) Publish(event Event) {
|
func (em *manager) Publish(event Event) {
|
||||||
|
em.chanMutex.Lock()
|
||||||
|
defer em.chanMutex.Unlock()
|
||||||
if event.EventType != "" && !em.closed {
|
if event.EventType != "" && !em.closed {
|
||||||
|
|
||||||
// Debug Events for Tracing, locked behind an environment variable
|
// Debug Events for Tracing, locked behind an environment variable
|
||||||
|
@ -126,7 +136,7 @@ func (em *manager) eventBus() {
|
||||||
for {
|
for {
|
||||||
eventJSON := <-em.events
|
eventJSON := <-em.events
|
||||||
|
|
||||||
// In the case on an empty event. Teardown the Queue
|
// In the case on an empty event. Tear down the Queue
|
||||||
if len(eventJSON) == 0 {
|
if len(eventJSON) == 0 {
|
||||||
log.Errorf("Received zero length event")
|
log.Errorf("Received zero length event")
|
||||||
break
|
break
|
||||||
|
@ -148,7 +158,10 @@ func (em *manager) eventBus() {
|
||||||
for _, subscriber := range subscribers {
|
for _, subscriber := range subscribers {
|
||||||
// Deep Copy for Each Subscriber
|
// Deep Copy for Each Subscriber
|
||||||
var eventCopy Event
|
var eventCopy Event
|
||||||
json.Unmarshal(eventJSON, &eventCopy)
|
err = json.Unmarshal(eventJSON, &eventCopy)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error unmarshalling event: %v ", err)
|
||||||
|
}
|
||||||
subscriber.Publish(eventCopy)
|
subscriber.Publish(eventCopy)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -160,7 +173,9 @@ func (em *manager) eventBus() {
|
||||||
// Shutdown triggers, and waits for, the internal eventBus goroutine to finish
|
// Shutdown triggers, and waits for, the internal eventBus goroutine to finish
|
||||||
func (em *manager) Shutdown() {
|
func (em *manager) Shutdown() {
|
||||||
em.events <- []byte{}
|
em.events <- []byte{}
|
||||||
|
em.chanMutex.Lock()
|
||||||
em.closed = true
|
em.closed = true
|
||||||
|
em.chanMutex.Unlock()
|
||||||
// wait for eventBus to finish
|
// wait for eventBus to finish
|
||||||
<-em.internal
|
<-em.internal
|
||||||
close(em.events)
|
close(em.events)
|
||||||
|
|
|
@ -2,7 +2,6 @@ package event
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"sync"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -12,12 +11,11 @@ func TestEventManager(t *testing.T) {
|
||||||
eventManager := NewEventManager()
|
eventManager := NewEventManager()
|
||||||
|
|
||||||
// We need to make this buffer at least 1, otherwise we will log an error!
|
// We need to make this buffer at least 1, otherwise we will log an error!
|
||||||
testChan := make(chan Event, 1)
|
simpleQueue := NewQueue()
|
||||||
simpleQueue := &simpleQueue{testChan, sync.Mutex{}, false}
|
|
||||||
eventManager.Subscribe("TEST", simpleQueue)
|
eventManager.Subscribe("TEST", simpleQueue)
|
||||||
eventManager.Publish(Event{EventType: "TEST", Data: map[Field]string{"Value": "Hello World"}})
|
eventManager.Publish(Event{EventType: "TEST", Data: map[Field]string{"Value": "Hello World"}})
|
||||||
|
|
||||||
event := <-testChan
|
event := simpleQueue.Next()
|
||||||
if event.EventType == "TEST" && event.Data["Value"] == "Hello World" {
|
if event.EventType == "TEST" && event.Data["Value"] == "Hello World" {
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
@ -27,17 +25,6 @@ func TestEventManager(t *testing.T) {
|
||||||
eventManager.Shutdown()
|
eventManager.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Most basic Manager Test, Initialize, Subscribe, Publish, Receive
|
|
||||||
func TestEventManagerOverflow(t *testing.T) {
|
|
||||||
eventManager := NewEventManager()
|
|
||||||
|
|
||||||
// Explicitly setting this to 0 log an error!
|
|
||||||
testChan := make(chan Event)
|
|
||||||
simpleQueue := &simpleQueue{testChan, sync.Mutex{}, false}
|
|
||||||
eventManager.Subscribe("TEST", simpleQueue)
|
|
||||||
eventManager.Publish(Event{EventType: "TEST"})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEventManagerMultiple(t *testing.T) {
|
func TestEventManagerMultiple(t *testing.T) {
|
||||||
log.SetLevel(log.LevelDebug)
|
log.SetLevel(log.LevelDebug)
|
||||||
eventManager := NewEventManager()
|
eventManager := NewEventManager()
|
||||||
|
@ -56,7 +43,7 @@ func TestEventManagerMultiple(t *testing.T) {
|
||||||
eventManager.Publish(Event{EventType: "GroupEvent", Data: map[Field]string{"Value": "Hello World Group"}})
|
eventManager.Publish(Event{EventType: "GroupEvent", Data: map[Field]string{"Value": "Hello World Group"}})
|
||||||
eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
|
eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
|
||||||
eventManager.Publish(Event{EventType: "ErrorEvent", Data: map[Field]string{"Value": "Hello World Error"}})
|
eventManager.Publish(Event{EventType: "ErrorEvent", Data: map[Field]string{"Value": "Hello World Error"}})
|
||||||
eventManager.Publish(Event{EventType: "NobodyIsSubscribedToThisEvent", Data: map[Field]string{"Value": "Noone should see this!"}})
|
eventManager.Publish(Event{EventType: "NobodyIsSubscribedToThisEvent", Data: map[Field]string{"Value": "No one should see this!"}})
|
||||||
|
|
||||||
assertLength := func(len int, expected int, label string) {
|
assertLength := func(len int, expected int, label string) {
|
||||||
if len != expected {
|
if len != expected {
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
// nolint:nilaway - the infiniteBuffer function causes issues with static analysis because it is very unidomatic.
|
||||||
package event
|
package event
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -19,7 +20,7 @@ func newInfiniteChannel() *infiniteChannel {
|
||||||
input: make(chan Event),
|
input: make(chan Event),
|
||||||
output: make(chan Event),
|
output: make(chan Event),
|
||||||
length: make(chan int),
|
length: make(chan int),
|
||||||
buffer: newInfinitQueue(),
|
buffer: newInfiniteQueue(),
|
||||||
}
|
}
|
||||||
go ch.infiniteBuffer()
|
go ch.infiniteBuffer()
|
||||||
return ch
|
return ch
|
||||||
|
|
|
@ -24,7 +24,7 @@ type infiniteQueue struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// New constructs and returns a new Queue.
|
// New constructs and returns a new Queue.
|
||||||
func newInfinitQueue() *infiniteQueue {
|
func newInfiniteQueue() *infiniteQueue {
|
||||||
return &infiniteQueue{
|
return &infiniteQueue{
|
||||||
buf: make([]Event, minQueueLen),
|
buf: make([]Event, minQueueLen),
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,128 @@
|
||||||
|
package extensions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
"cwtch.im/cwtch/model/constants"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProfileValueExtension implements custom Profile Names over Cwtch
|
||||||
|
type ProfileValueExtension struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pne ProfileValueExtension) NotifySettingsUpdate(_ settings.GlobalSettings) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pne ProfileValueExtension) EventsToRegister() []event.Type {
|
||||||
|
return []event.Type{event.PeerStateChange, event.Heartbeat}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pne ProfileValueExtension) ExperimentsToRegister() []string {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pne ProfileValueExtension) requestProfileInfo(profile peer.CwtchPeer, ci *model.Conversation) {
|
||||||
|
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.Name)
|
||||||
|
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileStatus)
|
||||||
|
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute1)
|
||||||
|
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute2)
|
||||||
|
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pne ProfileValueExtension) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
||||||
|
switch ev.EventType {
|
||||||
|
case event.Heartbeat:
|
||||||
|
// once every heartbeat, loop through conversations and, if they are online, request an update to any long info..
|
||||||
|
conversations, err := profile.FetchConversations()
|
||||||
|
if err == nil {
|
||||||
|
for _, ci := range conversations {
|
||||||
|
if profile.GetPeerState(ci.Handle) == connections.AUTHENTICATED {
|
||||||
|
pne.requestProfileInfo(profile, ci)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case event.PeerStateChange:
|
||||||
|
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
||||||
|
if err == nil {
|
||||||
|
// if we have re-authenticated with thie peer then request their profile image...
|
||||||
|
if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
|
||||||
|
// Request some profile information...
|
||||||
|
pne.requestProfileInfo(profile, ci)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnContactReceiveValue for ProfileValueExtension handles saving specific Public Profile Values like Profile Name
|
||||||
|
func (pne ProfileValueExtension) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, szp attr.ScopedZonedPath, value string, exists bool) {
|
||||||
|
// Allow public profile parameters to be added as contact specific attributes...
|
||||||
|
scope, zone, _ := szp.GetScopeZonePath()
|
||||||
|
if exists && scope.IsPublic() && zone == attr.ProfileZone {
|
||||||
|
|
||||||
|
// Check the current value of the attribute
|
||||||
|
currentValue, err := profile.GetConversationAttribute(conversation.ID, szp)
|
||||||
|
if err == nil && currentValue == value {
|
||||||
|
// Value exists and the value is the same, short-circuit
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the new Attribute
|
||||||
|
err = profile.SetConversationAttribute(conversation.ID, szp, value)
|
||||||
|
if err != nil {
|
||||||
|
// Something else wen't wrong.. short-circuit
|
||||||
|
log.Errorf("error setting conversation attribute %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally publish an update for listeners to react to.
|
||||||
|
scope, zone, zpath := szp.GetScopeZonePath()
|
||||||
|
profile.PublishEvent(event.NewEvent(event.UpdatedConversationAttribute, map[event.Field]string{
|
||||||
|
event.Scope: string(scope),
|
||||||
|
event.Path: string(zone.ConstructZonedPath(zpath)),
|
||||||
|
event.Data: value,
|
||||||
|
event.RemotePeer: conversation.Handle,
|
||||||
|
event.ConversationID: strconv.Itoa(conversation.ID),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnContactRequestValue for ProfileValueExtension handles returning Public Profile Values
|
||||||
|
func (pne ProfileValueExtension) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, szp attr.ScopedZonedPath) {
|
||||||
|
scope, zone, zpath := szp.GetScopeZonePath()
|
||||||
|
log.Debugf("Looking up public | conversation scope/zone %v", szp.ToString())
|
||||||
|
if scope.IsPublic() || scope.IsConversation() {
|
||||||
|
val, exists := profile.GetScopedZonedAttribute(scope, zone, zpath)
|
||||||
|
|
||||||
|
// NOTE: Temporary Override because UI currently wipes names if it can't find them...
|
||||||
|
if !exists && zone == attr.UnknownZone && zpath == constants.Name {
|
||||||
|
val, exists = profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: Cwtch 1.15+ requires that profiles be able to restrict file downloading to specific contacts. As such we need an ACL check here
|
||||||
|
// on the fileshareing zone.
|
||||||
|
// TODO: Split this functionality into FilesharingFunctionality, and restrict this function to only considering Profile zoned attributes?
|
||||||
|
if zone == attr.FilesharingZone {
|
||||||
|
if !conversation.GetPeerAC().ShareFiles {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Construct a Response
|
||||||
|
resp := event.NewEvent(event.SendRetValMessageToPeer, map[event.Field]string{event.ConversationID: strconv.Itoa(conversation.ID), event.RemotePeer: conversation.Handle, event.Exists: strconv.FormatBool(exists)})
|
||||||
|
resp.EventID = eventID
|
||||||
|
if exists {
|
||||||
|
resp.Data[event.Data] = val
|
||||||
|
} else {
|
||||||
|
resp.Data[event.Data] = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Responding with SendRetValMessageToPeer exists:%v data: %v\n", exists, val)
|
||||||
|
profile.PublishEvent(resp)
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,66 @@
|
||||||
|
package extensions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
"cwtch.im/cwtch/model/constants"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SendWhenOnlineExtension implements automatic sending
|
||||||
|
// Some Considerations:
|
||||||
|
// - There are race conditions inherant in this approach e.g. a peer could go offline just after recieving a message and never sending an ack
|
||||||
|
// - In that case the next time we connect we will send a duplicate message.
|
||||||
|
// - Currently we do not include metadata like sent time in raw peer protocols (however Overlay does now have support for that information)
|
||||||
|
type SendWhenOnlineExtension struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (soe SendWhenOnlineExtension) NotifySettingsUpdate(_ settings.GlobalSettings) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (soe SendWhenOnlineExtension) EventsToRegister() []event.Type {
|
||||||
|
return []event.Type{event.PeerStateChange}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (soe SendWhenOnlineExtension) ExperimentsToRegister() []string {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (soe SendWhenOnlineExtension) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
||||||
|
switch ev.EventType {
|
||||||
|
case event.PeerStateChange:
|
||||||
|
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
||||||
|
if err == nil {
|
||||||
|
// if we have re-authenticated with thie peer then request their profile image...
|
||||||
|
if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
|
||||||
|
// Check the last 100 messages, if any of them are pending, then send them now...
|
||||||
|
messsages, _ := profile.GetMostRecentMessages(ci.ID, 0, 0, uint(100))
|
||||||
|
for _, message := range messsages {
|
||||||
|
if message.Attr[constants.AttrAck] == constants.False {
|
||||||
|
body := message.Body
|
||||||
|
ev := event.NewEvent(event.SendMessageToPeer, map[event.Field]string{event.ConversationID: strconv.Itoa(ci.ID), event.RemotePeer: ci.Handle, event.Data: body})
|
||||||
|
ev.EventID = message.Signature // we need this ensure that we correctly ack this in the db when it comes back
|
||||||
|
// TODO: The EventBus is becoming very noisy...we may want to consider a one-way shortcut to Engine i.e. profile.Engine.SendMessageToPeer
|
||||||
|
log.Debugf("resending message that was sent when peer was offline")
|
||||||
|
profile.PublishEvent(ev)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnContactReceiveValue is nop for SendWhenOnnlineExtension
|
||||||
|
func (soe SendWhenOnlineExtension) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, szp attr.ScopedZonedPath, value string, exists bool) {
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnContactRequestValue is nop for SendWhenOnnlineExtension
|
||||||
|
func (soe SendWhenOnlineExtension) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, szp attr.ScopedZonedPath) {
|
||||||
|
|
||||||
|
}
|
|
@ -2,6 +2,8 @@ package filesharing
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
@ -11,6 +13,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
path "path/filepath"
|
path "path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -27,13 +30,121 @@ import (
|
||||||
type Functionality struct {
|
type Functionality struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// FunctionalityGate returns filesharing if enabled in the given experiment map
|
func (f *Functionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
|
||||||
// Note: Experiment maps are currently in libcwtch-go
|
}
|
||||||
func FunctionalityGate(experimentMap map[string]bool) (*Functionality, error) {
|
|
||||||
if experimentMap[constants.FileSharingExperiment] {
|
func (f *Functionality) EventsToRegister() []event.Type {
|
||||||
return new(Functionality), nil
|
return []event.Type{event.ProtocolEngineCreated, event.ManifestReceived, event.FileDownloaded}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) ExperimentsToRegister() []string {
|
||||||
|
return []string{constants.FileSharingExperiment}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnEvent handles File Sharing Hooks like Manifest Received and FileDownloaded
|
||||||
|
func (f *Functionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
||||||
|
if profile.IsFeatureEnabled(constants.FileSharingExperiment) {
|
||||||
|
switch ev.EventType {
|
||||||
|
case event.ProtocolEngineCreated:
|
||||||
|
f.ReShareFiles(profile)
|
||||||
|
case event.ManifestReceived:
|
||||||
|
log.Debugf("Manifest Received Event!: %v", ev)
|
||||||
|
handle := ev.Data[event.Handle]
|
||||||
|
fileKey := ev.Data[event.FileKey]
|
||||||
|
serializedManifest := ev.Data[event.SerializedManifest]
|
||||||
|
|
||||||
|
manifestFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.manifest", fileKey))
|
||||||
|
if exists {
|
||||||
|
downloadFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.path", fileKey))
|
||||||
|
if exists {
|
||||||
|
log.Debugf("downloading manifest to %v, file to %v", manifestFilePath, downloadFilePath)
|
||||||
|
var manifest files.Manifest
|
||||||
|
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
// We only need to check the file size here, as manifest is sent to engine and the file created
|
||||||
|
// will be bound to the size advertised in manifest.
|
||||||
|
fileSizeLimitValue, fileSizeLimitExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.limit", fileKey))
|
||||||
|
if fileSizeLimitExists {
|
||||||
|
fileSizeLimit, err := strconv.ParseUint(fileSizeLimitValue, 10, 64)
|
||||||
|
if err == nil {
|
||||||
|
if manifest.FileSizeInBytes >= fileSizeLimit {
|
||||||
|
log.Debugf("could not download file, size %v greater than limit %v", manifest.FileSizeInBytes, fileSizeLimitValue)
|
||||||
|
} else {
|
||||||
|
manifest.Title = manifest.FileName
|
||||||
|
manifest.FileName = downloadFilePath
|
||||||
|
log.Debugf("saving manifest")
|
||||||
|
err = manifest.Save(manifestFilePath)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("could not save manifest: %v", err)
|
||||||
|
} else {
|
||||||
|
tempFile := ""
|
||||||
|
if runtime.GOOS == "android" {
|
||||||
|
tempFile = manifestFilePath[0 : len(manifestFilePath)-len(".manifest")]
|
||||||
|
log.Debugf("derived android temp path: %v", tempFile)
|
||||||
}
|
}
|
||||||
return nil, errors.New("filesharing is not enabled")
|
profile.PublishEvent(event.NewEvent(event.ManifestSaved, map[event.Field]string{
|
||||||
|
event.FileKey: fileKey,
|
||||||
|
event.Handle: handle,
|
||||||
|
event.SerializedManifest: string(manifest.Serialize()),
|
||||||
|
event.TempFile: tempFile,
|
||||||
|
event.NameSuggestion: manifest.Title,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("error saving manifest: file size limit is incorrect: %v", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("error saving manifest: could not find file size limit info")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("error saving manifest: %v", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("found manifest path but not download path for %v", fileKey)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("no download path found for manifest: %v", fileKey)
|
||||||
|
}
|
||||||
|
case event.FileDownloaded:
|
||||||
|
fileKey := ev.Data[event.FileKey]
|
||||||
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey), "true")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("profile called filesharing experiment OnContactReceiveValue even though file sharing was not enabled. This is likely a programming error.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
|
||||||
|
// nop
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
|
||||||
|
// Profile should not call us if FileSharing is disabled
|
||||||
|
if profile.IsFeatureEnabled(constants.FileSharingExperiment) {
|
||||||
|
scope, zone, zpath := path.GetScopeZonePath()
|
||||||
|
log.Debugf("file sharing contact receive value")
|
||||||
|
if exists && scope.IsConversation() && zone == attr.FilesharingZone && strings.HasSuffix(zpath, ".manifest.size") {
|
||||||
|
fileKey := strings.Replace(zpath, ".manifest.size", "", 1)
|
||||||
|
size, err := strconv.Atoi(value)
|
||||||
|
// if size is valid and below the maximum size for a manifest
|
||||||
|
// this is to prevent malicious sharers from using large amounts of memory when distributing
|
||||||
|
// a manifest as we reconstruct this in-memory
|
||||||
|
if err == nil && size < files.MaxManifestSize {
|
||||||
|
profile.PublishEvent(event.NewEvent(event.ManifestSizeReceived, map[event.Field]string{event.FileKey: fileKey, event.ManifestSize: value, event.Handle: conversation.Handle}))
|
||||||
|
} else {
|
||||||
|
profile.PublishEvent(event.NewEvent(event.ManifestError, map[event.Field]string{event.FileKey: fileKey, event.Handle: conversation.Handle}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Errorf("profile called filesharing experiment OnContactReceiveValue even though file sharing was not enabled. This is likely a programming error.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FunctionalityGate returns filesharing functionality - gates now happen on function calls.
|
||||||
|
func FunctionalityGate() *Functionality {
|
||||||
|
return new(Functionality)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PreviewFunctionalityGate returns filesharing if image previews are enabled
|
// PreviewFunctionalityGate returns filesharing if image previews are enabled
|
||||||
|
@ -72,32 +183,259 @@ func (om *OverlayMessage) ShouldAutoDL() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) VerifyOrResumeDownloadDefaultLimit(profile peer.CwtchPeer, conversation int, fileKey string) error {
|
||||||
|
return f.VerifyOrResumeDownload(profile, conversation, fileKey, files.MaxManifestSize*files.DefaultChunkSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) VerifyOrResumeDownload(profile peer.CwtchPeer, conversation int, fileKey string, size uint64) error {
|
||||||
|
if manifestFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", fileKey)); exists {
|
||||||
|
if downloadfilepath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", fileKey)); exists {
|
||||||
|
manifest, err := files.LoadManifest(manifestFilePath)
|
||||||
|
if err == nil {
|
||||||
|
// Assert the filename...this is technically not necessary, but is here for completeness
|
||||||
|
manifest.FileName = downloadfilepath
|
||||||
|
if manifest.VerifyFile() == nil {
|
||||||
|
// Send a FileDownloaded Event. Usually when VerifyOrResumeDownload is triggered it's because some UI is awaiting the results of a
|
||||||
|
// Download.
|
||||||
|
profile.PublishEvent(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: fileKey, event.FilePath: downloadfilepath, event.TempFile: downloadfilepath}))
|
||||||
|
// File is verified and there is nothing else to do...
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
// Kick off another Download...
|
||||||
|
return f.DownloadFile(profile, conversation, downloadfilepath, manifestFilePath, fileKey, size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.New("file download metadata does not exist, or is corrupted")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) CheckDownloadStatus(profile peer.CwtchPeer, fileKey string) error {
|
||||||
|
path, _ := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", fileKey))
|
||||||
|
if value, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey)); exists && value == event.True {
|
||||||
|
profile.PublishEvent(event.NewEvent(event.FileDownloaded, map[event.Field]string{
|
||||||
|
event.ProfileOnion: profile.GetOnion(),
|
||||||
|
event.FileKey: fileKey,
|
||||||
|
event.FilePath: path,
|
||||||
|
event.TempFile: "",
|
||||||
|
}))
|
||||||
|
} else {
|
||||||
|
log.Debugf("CheckDownloadStatus found .path but not .complete")
|
||||||
|
profile.PublishEvent(event.NewEvent(event.FileDownloadProgressUpdate, map[event.Field]string{
|
||||||
|
event.ProfileOnion: profile.GetOnion(),
|
||||||
|
event.FileKey: fileKey,
|
||||||
|
event.Progress: "-1",
|
||||||
|
event.FileSizeInChunks: "-1",
|
||||||
|
event.FilePath: path,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
return nil // cannot fail
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) EnhancedShareFile(profile peer.CwtchPeer, conversationID int, sharefilepath string) string {
|
||||||
|
fileKey, overlay, err := f.ShareFile(sharefilepath, profile)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error sharing file: %v", err)
|
||||||
|
} else if conversationID == -1 {
|
||||||
|
// FIXME: At some point we might want to allow arbitrary public files, but for now this API will assume
|
||||||
|
// there is only one, and it is the custom profile image...
|
||||||
|
profile.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey, fileKey)
|
||||||
|
} else {
|
||||||
|
// Set a new attribute so we can associate this download with this conversation...
|
||||||
|
profile.SetConversationAttribute(conversationID, attr.ConversationScope.ConstructScopedZonedPath(attr.FilesharingZone.ConstructZonedPath(fileKey)), "")
|
||||||
|
id, err := profile.SendMessage(conversationID, overlay)
|
||||||
|
if err == nil {
|
||||||
|
return profile.EnhancedGetMessageById(conversationID, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFileDefaultLimit given a profile, a conversation handle and a file sharing key, start off a download process
|
||||||
|
// to downloadFilePath with a default filesize limit
|
||||||
|
func (f *Functionality) DownloadFileDefaultLimit(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string) error {
|
||||||
|
return f.DownloadFile(profile, conversationID, downloadFilePath, manifestFilePath, key, files.MaxManifestSize*files.DefaultChunkSize)
|
||||||
|
}
|
||||||
|
|
||||||
// DownloadFile given a profile, a conversation handle and a file sharing key, start off a download process
|
// DownloadFile given a profile, a conversation handle and a file sharing key, start off a download process
|
||||||
// to downloadFilePath
|
// to downloadFilePath
|
||||||
func (f *Functionality) DownloadFile(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string) {
|
func (f *Functionality) DownloadFile(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string, limit uint64) error {
|
||||||
|
|
||||||
|
// assert that we are allowed to download the file
|
||||||
|
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
|
||||||
|
return errors.New("filesharing functionality is not enabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't download files if the download or manifest path is not set
|
||||||
|
if downloadFilePath == "" || manifestFilePath == "" {
|
||||||
|
return errors.New("download path or manifest path is empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't download files if the download file directory does not exist
|
||||||
|
// Unless we are on Android where the kernel wishes to keep us ignorant of the
|
||||||
|
// actual path and/or existence of the file. We handle this case further down
|
||||||
|
// the line when the manifest is received and protocol engine and the Android layer
|
||||||
|
// negotiate a temporary local file -> final file copy. We don't want to worry
|
||||||
|
// about that here...
|
||||||
|
if runtime.GOOS != "android" {
|
||||||
|
if _, err := os.Stat(path.Dir(downloadFilePath)); os.IsNotExist(err) {
|
||||||
|
return errors.New("download directory does not exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't download files if the manifest file directory does not exist
|
||||||
|
if _, err := os.Stat(path.Dir(manifestFilePath)); os.IsNotExist(err) {
|
||||||
|
return errors.New("manifest directory does not exist")
|
||||||
|
}
|
||||||
|
}
|
||||||
// Store local.filesharing.filekey.manifest as the location of the manifest
|
// Store local.filesharing.filekey.manifest as the location of the manifest
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), manifestFilePath)
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), manifestFilePath)
|
||||||
|
|
||||||
// Store local.filesharing.filekey.path as the location of the download
|
// Store local.filesharing.filekey.path as the location of the download
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", key), downloadFilePath)
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", key), downloadFilePath)
|
||||||
|
|
||||||
|
// Store local.filesharing.filekey.limit as the max file size of the download
|
||||||
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.limit", key), strconv.FormatUint(limit, 10))
|
||||||
|
|
||||||
// Get the value of conversation.filesharing.filekey.manifest.size from `handle`
|
// Get the value of conversation.filesharing.filekey.manifest.size from `handle`
|
||||||
profile.SendScopedZonedGetValToContact(conversationID, attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key))
|
profile.SendScopedZonedGetValToContact(conversationID, attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// startFileShare is a private method used to finalize a file share and publish it to the protocol engine for processing.
|
||||||
|
// if force is set to true, this function will ignore timestamp checks...
|
||||||
|
func (f *Functionality) startFileShare(profile peer.CwtchPeer, filekey string, manifest string, force bool) error {
|
||||||
|
tsStr, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey))
|
||||||
|
if exists && !force {
|
||||||
|
ts, err := strconv.ParseInt(tsStr, 10, 64)
|
||||||
|
if err != nil || ts < time.Now().Unix()-2592000 {
|
||||||
|
log.Errorf("ignoring request to download a file offered more than 30 days ago")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// set the filekey status to active
|
||||||
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", filekey), constants.True)
|
||||||
|
// reset the timestamp...
|
||||||
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey), strconv.FormatInt(time.Now().Unix(), 10))
|
||||||
|
// share the manifest
|
||||||
|
profile.PublishEvent(event.NewEvent(event.ShareManifest, map[event.Field]string{event.FileKey: filekey, event.SerializedManifest: manifest}))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RestartFileShare takes in an existing filekey and, assuming the manifest exists, restarts sharing of the manifest
|
||||||
|
// by default this function always forces a file share, even if the file has timed out.
|
||||||
|
func (f *Functionality) RestartFileShare(profile peer.CwtchPeer, filekey string) error {
|
||||||
|
return f.restartFileShareAdvanced(profile, filekey, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RestartFileShareAdvanced takes in an existing filekey and, assuming the manifest exists, restarts sharing of the manifest in addition
|
||||||
|
// to a set of parameters
|
||||||
|
func (f *Functionality) restartFileShareAdvanced(profile peer.CwtchPeer, filekey string, force bool) error {
|
||||||
|
|
||||||
|
// assert that we are allowed to restart filesharing
|
||||||
|
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
|
||||||
|
return errors.New("filesharing functionality is not enabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
// check that a manifest exists
|
||||||
|
manifest, manifestExists := profile.GetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", filekey))
|
||||||
|
if manifestExists {
|
||||||
|
// everything is in order, so reshare this file with the engine
|
||||||
|
log.Debugf("restarting file share: %v", filekey)
|
||||||
|
return f.startFileShare(profile, filekey, manifest, force)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("manifest does not exist for filekey: %v", filekey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReShareFiles given a profile we iterate through all existing fileshares and re-share them
|
||||||
|
// if the time limit has not expired
|
||||||
|
func (f *Functionality) ReShareFiles(profile peer.CwtchPeer) error {
|
||||||
|
|
||||||
|
// assert that we are allowed to restart filesharing
|
||||||
|
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
|
||||||
|
return errors.New("filesharing functionality is not enabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
keys, err := profile.GetScopedZonedAttributeKeys(attr.LocalScope, attr.FilesharingZone)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range keys {
|
||||||
|
// only look at timestamp keys
|
||||||
|
// this is an arbitrary choice
|
||||||
|
|
||||||
|
if strings.HasSuffix(key, ".ts") {
|
||||||
|
_, zonedpath := attr.ParseScope(key)
|
||||||
|
_, keypath := attr.ParseZone(zonedpath)
|
||||||
|
keyparts := strings.Split(keypath, ".")
|
||||||
|
|
||||||
|
// assert that the key is well-formed
|
||||||
|
if len(keyparts) == 3 && keyparts[2] == "ts" {
|
||||||
|
// fetch the timestamp key
|
||||||
|
filekey := strings.Join(keyparts[:2], ".")
|
||||||
|
sharedFile, err := f.GetFileShareInfo(profile, filekey)
|
||||||
|
|
||||||
|
// If we haven't explicitly stopped sharing the file then attempt a reshare
|
||||||
|
if err == nil && sharedFile.Active {
|
||||||
|
// this reshare can fail because we don't force sharing of files older than 30 days...
|
||||||
|
err := f.restartFileShareAdvanced(profile, filekey, false)
|
||||||
|
if err != nil {
|
||||||
|
log.Debugf("could not reshare file: %v", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Debugf("could not get fileshare info %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileShareInfo returns information related to a known fileshare.
|
||||||
|
// An error is returned if the data is incomplete
|
||||||
|
func (f *Functionality) GetFileShareInfo(profile peer.CwtchPeer, filekey string) (*SharedFile, error) {
|
||||||
|
timestampString, tsExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey))
|
||||||
|
pathString, pathExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", filekey))
|
||||||
|
activeString, activeExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", filekey))
|
||||||
|
if tsExists && pathExists && activeExists {
|
||||||
|
timestamp, err := strconv.Atoi(timestampString)
|
||||||
|
if err == nil {
|
||||||
|
|
||||||
|
dateShared := time.Unix(int64(timestamp), 0)
|
||||||
|
expired := time.Since(dateShared) >= time.Hour*24*30
|
||||||
|
|
||||||
|
return &SharedFile{
|
||||||
|
FileKey: filekey,
|
||||||
|
Path: pathString,
|
||||||
|
DateShared: dateShared,
|
||||||
|
Active: !expired && activeString == constants.True,
|
||||||
|
Expired: expired,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("nonexistant or malformed fileshare %v", filekey)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShareFile given a profile and a conversation handle, sets up a file sharing process to share the file
|
// ShareFile given a profile and a conversation handle, sets up a file sharing process to share the file
|
||||||
// at filepath
|
// at filepath
|
||||||
func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer, conversationID int) error {
|
func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer) (string, string, error) {
|
||||||
|
|
||||||
|
// assert that we are allowed to share files
|
||||||
|
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
|
||||||
|
return "", "", errors.New("filesharing functionality is not enabled")
|
||||||
|
}
|
||||||
|
|
||||||
manifest, err := files.CreateManifest(filepath)
|
manifest, err := files.CreateManifest(filepath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return "", "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
var nonce [24]byte
|
var nonce [24]byte
|
||||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||||
log.Errorf("Cannot read from random: %v\n", err)
|
log.Errorf("Cannot read from random: %v\n", err)
|
||||||
return err
|
return "", "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
message := OverlayMessage{
|
message := OverlayMessage{
|
||||||
|
@ -136,15 +474,65 @@ func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer, conve
|
||||||
profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), string(serializedManifest))
|
profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), string(serializedManifest))
|
||||||
profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key), strconv.Itoa(int(math.Ceil(float64(len(serializedManifest)-lenDiff)/float64(files.DefaultChunkSize)))))
|
profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key), strconv.Itoa(int(math.Ceil(float64(len(serializedManifest)-lenDiff)/float64(files.DefaultChunkSize)))))
|
||||||
|
|
||||||
profile.ShareFile(key, string(serializedManifest))
|
err = f.startFileShare(profile, key, string(serializedManifest), false)
|
||||||
|
|
||||||
profile.SendMessage(conversationID, string(wrapperJSON))
|
return key, string(wrapperJSON), err
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
// SharedFile encapsulates information about a shared file
|
||||||
|
// including the file key, file path, the original share date and the
|
||||||
|
// current sharing status
|
||||||
|
type SharedFile struct {
|
||||||
|
|
||||||
|
// The roothash.nonce identifier derived for this file share
|
||||||
|
FileKey string
|
||||||
|
|
||||||
|
// Path is the OS specific location of the file
|
||||||
|
Path string
|
||||||
|
|
||||||
|
// DateShared is the original datetime the file was shared
|
||||||
|
DateShared time.Time
|
||||||
|
|
||||||
|
// Active is true if the file is currently being shared, false otherwise
|
||||||
|
Active bool
|
||||||
|
|
||||||
|
// Expired is true if the file is not eligible to be shared (because e.g. it has been too long since the file was originally shared,
|
||||||
|
// or the file no longer exists).
|
||||||
|
Expired bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) EnhancedGetSharedFiles(profile peer.CwtchPeer, conversationID int) string {
|
||||||
|
data, err := json.Marshal(f.GetSharedFiles(profile, conversationID))
|
||||||
|
if err == nil {
|
||||||
|
return string(data)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSharedFiles returns all file shares associated with a given conversation
|
||||||
|
func (f *Functionality) GetSharedFiles(profile peer.CwtchPeer, conversationID int) []SharedFile {
|
||||||
|
var sharedFiles []SharedFile
|
||||||
|
ci, err := profile.GetConversationInfo(conversationID)
|
||||||
|
if err == nil {
|
||||||
|
for k := range ci.Attributes {
|
||||||
|
// when we share a file with a conversation we set a single attribute conversation.filesharing.<filekey>
|
||||||
|
if strings.HasPrefix(k, "conversation.filesharing") {
|
||||||
|
parts := strings.SplitN(k, ".", 3)
|
||||||
|
if len(parts) == 3 {
|
||||||
|
key := parts[2]
|
||||||
|
sharedFile, err := f.GetFileShareInfo(profile, key)
|
||||||
|
if err == nil {
|
||||||
|
sharedFiles = append(sharedFiles, *sharedFile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sharedFiles
|
||||||
}
|
}
|
||||||
|
|
||||||
// GenerateDownloadPath creates a file path that doesn't currently exist on the filesystem
|
// GenerateDownloadPath creates a file path that doesn't currently exist on the filesystem
|
||||||
func GenerateDownloadPath(basePath, fileName string) (filePath, manifestPath string) {
|
func GenerateDownloadPath(basePath, fileName string, overwrite bool) (filePath, manifestPath string) {
|
||||||
// avoid all kina funky shit
|
// avoid all kina funky shit
|
||||||
re := regexp.MustCompile(`[^A-Za-z0-9._-]`)
|
re := regexp.MustCompile(`[^A-Za-z0-9._-]`)
|
||||||
filePath = re.ReplaceAllString(filePath, "")
|
filePath = re.ReplaceAllString(filePath, "")
|
||||||
|
@ -173,6 +561,7 @@ func GenerateDownloadPath(basePath, fileName string) (filePath, manifestPath str
|
||||||
fileNameExt = fmt.Sprintf(".%s", parts[len(parts)-1])
|
fileNameExt = fmt.Sprintf(".%s", parts[len(parts)-1])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !overwrite {
|
||||||
for i := 2; ; i++ {
|
for i := 2; ; i++ {
|
||||||
if _, err := os.Open(filePath); os.IsNotExist(err) {
|
if _, err := os.Open(filePath); os.IsNotExist(err) {
|
||||||
if _, err := os.Open(manifestPath); os.IsNotExist(err) {
|
if _, err := os.Open(manifestPath); os.IsNotExist(err) {
|
||||||
|
@ -182,4 +571,21 @@ func GenerateDownloadPath(basePath, fileName string) (filePath, manifestPath str
|
||||||
filePath = fmt.Sprintf("%s%s (%d)%s", basePath, fileNameBase, i, fileNameExt)
|
filePath = fmt.Sprintf("%s%s (%d)%s", basePath, fileNameBase, i, fileNameExt)
|
||||||
manifestPath = fmt.Sprintf("%s.manifest", filePath)
|
manifestPath = fmt.Sprintf("%s.manifest", filePath)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// StopFileShare sends a message to the ProtocolEngine to cease sharing a particular file
|
||||||
|
func (f *Functionality) StopFileShare(profile peer.CwtchPeer, fileKey string) error {
|
||||||
|
// Note we do not do a permissions check here, as we are *always* permitted to stop sharing files.
|
||||||
|
// set the filekey status to inactive
|
||||||
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", fileKey), constants.False)
|
||||||
|
profile.PublishEvent(event.NewEvent(event.StopFileShare, map[event.Field]string{event.FileKey: fileKey}))
|
||||||
|
return nil // cannot fail
|
||||||
|
}
|
||||||
|
|
||||||
|
// StopAllFileShares sends a message to the ProtocolEngine to cease sharing all files
|
||||||
|
func (f *Functionality) StopAllFileShares(profile peer.CwtchPeer) {
|
||||||
|
// Note we do not do a permissions check here, as we are *always* permitted to stop sharing files.
|
||||||
|
profile.PublishEvent(event.NewEvent(event.StopAllFileShares, map[event.Field]string{}))
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,181 @@
|
||||||
|
package filesharing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
"cwtch.im/cwtch/model/constants"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ImagePreviewsFunctionality struct {
|
||||||
|
downloadFolder string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImagePreviewsFunctionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
|
||||||
|
i.downloadFolder = settings.DownloadPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImagePreviewsFunctionality) EventsToRegister() []event.Type {
|
||||||
|
return []event.Type{event.ProtocolEngineCreated, event.NewMessageFromPeer, event.NewMessageFromGroup, event.PeerStateChange, event.Heartbeat}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImagePreviewsFunctionality) ExperimentsToRegister() []string {
|
||||||
|
return []string{constants.FileSharingExperiment, constants.ImagePreviewsExperiment}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImagePreviewsFunctionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
||||||
|
if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
|
||||||
|
switch ev.EventType {
|
||||||
|
case event.NewMessageFromPeer:
|
||||||
|
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
||||||
|
if err == nil {
|
||||||
|
if ci.GetPeerAC().RenderImages {
|
||||||
|
i.handleImagePreviews(profile, &ev, ci.ID, ci.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case event.NewMessageFromGroup:
|
||||||
|
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
||||||
|
if err == nil {
|
||||||
|
if ci.GetPeerAC().RenderImages {
|
||||||
|
i.handleImagePreviews(profile, &ev, ci.ID, ci.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case event.PeerStateChange:
|
||||||
|
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
||||||
|
if err == nil {
|
||||||
|
// if we have re-authenticated with this peer then request their profile image...
|
||||||
|
if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
|
||||||
|
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case event.Heartbeat:
|
||||||
|
conversations, err := profile.FetchConversations()
|
||||||
|
if err == nil {
|
||||||
|
for _, ci := range conversations {
|
||||||
|
if profile.GetPeerState(ci.Handle) == connections.AUTHENTICATED {
|
||||||
|
// if we have enabled file shares for this contact, then send them our profile image
|
||||||
|
// NOTE: In the past, Cwtch treated "profile image" as a public file share. As such, anyone with the file key and who is able
|
||||||
|
// to authenticate with the profile (i.e. non-blocked peers) can download the file (if the global profile images experiment is enabled)
|
||||||
|
// To better allow for fine-grained permissions (and to support hybrid group permissions), we want to enable per-conversation file
|
||||||
|
// sharing permissions. As such, profile images are now only shared with contacts with that permission enabled.
|
||||||
|
// (i.e. all previous accepted contacts, new accepted contacts, and contacts who have this toggle set explictly)
|
||||||
|
if ci.GetPeerAC().ShareFiles {
|
||||||
|
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case event.ProtocolEngineCreated:
|
||||||
|
// Now that the Peer Engine is Activated, Reshare Profile Images
|
||||||
|
key, exists := profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
|
||||||
|
if exists {
|
||||||
|
serializedManifest, _ := profile.GetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key))
|
||||||
|
// reset the share timestamp, currently file shares are hardcoded to expire after 30 days...
|
||||||
|
// we reset the profile image here so that it is always available.
|
||||||
|
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", key), strconv.FormatInt(time.Now().Unix(), 10))
|
||||||
|
log.Debugf("Custom Profile Image: %v %s", key, serializedManifest)
|
||||||
|
f := Functionality{}
|
||||||
|
f.RestartFileShare(profile, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImagePreviewsFunctionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnContactReceiveValue auto-downloads a peer's custom profile image when the
// profile-image attribute arrives. Download proceeds only when both the
// file-sharing and image-previews experiments are enabled and the
// conversation's access control permits rendering images. If a previous
// download is marked complete and verifies, nothing is re-fetched.
func (i *ImagePreviewsFunctionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
	if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
		_, zone, path := path.GetScopeZonePath()
		if exists && zone == attr.ProfileZone && path == constants.CustomProfileImageKey {
			// We only download from accepted conversations
			if conversation.GetPeerAC().RenderImages {
				// The received attribute value is the file key of the shared profile image.
				fileKey := value
				basepath := i.downloadFolder
				fsf := FunctionalityGate()
				// We always overwrite profile image files...
				fp, mp := GenerateDownloadPath(basepath, fileKey, true)

				// If we have marked this file as complete...
				if value, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey)); exists && value == event.True {
					if _, err := os.Stat(fp); err == nil {
						// file is marked as completed downloaded and exists...
						// Note: this will also resend the FileDownloaded event if successful...
						if fsf.VerifyOrResumeDownload(profile, conversation.ID, fileKey, constants.ImagePreviewMaxSizeInBytes) == nil {
							return
						}
						// Otherwise we fall through...
					}
					// Something went wrong...the file is marked as complete but either doesn't exist, or is corrupted such that we can't continue...
					// So mark complete as false...
					profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey), event.False)
				}

				// If we have reached this point then we need to download the file again...
				log.Debugf("Downloading Profile Image %v %v %v", fp, mp, fileKey)
				fsf.DownloadFile(profile, conversation.ID, fp, mp, fileKey, constants.ImagePreviewMaxSizeInBytes)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// handleImagePreviews checks settings and, if appropriate, auto-downloads any images
// attached to the given message event. Downloads require: the file-sharing and
// image-previews experiments enabled, a sender access control permitting both
// file sharing and image rendering, and an existing download folder.
func (i *ImagePreviewsFunctionality) handleImagePreviews(profile peer.CwtchPeer, ev *event.Event, conversationID, senderID int) {
	if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
		ci, err := profile.GetConversationInfo(senderID)
		if err != nil {
			log.Errorf("attempted to call handleImagePreviews with unknown conversation: %v", senderID)
			return
		}

		if !ci.GetPeerAC().ShareFiles || !ci.GetPeerAC().RenderImages {
			log.Infof("refusing to autodownload files from sender: %v. conversation AC does not permit image rendering", senderID)
			return
		}

		// Short-circuit failures
		// Don't auto-download images if the download path is not set.
		if i.downloadFolder == "" {
			log.Errorf("download folder %v is not set", i.downloadFolder)
			return
		}

		// Don't auto-download images if the download path does not exist.
		if _, err := os.Stat(i.downloadFolder); os.IsNotExist(err) {
			log.Errorf("download folder %v does not exist", i.downloadFolder)
			return
		}

		// If file sharing is enabled then reshare all active files...
		fsf := FunctionalityGate()

		// Now look at the image preview experiment
		var cm model.MessageWrapper
		err = json.Unmarshal([]byte(ev.Data[event.Data]), &cm)
		if err == nil && cm.Overlay == model.OverlayFileSharing {
			log.Debugf("Received File Sharing Message")
			var fm OverlayMessage
			err = json.Unmarshal([]byte(cm.Data), &fm)
			if err == nil {
				if fm.ShouldAutoDL() {
					basepath := i.downloadFolder
					fp, mp := GenerateDownloadPath(basepath, fm.Name, false)
					log.Debugf("autodownloading file! %v %v %v", basepath, fp, i.downloadFolder)
					// Mark the event as an automatic download before it propagates further.
					ev.Data["Auto"] = constants.True
					// NOTE(review): a parse failure here leaves mID == 0; presumably
					// "Index" is always populated by the caller — confirm.
					mID, _ := strconv.Atoi(ev.Data["Index"])
					// Channel is hard-coded to 0 — assumes the default message channel; TODO confirm.
					profile.UpdateMessageAttribute(conversationID, 0, mID, constants.AttrDownloaded, constants.True)
					fsf.DownloadFile(profile, senderID, fp, mp, fm.FileKey(), constants.ImagePreviewMaxSizeInBytes)
				}
			}
		}
	}
}
|
|
@ -0,0 +1,150 @@
|
||||||
|
package servers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
"cwtch.im/cwtch/model/constants"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// ServerList is an event field carrying a json encoded list of servers.
	ServerList = event.Field("ServerList")
)
|
||||||
|
|
||||||
|
const (
	// UpdateServerInfo is an event containing a ProfileOnion and a ServerList.
	UpdateServerInfo = event.Type("UpdateServerInfo")
)
|
||||||
|
|
||||||
|
// Functionality groups some common UI triggered functions for managing a
// profile's group servers. It is stateless; obtain one via FunctionalityGate.
type Functionality struct {
}
|
||||||
|
|
||||||
|
// NotifySettingsUpdate is a no-op; the servers functionality does not react
// to global settings changes.
func (f *Functionality) NotifySettingsUpdate(settings settings.GlobalSettings) {

}
|
||||||
|
|
||||||
|
func (f *Functionality) EventsToRegister() []event.Type {
|
||||||
|
return []event.Type{event.QueueJoinServer}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Functionality) ExperimentsToRegister() []string {
|
||||||
|
return []string{constants.GroupsExperiment}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnEvent handles File Sharing Hooks like Manifest Received and FileDownloaded
|
||||||
|
func (f *Functionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
||||||
|
if profile.IsFeatureEnabled(constants.GroupsExperiment) {
|
||||||
|
switch ev.EventType {
|
||||||
|
// keep the UI in sync with the current backend server updates...
|
||||||
|
// queue join server gets triggered on load and on new servers so it's a nice
|
||||||
|
// low-noise event to hook into...
|
||||||
|
case event.QueueJoinServer:
|
||||||
|
f.PublishServerUpdate(profile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnContactRequestValue is a no-op; the servers functionality does not
// request attribute values from contacts.
func (f *Functionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
	// nop
}
|
||||||
|
|
||||||
|
// OnContactReceiveValue is a no-op; the servers functionality does not
// consume attribute values received from contacts.
func (f *Functionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
	// nop
}
|
||||||
|
|
||||||
|
// FunctionalityGate returns filesharing functionality - gates now happen on function calls.
|
||||||
|
func FunctionalityGate() *Functionality {
|
||||||
|
return new(Functionality)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerKey packages up key information...
// TODO: Can this be merged with KeyBundle?
type ServerKey struct {
	// Type identifies the kind of key (see model.KeyType values).
	Type string `json:"type"`
	// Key is the key material, serialized as a string.
	Key string `json:"key"`
}
|
||||||
|
|
||||||
|
// SyncStatus packages up server sync information...
// Both fields are string-encoded timestamps; empty when unknown.
type SyncStatus struct {
	StartTime       string `json:"startTime"`
	LastMessageTime string `json:"lastMessageTime"`
}
|
||||||
|
|
||||||
|
// Server encapsulates the information needed to represent a server...
type Server struct {
	// Onion is the server's onion address.
	Onion string `json:"onion"`
	// Identifier is the id of the conversation backing this server entry.
	Identifier int `json:"identifier"`
	// Status is the human-readable connection state name.
	Status string `json:"status"`
	// Description is the user-supplied description (may be empty).
	Description string `json:"description"`
	// Keys holds the verified cryptographic keys known for this server.
	Keys []ServerKey `json:"keys"`
	// SyncProgress reports how far message sync with this server has reached.
	SyncProgress SyncStatus `json:"syncProgress"`
}
|
||||||
|
|
||||||
|
// PublishServerUpdate serializes the current list of group servers and publishes an event with this information
|
||||||
|
func (f *Functionality) PublishServerUpdate(profile peer.CwtchPeer) error {
|
||||||
|
serverListForOnion := f.GetServerInfoList(profile)
|
||||||
|
serversListBytes, err := json.Marshal(serverListForOnion)
|
||||||
|
profile.PublishEvent(event.NewEvent(UpdateServerInfo, map[event.Field]string{"ProfileOnion": profile.GetOnion(), ServerList: string(serversListBytes)}))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetServerInfoList compiles all the information the UI might need regarding all servers..
|
||||||
|
func (f *Functionality) GetServerInfoList(profile peer.CwtchPeer) []Server {
|
||||||
|
var servers []Server
|
||||||
|
for _, server := range profile.GetServers() {
|
||||||
|
server, err := f.GetServerInfo(profile, server)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("profile server list is corrupted: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
servers = append(servers, server)
|
||||||
|
}
|
||||||
|
return servers
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteServer purges a server and all related keys from a profile
|
||||||
|
func (f *Functionality) DeleteServerInfo(profile peer.CwtchPeer, serverOnion string) error {
|
||||||
|
// Servers are stores as special conversations
|
||||||
|
ci, err := profile.FetchConversationInfo(serverOnion)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Purge keys...
|
||||||
|
// NOTE: This will leave some groups in the state of being unable to connect to a particular
|
||||||
|
// server.
|
||||||
|
profile.DeleteConversation(ci.ID)
|
||||||
|
f.PublishServerUpdate(profile)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetServerInfo compiles all the information the UI might need regarding a particular server including any verified
// cryptographic keys
func (f *Functionality) GetServerInfo(profile peer.CwtchPeer, serverOnion string) (Server, error) {
	// Servers are stored as conversations; a lookup failure means this onion
	// is not a known server for the profile.
	serverInfo, err := profile.FetchConversationInfo(serverOnion)
	if err != nil {
		return Server{}, errors.New("server not found")
	}
	// Collect every key type we know how to verify for a server.
	keyTypes := []model.KeyType{model.KeyTypeServerOnion, model.KeyTypeTokenOnion, model.KeyTypePrivacyPass}
	var serverKeys []ServerKey

	for _, keyType := range keyTypes {
		if key, has := serverInfo.GetAttribute(attr.PublicScope, attr.ServerKeyZone, string(keyType)); has {
			serverKeys = append(serverKeys, ServerKey{Type: string(keyType), Key: key})
		}
	}

	// Description is optional; a missing attribute leaves it empty.
	description, _ := serverInfo.GetAttribute(attr.LocalScope, attr.ServerZone, constants.Description)
	// Sync timestamps live under the legacy group zone; absent map keys yield
	// empty strings in the resulting SyncStatus.
	startTimeStr := serverInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncPreLastMessageTime)).ToString()]
	recentTimeStr := serverInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncMostRecentMessageTime)).ToString()]
	syncStatus := SyncStatus{startTimeStr, recentTimeStr}

	return Server{Onion: serverOnion, Identifier: serverInfo.ID, Status: connections.ConnectionStateName[profile.GetPeerState(serverInfo.Handle)], Keys: serverKeys, Description: description, SyncProgress: syncStatus}, nil
}
|
34
go.mod
34
go.mod
|
@ -1,19 +1,29 @@
|
||||||
module cwtch.im/cwtch
|
module cwtch.im/cwtch
|
||||||
|
|
||||||
go 1.14
|
go 1.20
|
||||||
|
|
||||||
require (
|
require (
|
||||||
git.openprivacy.ca/cwtch.im/tapir v0.5.0
|
git.openprivacy.ca/cwtch.im/tapir v0.6.0
|
||||||
git.openprivacy.ca/openprivacy/connectivity v1.8.1
|
git.openprivacy.ca/openprivacy/connectivity v1.11.0
|
||||||
git.openprivacy.ca/openprivacy/log v1.0.3
|
git.openprivacy.ca/openprivacy/log v1.0.3
|
||||||
github.com/gtank/ristretto255 v0.1.2
|
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c
|
||||||
github.com/mutecomm/go-sqlcipher/v4 v4.4.2
|
github.com/mutecomm/go-sqlcipher/v4 v4.4.2
|
||||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
github.com/onsi/ginkgo/v2 v2.1.4
|
||||||
github.com/onsi/ginkgo/v2 v2.0.0-rc2
|
github.com/onsi/gomega v1.20.1
|
||||||
github.com/onsi/gomega v1.17.0
|
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d
|
||||||
github.com/stretchr/testify v1.7.0 // indirect
|
)
|
||||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee
|
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
|
require (
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
filippo.io/edwards25519 v1.0.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
|
git.openprivacy.ca/openprivacy/bine v0.0.5 // indirect
|
||||||
|
github.com/google/go-cmp v0.5.8 // indirect
|
||||||
|
github.com/gtank/merlin v0.1.1 // indirect
|
||||||
|
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b // indirect
|
||||||
|
github.com/stretchr/testify v1.7.0 // indirect
|
||||||
|
go.etcd.io/bbolt v1.3.6 // indirect
|
||||||
|
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect
|
||||||
|
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 // indirect
|
||||||
|
golang.org/x/text v0.3.7 // indirect
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
|
141
go.sum
141
go.sum
|
@ -1,147 +1,70 @@
|
||||||
filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU=
|
filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
|
||||||
filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
|
filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
|
||||||
git.openprivacy.ca/cwtch.im/tapir v0.5.0 h1:bHVZ0GtMe3nkNVY7PoKOyp6f8HHmksdEvhHgfppV/C8=
|
git.openprivacy.ca/cwtch.im/tapir v0.6.0 h1:TtnKjxitkIDMM7Qn0n/u+mOHRLJzuQUYjYRu5n0/QFY=
|
||||||
git.openprivacy.ca/cwtch.im/tapir v0.5.0/go.mod h1:dvcAGBGbgKLDIOu6uyBryR6Fpq6v7QUDLev+w7xOh/Y=
|
git.openprivacy.ca/cwtch.im/tapir v0.6.0/go.mod h1:iQIq4y7N+DuP3CxyG66WNEC/d6vzh+wXvvOmelB+KoY=
|
||||||
git.openprivacy.ca/openprivacy/bine v0.0.4 h1:CO7EkGyz+jegZ4ap8g5NWRuDHA/56KKvGySR6OBPW+c=
|
git.openprivacy.ca/openprivacy/bine v0.0.5 h1:DJs5gqw3SkvLSgRDvroqJxZ7F+YsbxbBRg5t0rU5gYE=
|
||||||
git.openprivacy.ca/openprivacy/bine v0.0.4/go.mod h1:13ZqhKyqakDsN/ZkQkIGNULsmLyqtXc46XBcnuXm/mU=
|
git.openprivacy.ca/openprivacy/bine v0.0.5/go.mod h1:fwdeq6RO08WDkV0k7HfArsjRvurVULoUQmT//iaABZM=
|
||||||
git.openprivacy.ca/openprivacy/connectivity v1.8.1 h1:OjWy+JTAvlrstY8PnGPBp7Ho04JaKHaQ+YdoLwSdaCo=
|
git.openprivacy.ca/openprivacy/connectivity v1.11.0 h1:roASjaFtQLu+HdH5fa2wx6F00NL3YsUTlmXBJh8aLZk=
|
||||||
git.openprivacy.ca/openprivacy/connectivity v1.8.1/go.mod h1:UjQiGBnWbotmBzIw59B8H6efwDadjkKzm3RPT1UaIRw=
|
git.openprivacy.ca/openprivacy/connectivity v1.11.0/go.mod h1:OQO1+7OIz/jLxDrorEMzvZA6SEbpbDyLGpjoFqT3z1Y=
|
||||||
git.openprivacy.ca/openprivacy/log v1.0.2/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
|
||||||
git.openprivacy.ca/openprivacy/log v1.0.3 h1:E/PMm4LY+Q9s3aDpfySfEDq/vYQontlvNj/scrPaga0=
|
git.openprivacy.ca/openprivacy/log v1.0.3 h1:E/PMm4LY+Q9s3aDpfySfEDq/vYQontlvNj/scrPaga0=
|
||||||
git.openprivacy.ca/openprivacy/log v1.0.3/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
git.openprivacy.ca/openprivacy/log v1.0.3/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
||||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
|
||||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
|
||||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
|
||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
|
||||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
|
||||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
|
||||||
github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
|
github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
|
||||||
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
|
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
|
||||||
github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc=
|
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c h1:gkfmnY4Rlt3VINCo4uKdpvngiibQyoENVj5Q88sxXhE=
|
||||||
github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o=
|
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c/go.mod h1:tDPFhGdt3hJWqtKwx57i9baiB1Cj0yAg22VOPUqm5vY=
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
|
||||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0=
|
|
||||||
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
|
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
|
||||||
|
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b h1:QrHweqAtyJ9EwCaGHBu1fghwxIPiopAHV06JlXrMHjk=
|
||||||
|
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b/go.mod h1:xxLb2ip6sSUts3g1irPVHyk/DGslwQsNOo9I7smJfNU=
|
||||||
github.com/mutecomm/go-sqlcipher/v4 v4.4.2 h1:eM10bFtI4UvibIsKr10/QT7Yfz+NADfjZYh0GKrXUNc=
|
github.com/mutecomm/go-sqlcipher/v4 v4.4.2 h1:eM10bFtI4UvibIsKr10/QT7Yfz+NADfjZYh0GKrXUNc=
|
||||||
github.com/mutecomm/go-sqlcipher/v4 v4.4.2/go.mod h1:mF2UmIpBnzFeBdu/ypTDb/LdbS0nk0dfSN1WUsWTjMA=
|
github.com/mutecomm/go-sqlcipher/v4 v4.4.2/go.mod h1:mF2UmIpBnzFeBdu/ypTDb/LdbS0nk0dfSN1WUsWTjMA=
|
||||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
|
||||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
|
||||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
|
||||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
|
||||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
|
||||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
|
||||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
|
||||||
github.com/onsi/ginkgo/v2 v2.0.0-rc2 h1:2ukZwTHG/SAlJe4mm5xTdcUYH7IRvldIXhukE1pQBeY=
|
|
||||||
github.com/onsi/ginkgo/v2 v2.0.0-rc2/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
|
||||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
|
||||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
|
||||||
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
|
|
||||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||||
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
|
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee h1:4yd7jl+vXjalO5ztz6Vc1VADv+S/80LGJmyl1ROJ2AI=
|
|
||||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d h1:3qF+Z8Hkrw9sOhrFHti9TlB1Hkac1x+DNRkv0XQiFjo=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
|
|
||||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 h1:UiNENfZ8gDvpiWw7IpOMQ27spWmThO1RwwdQVbJahJM=
|
||||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
|
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
|
||||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
|
||||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
|
||||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|
|
@ -1,5 +1,10 @@
|
||||||
package attr
|
package attr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Scope model for peer attributes and requests
|
Scope model for peer attributes and requests
|
||||||
|
|
||||||
|
@ -18,6 +23,12 @@ type Scope string
|
||||||
// ScopedZonedPath typed path with a scope and a zone
|
// ScopedZonedPath typed path with a scope and a zone
|
||||||
type ScopedZonedPath string
|
type ScopedZonedPath string
|
||||||
|
|
||||||
|
func (szp ScopedZonedPath) GetScopeZonePath() (Scope, Zone, string) {
|
||||||
|
scope, path := ParseScope(string(szp))
|
||||||
|
zone, zpath := ParseZone(path)
|
||||||
|
return scope, zone, zpath
|
||||||
|
}
|
||||||
|
|
||||||
// scopes for attributes
|
// scopes for attributes
|
||||||
const (
|
const (
|
||||||
// on a peer, local and peer supplied data
|
// on a peer, local and peer supplied data
|
||||||
|
@ -78,3 +89,16 @@ func (scope Scope) IsPublic() bool {
|
||||||
func (scope Scope) IsConversation() bool {
|
func (scope Scope) IsConversation() bool {
|
||||||
return scope == ConversationScope
|
return scope == ConversationScope
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ParseScope takes in an untyped string and returns an explicit Scope along with the rest of the untyped path
|
||||||
|
func ParseScope(path string) (Scope, string) {
|
||||||
|
parts := strings.SplitN(path, Separator, 3)
|
||||||
|
|
||||||
|
log.Debugf("parsed scope: %v %v", parts, path)
|
||||||
|
|
||||||
|
if len(parts) != 3 {
|
||||||
|
return UnknownScope, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return IntoScope(parts[0]), parts[1] + Separator + parts[2]
|
||||||
|
}
|
||||||
|
|
|
@ -40,6 +40,10 @@ func (zone Zone) ConstructZonedPath(path string) ZonedPath {
|
||||||
return ZonedPath(string(zone) + Separator + path)
|
return ZonedPath(string(zone) + Separator + path)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (zp ZonedPath) ToString() string {
|
||||||
|
return string(zp)
|
||||||
|
}
|
||||||
|
|
||||||
// ParseZone takes in an untyped string and returns an explicit Zone along with the rest of the untyped path
|
// ParseZone takes in an untyped string and returns an explicit Zone along with the rest of the untyped path
|
||||||
func ParseZone(path string) (Zone, string) {
|
func ParseZone(path string) (Zone, string) {
|
||||||
parts := strings.SplitN(path, Separator, 2)
|
parts := strings.SplitN(path, Separator, 2)
|
||||||
|
|
|
@ -49,3 +49,26 @@ const AttrRejected = "rejected-invite"
|
||||||
|
|
||||||
// AttrDownloaded - conversation attribute for storing downloaded prompts (for file downloads)
|
// AttrDownloaded - conversation attribute for storing downloaded prompts (for file downloads)
|
||||||
const AttrDownloaded = "file-downloaded"
|
const AttrDownloaded = "file-downloaded"
|
||||||
|
|
||||||
|
const CustomProfileImageKey = "custom-profile-image"
|
||||||
|
|
||||||
|
const SyncPreLastMessageTime = "SyncPreLastMessageTime"
|
||||||
|
const SyncMostRecentMessageTime = "SyncMostRecentMessageTime"
|
||||||
|
|
||||||
|
const AttrLastConnectionTime = "last-connection-time"
|
||||||
|
const PeerAutostart = "autostart"
|
||||||
|
const PeerAppearOffline = "appear-offline"
|
||||||
|
const Archived = "archived"
|
||||||
|
|
||||||
|
const ProfileStatus = "profile-status"
|
||||||
|
const ProfileAttribute1 = "profile-attribute-1"
|
||||||
|
const ProfileAttribute2 = "profile-attribute-2"
|
||||||
|
const ProfileAttribute3 = "profile-attribute-3"
|
||||||
|
|
||||||
|
// Description is used on server contacts,
|
||||||
|
const Description = "description"
|
||||||
|
|
||||||
|
// Used to store the status of acl migrations
|
||||||
|
const ACLVersion = "acl-version"
|
||||||
|
const ACLVersionOne = "acl-v1"
|
||||||
|
const ACLVersionTwo = "acl-v2"
|
||||||
|
|
|
@ -1,5 +1,7 @@
|
||||||
package constants
|
package constants
|
||||||
|
|
||||||
|
const GroupsExperiment = "tapir-groups-experiment"
|
||||||
|
|
||||||
// FileSharingExperiment Allows file sharing
|
// FileSharingExperiment Allows file sharing
|
||||||
const FileSharingExperiment = "filesharing"
|
const FileSharingExperiment = "filesharing"
|
||||||
|
|
||||||
|
@ -10,5 +12,10 @@ const ImagePreviewsExperiment = "filesharing-images"
|
||||||
// ImagePreviewMaxSizeInBytes Files up to this size will be autodownloaded using ImagePreviewsExperiment
|
// ImagePreviewMaxSizeInBytes Files up to this size will be autodownloaded using ImagePreviewsExperiment
|
||||||
const ImagePreviewMaxSizeInBytes = 20971520
|
const ImagePreviewMaxSizeInBytes = 20971520
|
||||||
|
|
||||||
|
const MessageFormattingExperiment = "message-formatting"
|
||||||
|
|
||||||
// AutoDLFileExts Files with these extensions will be autodownloaded using ImagePreviewsExperiment
|
// AutoDLFileExts Files with these extensions will be autodownloaded using ImagePreviewsExperiment
|
||||||
var AutoDLFileExts = [...]string{".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp"}
|
var AutoDLFileExts = [...]string{".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp"}
|
||||||
|
|
||||||
|
// BlodeuweddExperiment enables the Blodeuwedd Assistant
|
||||||
|
const BlodeuweddExperiment = "blodeuwedd"
|
||||||
|
|
|
@ -1,21 +1,36 @@
|
||||||
package model
|
package model
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
"cwtch.im/cwtch/model/attr"
|
||||||
"cwtch.im/cwtch/model/constants"
|
"cwtch.im/cwtch/model/constants"
|
||||||
"encoding/json"
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AccessControl is a type determining client assigned authorization to a peer
|
// AccessControl is a type determining client assigned authorization to a peer
|
||||||
|
// for a given conversation
|
||||||
type AccessControl struct {
|
type AccessControl struct {
|
||||||
Blocked bool // Any attempts from this handle to connect are blocked
|
Blocked bool // Any attempts from this handle to connect are blocked overrides all other settings
|
||||||
|
|
||||||
|
// Basic Conversation Rights
|
||||||
Read bool // Allows a handle to access the conversation
|
Read bool // Allows a handle to access the conversation
|
||||||
Append bool // Allows a handle to append new messages to the conversation
|
Append bool // Allows a handle to append new messages to the conversation
|
||||||
|
|
||||||
|
AutoConnect bool // Profile should automatically try to connect with peer
|
||||||
|
ExchangeAttributes bool // Profile should automatically exchange attributes like Name, Profile Image, etc.
|
||||||
|
|
||||||
|
// Extension Related Permissions
|
||||||
|
ShareFiles bool // Allows a handle to share files to a conversation
|
||||||
|
RenderImages bool // Indicates that certain filetypes should be autodownloaded and rendered when shared by this contact
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultP2PAccessControl - because in the year 2021, go does not support constant structs...
|
// DefaultP2PAccessControl defaults to a semi-trusted peer with no access to special extensions.
|
||||||
func DefaultP2PAccessControl() AccessControl {
|
func DefaultP2PAccessControl() AccessControl {
|
||||||
return AccessControl{Read: true, Append: true, Blocked: false}
|
return AccessControl{Read: true, Append: true, ExchangeAttributes: true, Blocked: false,
|
||||||
|
AutoConnect: true, ShareFiles: false, RenderImages: false}
|
||||||
}
|
}
|
||||||
|
|
||||||
// AccessControlList represents an access control list for a conversation. Mapping handles to conversation
|
// AccessControlList represents an access control list for a conversation. Mapping handles to conversation
|
||||||
|
@ -29,10 +44,10 @@ func (acl *AccessControlList) Serialize() []byte {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeserializeAccessControlList takes in JSON and returns an AccessControlList
|
// DeserializeAccessControlList takes in JSON and returns an AccessControlList
|
||||||
func DeserializeAccessControlList(data []byte) AccessControlList {
|
func DeserializeAccessControlList(data []byte) (AccessControlList, error) {
|
||||||
var acl AccessControlList
|
var acl AccessControlList
|
||||||
json.Unmarshal(data, &acl)
|
err := json.Unmarshal(data, &acl)
|
||||||
return acl
|
return acl, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Attributes a type-driven encapsulation of an Attribute map.
|
// Attributes a type-driven encapsulation of an Attribute map.
|
||||||
|
@ -46,8 +61,12 @@ func (a *Attributes) Serialize() []byte {
|
||||||
|
|
||||||
// DeserializeAttributes converts a JSON struct into an Attributes map
|
// DeserializeAttributes converts a JSON struct into an Attributes map
|
||||||
func DeserializeAttributes(data []byte) Attributes {
|
func DeserializeAttributes(data []byte) Attributes {
|
||||||
var attributes Attributes
|
attributes := make(Attributes)
|
||||||
json.Unmarshal(data, &attributes)
|
err := json.Unmarshal(data, &attributes)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("error deserializing attributes (this is likely a programming error): %v", err)
|
||||||
|
return make(Attributes)
|
||||||
|
}
|
||||||
return attributes
|
return attributes
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -59,6 +78,8 @@ type Conversation struct {
|
||||||
Handle string
|
Handle string
|
||||||
Attributes Attributes
|
Attributes Attributes
|
||||||
ACL AccessControlList
|
ACL AccessControlList
|
||||||
|
|
||||||
|
// Deprecated, please use ACL for permissions related functions
|
||||||
Accepted bool
|
Accepted bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,6 +91,21 @@ func (ci *Conversation) GetAttribute(scope attr.Scope, zone attr.Zone, key strin
|
||||||
return "", false
|
return "", false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetPeerAC returns a suitable Access Control object for a the given peer conversation
|
||||||
|
// If this is called for a group conversation, this method will error and return a safe default AC.
|
||||||
|
func (ci *Conversation) GetPeerAC() AccessControl {
|
||||||
|
if acl, exists := ci.ACL[ci.Handle]; exists {
|
||||||
|
return acl
|
||||||
|
}
|
||||||
|
log.Errorf("attempted to access a Peer Access Control object from %v but peer ACL is undefined. This is likely a programming error", ci.Handle)
|
||||||
|
return DefaultP2PAccessControl()
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCwtchPeer is a helper attribute that identifies whether a conversation is a cwtch peer
|
||||||
|
func (ci *Conversation) IsCwtchPeer() bool {
|
||||||
|
return tor.IsValidHostname(ci.Handle)
|
||||||
|
}
|
||||||
|
|
||||||
// IsGroup is a helper attribute that identifies whether a conversation is a legacy group
|
// IsGroup is a helper attribute that identifies whether a conversation is a legacy group
|
||||||
func (ci *Conversation) IsGroup() bool {
|
func (ci *Conversation) IsGroup() bool {
|
||||||
if _, exists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)).ToString()]; exists {
|
if _, exists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)).ToString()]; exists {
|
||||||
|
@ -86,6 +122,30 @@ func (ci *Conversation) IsServer() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ServerSyncProgress is only valid during a server being in the AUTHENTICATED state and therefor in the syncing process
|
||||||
|
// it returns a double (0-1) representing the estimated progress of the syncing
|
||||||
|
func (ci *Conversation) ServerSyncProgress() float64 {
|
||||||
|
startTimeStr, startExists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncPreLastMessageTime)).ToString()]
|
||||||
|
recentTimeStr, recentExists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncMostRecentMessageTime)).ToString()]
|
||||||
|
|
||||||
|
if !startExists || !recentExists {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime, err := time.Parse(startTimeStr, time.RFC3339Nano)
|
||||||
|
if err != nil {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
recentTime, err := time.Parse(recentTimeStr, time.RFC3339Nano)
|
||||||
|
if err != nil {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
|
||||||
|
syncRange := time.Since(startTime)
|
||||||
|
pointFromStart := startTime.Sub(recentTime)
|
||||||
|
return pointFromStart.Seconds() / syncRange.Seconds()
|
||||||
|
}
|
||||||
|
|
||||||
// ConversationMessage bundles an instance of a conversation message row
|
// ConversationMessage bundles an instance of a conversation message row
|
||||||
type ConversationMessage struct {
|
type ConversationMessage struct {
|
||||||
ID int
|
ID int
|
||||||
|
|
|
@ -0,0 +1,41 @@
|
||||||
|
package model
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
// Experiments are optional functionality that can be enabled/disabled by an application either completely or individually.
|
||||||
|
// examples of experiments include File Sharing, Profile Images and Groups.
|
||||||
|
type Experiments struct {
|
||||||
|
enabled bool
|
||||||
|
experiments sync.Map
|
||||||
|
}
|
||||||
|
|
||||||
|
// InitExperiments encapsulates a set of experiments separate from their storage in GlobalSettings.
|
||||||
|
func InitExperiments(enabled bool, experiments map[string]bool) Experiments {
|
||||||
|
|
||||||
|
var syncExperiments sync.Map
|
||||||
|
for experiment, set := range experiments {
|
||||||
|
syncExperiments.Store(experiment, set)
|
||||||
|
}
|
||||||
|
|
||||||
|
return Experiments{
|
||||||
|
enabled: enabled,
|
||||||
|
experiments: syncExperiments,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEnabled is a convenience function that takes in an experiment and returns true if it is enabled. Experiments
|
||||||
|
// are only enabled if both global experiments are turned on and if the specific experiment is also turned on.
|
||||||
|
// The one exception to this is experiments that have been promoted to default functionality which may be turned on
|
||||||
|
// even if experiments turned off globally. These experiments are defined by DefaultEnabledFunctionality.
|
||||||
|
func (e *Experiments) IsEnabled(experiment string) bool {
|
||||||
|
if !e.enabled {
|
||||||
|
// todo handle default-enabled functionality
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
enabled, exists := e.experiments.Load(experiment)
|
||||||
|
if !exists {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return enabled.(bool)
|
||||||
|
}
|
|
@ -59,21 +59,26 @@ func NewGroup(server string) (*Group, error) {
|
||||||
|
|
||||||
// Derive Group ID from the group key and the server public key. This binds the group to a particular server
|
// Derive Group ID from the group key and the server public key. This binds the group to a particular server
|
||||||
// and key.
|
// and key.
|
||||||
group.GroupID = deriveGroupID(groupKey[:], server)
|
var err error
|
||||||
return group, nil
|
group.GroupID, err = deriveGroupID(groupKey[:], server)
|
||||||
|
return group, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// CheckGroup returns true only if the ID of the group is cryptographically valid.
|
// CheckGroup returns true only if the ID of the group is cryptographically valid.
|
||||||
func (g *Group) CheckGroup() bool {
|
func (g *Group) CheckGroup() bool {
|
||||||
return g.GroupID == deriveGroupID(g.GroupKey[:], g.GroupServer)
|
id, _ := deriveGroupID(g.GroupKey[:], g.GroupServer)
|
||||||
|
return g.GroupID == id
|
||||||
}
|
}
|
||||||
|
|
||||||
// deriveGroupID hashes together the key and the hostname to create a bound identifier that can later
|
// deriveGroupID hashes together the key and the hostname to create a bound identifier that can later
|
||||||
// be referenced and checked by profiles when they receive invites and messages.
|
// be referenced and checked by profiles when they receive invites and messages.
|
||||||
func deriveGroupID(groupKey []byte, serverHostname string) string {
|
func deriveGroupID(groupKey []byte, serverHostname string) (string, error) {
|
||||||
data, _ := base32.StdEncoding.DecodeString(strings.ToUpper(serverHostname))
|
data, err := base32.StdEncoding.DecodeString(strings.ToUpper(serverHostname))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
pubkey := data[0:ed25519.PublicKeySize]
|
pubkey := data[0:ed25519.PublicKeySize]
|
||||||
return hex.EncodeToString(pbkdf2.Key(groupKey, pubkey, 4096, 16, sha512.New))
|
return hex.EncodeToString(pbkdf2.Key(groupKey, pubkey, 4096, 16, sha512.New)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Invite generates a invitation that can be sent to a cwtch peer
|
// Invite generates a invitation that can be sent to a cwtch peer
|
||||||
|
@ -91,7 +96,7 @@ func (g *Group) Invite() (string, error) {
|
||||||
return serializedInvite, err
|
return serializedInvite, err
|
||||||
}
|
}
|
||||||
|
|
||||||
//EncryptMessage takes a message and encrypts the message under the group key.
|
// EncryptMessage takes a message and encrypts the message under the group key.
|
||||||
func (g *Group) EncryptMessage(message *groups.DecryptedGroupMessage) ([]byte, error) {
|
func (g *Group) EncryptMessage(message *groups.DecryptedGroupMessage) ([]byte, error) {
|
||||||
var nonce [24]byte
|
var nonce [24]byte
|
||||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||||
|
@ -148,7 +153,7 @@ func ValidateInvite(invite string) (*groups.GroupInvite, error) {
|
||||||
|
|
||||||
// Derive the servers public key (we can ignore the error checking here because it's already been
|
// Derive the servers public key (we can ignore the error checking here because it's already been
|
||||||
// done by IsValidHostname, and check that we derive the same groupID...
|
// done by IsValidHostname, and check that we derive the same groupID...
|
||||||
derivedGroupID := deriveGroupID(gci.SharedKey, gci.ServerHost)
|
derivedGroupID, _ := deriveGroupID(gci.SharedKey, gci.ServerHost)
|
||||||
if derivedGroupID != gci.GroupID {
|
if derivedGroupID != gci.GroupID {
|
||||||
return nil, errors.New("group id is invalid")
|
return nil, errors.New("group id is invalid")
|
||||||
}
|
}
|
||||||
|
@ -166,7 +171,9 @@ func ValidateInvite(invite string) (*groups.GroupInvite, error) {
|
||||||
// If successful, adds the message to the group's timeline
|
// If successful, adds the message to the group's timeline
|
||||||
func (g *Group) AttemptDecryption(ciphertext []byte, signature []byte) (bool, *groups.DecryptedGroupMessage) {
|
func (g *Group) AttemptDecryption(ciphertext []byte, signature []byte) (bool, *groups.DecryptedGroupMessage) {
|
||||||
success, dgm := g.DecryptMessage(ciphertext)
|
success, dgm := g.DecryptMessage(ciphertext)
|
||||||
if success {
|
// the second check here is not needed, but DecryptMessage violates the usual
|
||||||
|
// go calling convention and we want static analysis tools to pick it up
|
||||||
|
if success && dgm != nil {
|
||||||
|
|
||||||
// Attempt to serialize this message
|
// Attempt to serialize this message
|
||||||
serialized, err := json.Marshal(dgm)
|
serialized, err := json.Marshal(dgm)
|
||||||
|
@ -215,6 +222,7 @@ func (g *Group) AttemptDecryption(ciphertext []byte, signature []byte) (bool, *g
|
||||||
// 2. Secondly, we confirm that the sender sent the message to a particular group id on a specific server (it doesn't
|
// 2. Secondly, we confirm that the sender sent the message to a particular group id on a specific server (it doesn't
|
||||||
// matter if we actually received this message from the server or from a hybrid protocol, all that matters is
|
// matter if we actually received this message from the server or from a hybrid protocol, all that matters is
|
||||||
// that the sender and receivers agree that this message was intended for the group
|
// that the sender and receivers agree that this message was intended for the group
|
||||||
|
//
|
||||||
// The 2nd point is important as it prevents an attack documented in the original Cwtch paper (and later at
|
// The 2nd point is important as it prevents an attack documented in the original Cwtch paper (and later at
|
||||||
// https://docs.openprivacy.ca/cwtch-security-handbook/groups.html) in which a malicious profile sets up 2 groups
|
// https://docs.openprivacy.ca/cwtch-security-handbook/groups.html) in which a malicious profile sets up 2 groups
|
||||||
// on two different servers with the same key and then forwards messages between them to convince the parties in
|
// on two different servers with the same key and then forwards messages between them to convince the parties in
|
||||||
|
|
|
@ -9,7 +9,10 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGroup(t *testing.T) {
|
func TestGroup(t *testing.T) {
|
||||||
g, _ := NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
g, err := NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Group with real group server should not fail")
|
||||||
|
}
|
||||||
dgm := &groups.DecryptedGroupMessage{
|
dgm := &groups.DecryptedGroupMessage{
|
||||||
Onion: "onion",
|
Onion: "onion",
|
||||||
Text: "Hello World!",
|
Text: "Hello World!",
|
||||||
|
@ -37,7 +40,7 @@ func TestGroup(t *testing.T) {
|
||||||
|
|
||||||
encMessage, _ := g.EncryptMessage(dgm)
|
encMessage, _ := g.EncryptMessage(dgm)
|
||||||
ok, message := g.DecryptMessage(encMessage)
|
ok, message := g.DecryptMessage(encMessage)
|
||||||
if !ok || message.Text != "Hello World!" {
|
if (!ok || message == nil) || message.Text != "Hello World!" {
|
||||||
t.Errorf("group encryption was invalid, or returned wrong message decrypted:%v message:%v", ok, message)
|
t.Errorf("group encryption was invalid, or returned wrong message decrypted:%v message:%v", ok, message)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -73,7 +76,10 @@ func TestGroupValidation(t *testing.T) {
|
||||||
t.Logf("Error: %v", err)
|
t.Logf("Error: %v", err)
|
||||||
|
|
||||||
// Generate a valid group but replace the group server...
|
// Generate a valid group but replace the group server...
|
||||||
group, _ = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
group, err = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Group with real group server should not fail")
|
||||||
|
}
|
||||||
group.GroupServer = "tcnkoch4nyr3cldkemejtkpqok342rbql6iclnjjs3ndgnjgufzyxvqd"
|
group.GroupServer = "tcnkoch4nyr3cldkemejtkpqok342rbql6iclnjjs3ndgnjgufzyxvqd"
|
||||||
invite, _ = group.Invite()
|
invite, _ = group.Invite()
|
||||||
_, err = ValidateInvite(invite)
|
_, err = ValidateInvite(invite)
|
||||||
|
@ -84,7 +90,10 @@ func TestGroupValidation(t *testing.T) {
|
||||||
t.Logf("Error: %v", err)
|
t.Logf("Error: %v", err)
|
||||||
|
|
||||||
// Generate a valid group but replace the group key...
|
// Generate a valid group but replace the group key...
|
||||||
group, _ = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
group, err = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Group with real group server should not fail")
|
||||||
|
}
|
||||||
group.GroupKey = sha256.Sum256([]byte{})
|
group.GroupKey = sha256.Sum256([]byte{})
|
||||||
invite, _ = group.Invite()
|
invite, _ = group.Invite()
|
||||||
_, err = ValidateInvite(invite)
|
_, err = ValidateInvite(invite)
|
||||||
|
|
|
@ -99,7 +99,7 @@ func (t *Timeline) SetMessages(messages []Message) {
|
||||||
|
|
||||||
// GetMessagesByHash attempts to find messages that match the given
|
// GetMessagesByHash attempts to find messages that match the given
|
||||||
// content hash in the timeline. If successful it returns a list of messages as well as their local index
|
// content hash in the timeline. If successful it returns a list of messages as well as their local index
|
||||||
//, on failure it returns an error.
|
// , on failure it returns an error.
|
||||||
// We return a list of messages because content hashes are not guaranteed to be unique from a given Peer. This allows
|
// We return a list of messages because content hashes are not guaranteed to be unique from a given Peer. This allows
|
||||||
// us to do things like: ensure that reply-to and quotes reference the last seen message from the message they are quoted
|
// us to do things like: ensure that reply-to and quotes reference the last seen message from the message they are quoted
|
||||||
// in or detect duplicate messages from a peer.
|
// in or detect duplicate messages from a peer.
|
||||||
|
|
|
@ -3,6 +3,7 @@ package model
|
||||||
import (
|
import (
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CalculateContentHash derives a hash using the author and the message body. It is intended to be
|
// CalculateContentHash derives a hash using the author and the message body. It is intended to be
|
||||||
|
@ -12,3 +13,13 @@ func CalculateContentHash(author string, messageBody string) string {
|
||||||
contentBasedHash := sha256.Sum256(content)
|
contentBasedHash := sha256.Sum256(content)
|
||||||
return base64.StdEncoding.EncodeToString(contentBasedHash[:])
|
return base64.StdEncoding.EncodeToString(contentBasedHash[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func DeserializeMessage(message string) (*MessageWrapper, error) {
|
||||||
|
var cm MessageWrapper
|
||||||
|
err := json.Unmarshal([]byte(message), &cm)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &cm, err
|
||||||
|
}
|
||||||
|
|
|
@ -1,9 +1,40 @@
|
||||||
package model
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
// MessageWrapper is the canonical Cwtch overlay wrapper
|
// MessageWrapper is the canonical Cwtch overlay wrapper
|
||||||
type MessageWrapper struct {
|
type MessageWrapper struct {
|
||||||
Overlay int `json:"o"`
|
Overlay int `json:"o"`
|
||||||
Data string `json:"d"`
|
Data string `json:"d"`
|
||||||
|
|
||||||
|
// when the data was assembled
|
||||||
|
SendTime *time.Time `json:"s,omitempty"`
|
||||||
|
|
||||||
|
// when the data was transmitted (by protocol engine e.g. over Tor)
|
||||||
|
TransitTime *time.Time `json:"t,omitempty"`
|
||||||
|
|
||||||
|
// when the data was received
|
||||||
|
RecvTime *time.Time `json:"r,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Channel is defined as being the last 3 bits of the overlay id
|
||||||
|
// Channel 0 is reserved for the main conversation
|
||||||
|
// Channel 2 is reserved for conversation admin (managed groups)
|
||||||
|
// Channel 7 is reserved for streams (no ack, no store)
|
||||||
|
func (mw MessageWrapper) Channel() int {
|
||||||
|
if mw.Overlay > 1024 {
|
||||||
|
return mw.Overlay & 0x07
|
||||||
|
}
|
||||||
|
// for backward compatibilty all overlays less than 0x400 i.e. 1024 are
|
||||||
|
// mapped to channel 0 regardless of their channel status.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// If Overlay is a Stream Message it should not be ackd, or stored.
|
||||||
|
func (mw MessageWrapper) IsStream() bool {
|
||||||
|
return mw.Channel() == 0x07
|
||||||
}
|
}
|
||||||
|
|
||||||
// OverlayChat is the canonical identifier for chat overlays
|
// OverlayChat is the canonical identifier for chat overlays
|
||||||
|
|
|
@ -80,11 +80,19 @@ func (p *Profile) GetCopy(timeline bool) *Profile {
|
||||||
|
|
||||||
if timeline {
|
if timeline {
|
||||||
for groupID := range newp.Groups {
|
for groupID := range newp.Groups {
|
||||||
newp.Groups[groupID].Timeline = *p.Groups[groupID].Timeline.GetCopy()
|
if group, exists := newp.Groups[groupID]; exists {
|
||||||
|
if pGroup, exists := p.Groups[groupID]; exists {
|
||||||
|
group.Timeline = *(pGroup).Timeline.GetCopy()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for peerID := range newp.Contacts {
|
for peerID := range newp.Contacts {
|
||||||
newp.Contacts[peerID].Timeline = *p.Contacts[peerID].Timeline.GetCopy()
|
if peer, exists := newp.Contacts[peerID]; exists {
|
||||||
|
if pPeer, exists := p.Contacts[peerID]; exists {
|
||||||
|
peer.Timeline = *(pPeer).Timeline.GetCopy()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
1107
peer/cwtch_peer.go
1107
peer/cwtch_peer.go
File diff suppressed because it is too large
Load Diff
|
@ -1,6 +1,8 @@
|
||||||
package peer
|
package peer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"compress/gzip"
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"cwtch.im/cwtch/model/attr"
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
@ -8,7 +10,12 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// StorageKeyType is an interface wrapper around storage key types
|
// StorageKeyType is an interface wrapper around storage key types
|
||||||
|
@ -30,10 +37,12 @@ const (
|
||||||
type CwtchProfileStorage struct {
|
type CwtchProfileStorage struct {
|
||||||
|
|
||||||
// Note: Statements are thread safe..
|
// Note: Statements are thread safe..
|
||||||
|
mutex sync.Mutex
|
||||||
|
|
||||||
// Profile related statements
|
// Profile related statements
|
||||||
insertProfileKeyValueStmt *sql.Stmt
|
insertProfileKeyValueStmt *sql.Stmt
|
||||||
selectProfileKeyValueStmt *sql.Stmt
|
selectProfileKeyValueStmt *sql.Stmt
|
||||||
|
findProfileKeySQLStmt *sql.Stmt
|
||||||
|
|
||||||
// Conversation related statements
|
// Conversation related statements
|
||||||
insertConversationStmt *sql.Stmt
|
insertConversationStmt *sql.Stmt
|
||||||
|
@ -53,6 +62,7 @@ type CwtchProfileStorage struct {
|
||||||
channelGetMostRecentMessagesStmts map[ChannelID]*sql.Stmt
|
channelGetMostRecentMessagesStmts map[ChannelID]*sql.Stmt
|
||||||
channelGetMessageByContentHashStmts map[ChannelID]*sql.Stmt
|
channelGetMessageByContentHashStmts map[ChannelID]*sql.Stmt
|
||||||
channelRowNumberStmts map[ChannelID]*sql.Stmt
|
channelRowNumberStmts map[ChannelID]*sql.Stmt
|
||||||
|
channelSearchConversationSQLStmt map[ChannelID]*sql.Stmt
|
||||||
ProfileDirectory string
|
ProfileDirectory string
|
||||||
db *sql.DB
|
db *sql.DB
|
||||||
}
|
}
|
||||||
|
@ -65,6 +75,7 @@ type ChannelID struct {
|
||||||
|
|
||||||
const insertProfileKeySQLStmt = `insert or replace into profile_kv(KeyType, KeyName, KeyValue) values(?,?,?);`
|
const insertProfileKeySQLStmt = `insert or replace into profile_kv(KeyType, KeyName, KeyValue) values(?,?,?);`
|
||||||
const selectProfileKeySQLStmt = `select KeyValue from profile_kv where KeyType=(?) and KeyName=(?);`
|
const selectProfileKeySQLStmt = `select KeyValue from profile_kv where KeyType=(?) and KeyName=(?);`
|
||||||
|
const findProfileKeySQLStmt = `select KeyName from profile_kv where KeyType=(?) and KeyName LIKE (?);`
|
||||||
|
|
||||||
const insertConversationSQLStmt = `insert into conversations(Handle, Attributes, ACL, Accepted) values(?,?,?,?);`
|
const insertConversationSQLStmt = `insert into conversations(Handle, Attributes, ACL, Accepted) values(?,?,?,?);`
|
||||||
const fetchAllConversationsSQLStmt = `select ID, Handle, Attributes, ACL, Accepted from conversations;`
|
const fetchAllConversationsSQLStmt = `select ID, Handle, Attributes, ACL, Accepted from conversations;`
|
||||||
|
@ -105,6 +116,9 @@ const getMessageCountFromConversationSQLStmt = `select count(*) from channel_%d_
|
||||||
// getMostRecentMessagesSQLStmt is a template for fetching the most recent N messages in a conversation channel
|
// getMostRecentMessagesSQLStmt is a template for fetching the most recent N messages in a conversation channel
|
||||||
const getMostRecentMessagesSQLStmt = `select ID, Body, Attributes, Signature, ContentHash from channel_%d_%d_chat order by ID desc limit (?) offset (?);`
|
const getMostRecentMessagesSQLStmt = `select ID, Body, Attributes, Signature, ContentHash from channel_%d_%d_chat order by ID desc limit (?) offset (?);`
|
||||||
|
|
||||||
|
// searchConversationSQLStmt is a template for search a conversation for the most recent N messages matching a given pattern
|
||||||
|
const searchConversationSQLStmt = `select ID, Body, Attributes, Signature, ContentHash from (select ID, Body, Attributes, Signature, ContentHash from channel_%d_%d_chat order by ID desc limit (?) offset (?)) where BODY like (?)`
|
||||||
|
|
||||||
// NewCwtchProfileStorage constructs a new CwtchProfileStorage from a database. It is also responsible for
|
// NewCwtchProfileStorage constructs a new CwtchProfileStorage from a database. It is also responsible for
|
||||||
// Preparing commonly used SQL Statements
|
// Preparing commonly used SQL Statements
|
||||||
func NewCwtchProfileStorage(db *sql.DB, profileDirectory string) (*CwtchProfileStorage, error) {
|
func NewCwtchProfileStorage(db *sql.DB, profileDirectory string) (*CwtchProfileStorage, error) {
|
||||||
|
@ -115,60 +129,79 @@ func NewCwtchProfileStorage(db *sql.DB, profileDirectory string) (*CwtchProfileS
|
||||||
|
|
||||||
insertProfileKeyValueStmt, err := db.Prepare(insertProfileKeySQLStmt)
|
insertProfileKeyValueStmt, err := db.Prepare(insertProfileKeySQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error preparing query: %v %v", insertProfileKeySQLStmt, err)
|
_ = db.Close()
|
||||||
|
// note: this is debug because we expect failure here when opening an encrypted database with an
|
||||||
|
// incorrect password. The rest are errors because failure is not expected.
|
||||||
|
log.Debugf("error preparing query: %v %v", insertProfileKeySQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
selectProfileKeyStmt, err := db.Prepare(selectProfileKeySQLStmt)
|
selectProfileKeyStmt, err := db.Prepare(selectProfileKeySQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", selectProfileKeySQLStmt, err)
|
log.Errorf("error preparing query: %v %v", selectProfileKeySQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
findProfileKeyStmt, err := db.Prepare(findProfileKeySQLStmt)
|
||||||
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
|
log.Errorf("error preparing query: %v %v", findProfileKeySQLStmt, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
insertConversationStmt, err := db.Prepare(insertConversationSQLStmt)
|
insertConversationStmt, err := db.Prepare(insertConversationSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", insertConversationSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", insertConversationSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fetchAllConversationsStmt, err := db.Prepare(fetchAllConversationsSQLStmt)
|
fetchAllConversationsStmt, err := db.Prepare(fetchAllConversationsSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", fetchAllConversationsSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", fetchAllConversationsSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
selectConversationStmt, err := db.Prepare(selectConversationSQLStmt)
|
selectConversationStmt, err := db.Prepare(selectConversationSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", selectConversationSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", selectConversationSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
selectConversationByHandleStmt, err := db.Prepare(selectConversationByHandleSQLStmt)
|
selectConversationByHandleStmt, err := db.Prepare(selectConversationByHandleSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", selectConversationByHandleSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", selectConversationByHandleSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
acceptConversationStmt, err := db.Prepare(acceptConversationSQLStmt)
|
acceptConversationStmt, err := db.Prepare(acceptConversationSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", acceptConversationSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", acceptConversationSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
deleteConversationStmt, err := db.Prepare(deleteConversationSQLStmt)
|
deleteConversationStmt, err := db.Prepare(deleteConversationSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", deleteConversationSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", deleteConversationSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
setConversationAttributesStmt, err := db.Prepare(setConversationAttributesSQLStmt)
|
setConversationAttributesStmt, err := db.Prepare(setConversationAttributesSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", setConversationAttributesSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", setConversationAttributesSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
setConversationACLStmt, err := db.Prepare(setConversationACLSQLStmt)
|
setConversationACLStmt, err := db.Prepare(setConversationACLSQLStmt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
log.Errorf("error preparing query: %v %v", setConversationACLSQLStmt, err)
|
log.Errorf("error preparing query: %v %v", setConversationACLSQLStmt, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -177,6 +210,7 @@ func NewCwtchProfileStorage(db *sql.DB, profileDirectory string) (*CwtchProfileS
|
||||||
ProfileDirectory: profileDirectory,
|
ProfileDirectory: profileDirectory,
|
||||||
insertProfileKeyValueStmt: insertProfileKeyValueStmt,
|
insertProfileKeyValueStmt: insertProfileKeyValueStmt,
|
||||||
selectProfileKeyValueStmt: selectProfileKeyStmt,
|
selectProfileKeyValueStmt: selectProfileKeyStmt,
|
||||||
|
findProfileKeySQLStmt: findProfileKeyStmt,
|
||||||
fetchAllConversationsStmt: fetchAllConversationsStmt,
|
fetchAllConversationsStmt: fetchAllConversationsStmt,
|
||||||
insertConversationStmt: insertConversationStmt,
|
insertConversationStmt: insertConversationStmt,
|
||||||
selectConversationStmt: selectConversationStmt,
|
selectConversationStmt: selectConversationStmt,
|
||||||
|
@ -193,12 +227,15 @@ func NewCwtchProfileStorage(db *sql.DB, profileDirectory string) (*CwtchProfileS
|
||||||
channelGetMostRecentMessagesStmts: map[ChannelID]*sql.Stmt{},
|
channelGetMostRecentMessagesStmts: map[ChannelID]*sql.Stmt{},
|
||||||
channelGetCountStmts: map[ChannelID]*sql.Stmt{},
|
channelGetCountStmts: map[ChannelID]*sql.Stmt{},
|
||||||
channelRowNumberStmts: map[ChannelID]*sql.Stmt{},
|
channelRowNumberStmts: map[ChannelID]*sql.Stmt{},
|
||||||
|
channelSearchConversationSQLStmt: map[ChannelID]*sql.Stmt{},
|
||||||
},
|
},
|
||||||
nil
|
nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// StoreProfileKeyValue allows storing of typed Key/Value attribute in the Storage Engine
|
// StoreProfileKeyValue allows storing of typed Key/Value attribute in the Storage Engine
|
||||||
func (cps *CwtchProfileStorage) StoreProfileKeyValue(keyType StorageKeyType, key string, value []byte) error {
|
func (cps *CwtchProfileStorage) StoreProfileKeyValue(keyType StorageKeyType, key string, value []byte) error {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, err := cps.insertProfileKeyValueStmt.Exec(keyType, key, value)
|
_, err := cps.insertProfileKeyValueStmt.Exec(keyType, key, value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -207,8 +244,40 @@ func (cps *CwtchProfileStorage) StoreProfileKeyValue(keyType StorageKeyType, key
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FindProfileKeysByPrefix allows fetching of typed values via a known Key from the Storage Engine
|
||||||
|
func (cps *CwtchProfileStorage) FindProfileKeysByPrefix(keyType StorageKeyType, prefix string) ([]string, error) {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
|
rows, err := cps.findProfileKeySQLStmt.Query(keyType, prefix+"%")
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error executing query: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var keys []string
|
||||||
|
defer rows.Close()
|
||||||
|
for {
|
||||||
|
result := rows.Next()
|
||||||
|
|
||||||
|
if !result {
|
||||||
|
return keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var key []byte
|
||||||
|
err = rows.Scan(&key)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error fetching rows: %v", err)
|
||||||
|
rows.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
keys = append(keys, string(key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// LoadProfileKeyValue allows fetching of typed values via a known Key from the Storage Engine
|
// LoadProfileKeyValue allows fetching of typed values via a known Key from the Storage Engine
|
||||||
func (cps *CwtchProfileStorage) LoadProfileKeyValue(keyType StorageKeyType, key string) ([]byte, error) {
|
func (cps *CwtchProfileStorage) LoadProfileKeyValue(keyType StorageKeyType, key string) ([]byte, error) {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
rows, err := cps.selectProfileKeyValueStmt.Query(keyType, key)
|
rows, err := cps.selectProfileKeyValueStmt.Query(keyType, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -234,6 +303,8 @@ func (cps *CwtchProfileStorage) LoadProfileKeyValue(keyType StorageKeyType, key
|
||||||
|
|
||||||
// NewConversation stores a new conversation in the data store
|
// NewConversation stores a new conversation in the data store
|
||||||
func (cps *CwtchProfileStorage) NewConversation(handle string, attributes model.Attributes, acl model.AccessControlList, accepted bool) (int, error) {
|
func (cps *CwtchProfileStorage) NewConversation(handle string, attributes model.Attributes, acl model.AccessControlList, accepted bool) (int, error) {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
tx, err := cps.db.Begin()
|
tx, err := cps.db.Begin()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -279,6 +350,8 @@ func (cps *CwtchProfileStorage) NewConversation(handle string, attributes model.
|
||||||
// Ideally this function should not exist, and all lookups should happen by ID (this is currently
|
// Ideally this function should not exist, and all lookups should happen by ID (this is currently
|
||||||
// unavoidable in some circumstances because the event bus references conversations by handle, not by id)
|
// unavoidable in some circumstances because the event bus references conversations by handle, not by id)
|
||||||
func (cps *CwtchProfileStorage) GetConversationByHandle(handle string) (*model.Conversation, error) {
|
func (cps *CwtchProfileStorage) GetConversationByHandle(handle string) (*model.Conversation, error) {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
rows, err := cps.selectConversationByHandleStmt.Query(handle)
|
rows, err := cps.selectConversationByHandleStmt.Query(handle)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -303,13 +376,20 @@ func (cps *CwtchProfileStorage) GetConversationByHandle(handle string) (*model.C
|
||||||
}
|
}
|
||||||
rows.Close()
|
rows.Close()
|
||||||
|
|
||||||
return &model.Conversation{ID: id, Handle: handle, ACL: model.DeserializeAccessControlList(acl), Attributes: model.DeserializeAttributes(attributes), Accepted: accepted}, nil
|
cacl, err := model.DeserializeAccessControlList(acl)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error deserializing ACL from database, database maybe corrupted: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.Conversation{ID: id, Handle: handle, ACL: cacl, Attributes: model.DeserializeAttributes(attributes), Accepted: accepted}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchConversations returns *all* active conversations. This method should only be called
|
// FetchConversations returns *all* active conversations. This method should only be called
|
||||||
// on app start up to build a summary of conversations for the UI. Any further updates should be integrated
|
// on app start up to build a summary of conversations for the UI. Any further updates should be integrated
|
||||||
// through the event bus.
|
// through the event bus.
|
||||||
func (cps *CwtchProfileStorage) FetchConversations() ([]*model.Conversation, error) {
|
func (cps *CwtchProfileStorage) FetchConversations() ([]*model.Conversation, error) {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
rows, err := cps.fetchAllConversationsStmt.Query()
|
rows, err := cps.fetchAllConversationsStmt.Query()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -337,13 +417,21 @@ func (cps *CwtchProfileStorage) FetchConversations() ([]*model.Conversation, err
|
||||||
rows.Close()
|
rows.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
conversations = append(conversations, &model.Conversation{ID: id, Handle: handle, ACL: model.DeserializeAccessControlList(acl), Attributes: model.DeserializeAttributes(attributes), Accepted: accepted})
|
|
||||||
|
cacl, err := model.DeserializeAccessControlList(acl)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error deserializing ACL from database, database maybe corrupted: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
conversations = append(conversations, &model.Conversation{ID: id, Handle: handle, ACL: cacl, Attributes: model.DeserializeAttributes(attributes), Accepted: accepted})
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetConversation looks up a particular conversation by id
|
// GetConversation looks up a particular conversation by id
|
||||||
func (cps *CwtchProfileStorage) GetConversation(id int) (*model.Conversation, error) {
|
func (cps *CwtchProfileStorage) GetConversation(id int) (*model.Conversation, error) {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
rows, err := cps.selectConversationStmt.Query(id)
|
rows, err := cps.selectConversationStmt.Query(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -368,11 +456,18 @@ func (cps *CwtchProfileStorage) GetConversation(id int) (*model.Conversation, er
|
||||||
}
|
}
|
||||||
rows.Close()
|
rows.Close()
|
||||||
|
|
||||||
return &model.Conversation{ID: id, Handle: handle, ACL: model.DeserializeAccessControlList(acl), Attributes: model.DeserializeAttributes(attributes), Accepted: accepted}, nil
|
cacl, err := model.DeserializeAccessControlList(acl)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error deserializing ACL from database, database maybe corrupted: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.Conversation{ID: id, Handle: handle, ACL: cacl, Attributes: model.DeserializeAttributes(attributes), Accepted: accepted}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// AcceptConversation sets the accepted status of a conversation to true in the backing datastore
|
// AcceptConversation sets the accepted status of a conversation to true in the backing datastore
|
||||||
func (cps *CwtchProfileStorage) AcceptConversation(id int) error {
|
func (cps *CwtchProfileStorage) AcceptConversation(id int) error {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, err := cps.acceptConversationStmt.Exec(id)
|
_, err := cps.acceptConversationStmt.Exec(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -383,6 +478,8 @@ func (cps *CwtchProfileStorage) AcceptConversation(id int) error {
|
||||||
|
|
||||||
// DeleteConversation purges the conversation and any associated message history from the conversation store.
|
// DeleteConversation purges the conversation and any associated message history from the conversation store.
|
||||||
func (cps *CwtchProfileStorage) DeleteConversation(id int) error {
|
func (cps *CwtchProfileStorage) DeleteConversation(id int) error {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, err := cps.deleteConversationStmt.Exec(id)
|
_, err := cps.deleteConversationStmt.Exec(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -393,6 +490,8 @@ func (cps *CwtchProfileStorage) DeleteConversation(id int) error {
|
||||||
|
|
||||||
// SetConversationACL sets a new ACL on a given conversation.
|
// SetConversationACL sets a new ACL on a given conversation.
|
||||||
func (cps *CwtchProfileStorage) SetConversationACL(id int, acl model.AccessControlList) error {
|
func (cps *CwtchProfileStorage) SetConversationACL(id int, acl model.AccessControlList) error {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, err := cps.setConversationACLStmt.Exec(acl.Serialize(), id)
|
_, err := cps.setConversationACLStmt.Exec(acl.Serialize(), id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing query: %v", err)
|
log.Errorf("error executing query: %v", err)
|
||||||
|
@ -407,6 +506,8 @@ func (cps *CwtchProfileStorage) SetConversationAttribute(id int, path attr.Scope
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
ci.Attributes[path.ToString()] = value
|
ci.Attributes[path.ToString()] = value
|
||||||
_, err = cps.setConversationAttributesStmt.Exec(ci.Attributes.Serialize(), id)
|
_, err = cps.setConversationAttributesStmt.Exec(ci.Attributes.Serialize(), id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -418,9 +519,10 @@ func (cps *CwtchProfileStorage) SetConversationAttribute(id int, path attr.Scope
|
||||||
|
|
||||||
// InsertMessage appends a message to a conversation channel, with a given set of attributes
|
// InsertMessage appends a message to a conversation channel, with a given set of attributes
|
||||||
func (cps *CwtchProfileStorage) InsertMessage(conversation int, channel int, body string, attributes model.Attributes, signature string, contentHash string) (int, error) {
|
func (cps *CwtchProfileStorage) InsertMessage(conversation int, channel int, body string, attributes model.Attributes, signature string, contentHash string) (int, error) {
|
||||||
|
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelInsertStmts[channelID]
|
_, exists := cps.channelInsertStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(insertMessageIntoConversationSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(insertMessageIntoConversationSQLStmt, conversation, channel))
|
||||||
|
@ -446,6 +548,8 @@ func (cps *CwtchProfileStorage) UpdateMessageAttributes(conversation int, channe
|
||||||
|
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelUpdateMessageStmts[channelID]
|
_, exists := cps.channelUpdateMessageStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(updateMessageIntoConversationSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(updateMessageIntoConversationSQLStmt, conversation, channel))
|
||||||
|
@ -470,6 +574,8 @@ func (cps *CwtchProfileStorage) UpdateMessageAttributes(conversation int, channe
|
||||||
func (cps *CwtchProfileStorage) GetChannelMessageBySignature(conversation int, channel int, signature string) (int, error) {
|
func (cps *CwtchProfileStorage) GetChannelMessageBySignature(conversation int, channel int, signature string) (int, error) {
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelGetMessageBySignatureStmts[channelID]
|
_, exists := cps.channelGetMessageBySignatureStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageBySignatureFromConversationSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageBySignatureFromConversationSQLStmt, conversation, channel))
|
||||||
|
@ -507,6 +613,8 @@ func (cps *CwtchProfileStorage) GetChannelMessageBySignature(conversation int, c
|
||||||
func (cps *CwtchProfileStorage) GetChannelMessageByContentHash(conversation int, channel int, hash string) (int, error) {
|
func (cps *CwtchProfileStorage) GetChannelMessageByContentHash(conversation int, channel int, hash string) (int, error) {
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelGetMessageByContentHashStmts[channelID]
|
_, exists := cps.channelGetMessageByContentHashStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageByContentHashFromConversationSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageByContentHashFromConversationSQLStmt, conversation, channel))
|
||||||
|
@ -545,6 +653,8 @@ func (cps *CwtchProfileStorage) GetChannelMessageByContentHash(conversation int,
|
||||||
func (cps *CwtchProfileStorage) GetRowNumberByMessageID(conversation int, channel int, id int) (int, error) {
|
func (cps *CwtchProfileStorage) GetRowNumberByMessageID(conversation int, channel int, id int) (int, error) {
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelRowNumberStmts[channelID]
|
_, exists := cps.channelRowNumberStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getLocalIndexOfMessageIDSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getLocalIndexOfMessageIDSQLStmt, conversation, channel))
|
||||||
|
@ -584,6 +694,8 @@ func (cps *CwtchProfileStorage) GetRowNumberByMessageID(conversation int, channe
|
||||||
func (cps *CwtchProfileStorage) GetChannelMessage(conversation int, channel int, messageID int) (string, model.Attributes, error) {
|
func (cps *CwtchProfileStorage) GetChannelMessage(conversation int, channel int, messageID int) (string, model.Attributes, error) {
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelGetMessageStmts[channelID]
|
_, exists := cps.channelGetMessageStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageFromConversationSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageFromConversationSQLStmt, conversation, channel))
|
||||||
|
@ -624,6 +736,8 @@ func (cps *CwtchProfileStorage) GetChannelMessage(conversation int, channel int,
|
||||||
func (cps *CwtchProfileStorage) GetChannelMessageCount(conversation int, channel int) (int, error) {
|
func (cps *CwtchProfileStorage) GetChannelMessageCount(conversation int, channel int) (int, error) {
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelGetCountStmts[channelID]
|
_, exists := cps.channelGetCountStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageCountFromConversationSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageCountFromConversationSQLStmt, conversation, channel))
|
||||||
|
@ -643,10 +757,51 @@ func (cps *CwtchProfileStorage) GetChannelMessageCount(conversation int, channel
|
||||||
return count, nil
|
return count, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMostRecentMessages returns the most recent messages in a channel up to a given limit at a given offset
|
func (cps *CwtchProfileStorage) SearchMessages(conversation int, channel int, pattern string, offset int, limit int) ([]model.ConversationMessage, error) {
|
||||||
func (cps *CwtchProfileStorage) GetMostRecentMessages(conversation int, channel int, offset int, limit int) ([]model.ConversationMessage, error) {
|
|
||||||
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
|
_, exists := cps.channelSearchConversationSQLStmt[channelID]
|
||||||
|
if !exists {
|
||||||
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(searchConversationSQLStmt, conversation, channel))
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error executing transaction: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cps.channelSearchConversationSQLStmt[channelID] = conversationStmt
|
||||||
|
}
|
||||||
|
rows, err := cps.channelSearchConversationSQLStmt[channelID].Query(limit, offset, pattern)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error executing prepared stmt: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var conversationMessages []model.ConversationMessage
|
||||||
|
defer rows.Close()
|
||||||
|
for {
|
||||||
|
result := rows.Next()
|
||||||
|
if !result {
|
||||||
|
return conversationMessages, nil
|
||||||
|
}
|
||||||
|
var id int
|
||||||
|
var body string
|
||||||
|
var attributes []byte
|
||||||
|
var sig string
|
||||||
|
var contenthash string
|
||||||
|
err = rows.Scan(&id, &body, &attributes, &sig, &contenthash)
|
||||||
|
if err != nil {
|
||||||
|
return conversationMessages, err
|
||||||
|
}
|
||||||
|
conversationMessages = append(conversationMessages, model.ConversationMessage{ID: id, Body: body, Attr: model.DeserializeAttributes(attributes), Signature: sig, ContentHash: contenthash})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMostRecentMessages returns the most recent messages in a channel up to a given limit at a given offset
|
||||||
|
func (cps *CwtchProfileStorage) GetMostRecentMessages(conversation int, channel int, offset int, limit uint) ([]model.ConversationMessage, error) {
|
||||||
|
channelID := ChannelID{Conversation: conversation, Channel: channel}
|
||||||
|
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
_, exists := cps.channelGetMostRecentMessagesStmts[channelID]
|
_, exists := cps.channelGetMostRecentMessagesStmts[channelID]
|
||||||
if !exists {
|
if !exists {
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMostRecentMessagesSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMostRecentMessagesSQLStmt, conversation, channel))
|
||||||
|
@ -684,6 +839,8 @@ func (cps *CwtchProfileStorage) GetMostRecentMessages(conversation int, channel
|
||||||
|
|
||||||
// PurgeConversationChannel deletes all message for a conversation channel.
|
// PurgeConversationChannel deletes all message for a conversation channel.
|
||||||
func (cps *CwtchProfileStorage) PurgeConversationChannel(conversation int, channel int) error {
|
func (cps *CwtchProfileStorage) PurgeConversationChannel(conversation int, channel int) error {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(purgeMessagesFromConversationSQLStmt, conversation, channel))
|
conversationStmt, err := cps.db.Prepare(fmt.Sprintf(purgeMessagesFromConversationSQLStmt, conversation, channel))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error executing transaction: %v", err)
|
log.Errorf("error executing transaction: %v", err)
|
||||||
|
@ -695,13 +852,31 @@ func (cps *CwtchProfileStorage) PurgeConversationChannel(conversation int, chann
|
||||||
|
|
||||||
// PurgeNonSavedMessages deletes all message conversations that are not explicitly set to saved.
|
// PurgeNonSavedMessages deletes all message conversations that are not explicitly set to saved.
|
||||||
func (cps *CwtchProfileStorage) PurgeNonSavedMessages() {
|
func (cps *CwtchProfileStorage) PurgeNonSavedMessages() {
|
||||||
// Purge Messages that are not stored...
|
|
||||||
|
// check to see if the profile global setting has been explicitly set to save (peer) conversations by default.
|
||||||
|
defaultSave := false
|
||||||
|
key, err := cps.LoadProfileKeyValue(TypeAttribute, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(event.PreserveHistoryDefaultSettingKey)).ToString())
|
||||||
|
if err == nil {
|
||||||
|
if defaultSaveSetting, err := strconv.ParseBool(string(key)); err == nil {
|
||||||
|
defaultSave = defaultSaveSetting
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// For each conversation, all that is not explicitly saved will be lost...
|
||||||
ci, err := cps.FetchConversations()
|
ci, err := cps.FetchConversations()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
for _, conversation := range ci {
|
for _, conversation := range ci {
|
||||||
|
// unless this is a server or a group...for which we default save always (for legacy reasons)
|
||||||
|
// FIXME: revisit this for hybrid groups.
|
||||||
if !conversation.IsGroup() && !conversation.IsServer() {
|
if !conversation.IsGroup() && !conversation.IsServer() {
|
||||||
if conversation.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(event.SaveHistoryKey)).ToString()] != event.SaveHistoryConfirmed {
|
// Note that we only check for confirmed status here...if it is set to any other value we will fallthrough to the default.
|
||||||
log.Infof("purging conversation...")
|
saveHistoryConfirmed := conversation.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(event.SaveHistoryKey)).ToString()] == event.SaveHistoryConfirmed
|
||||||
|
deleteHistoryConfirmed := conversation.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(event.SaveHistoryKey)).ToString()] == event.DeleteHistoryConfirmed
|
||||||
|
// we purge conversation history in two specific instances...
|
||||||
|
// if the conversation has been explicitly marked as delete history confirmed OR
|
||||||
|
// if save history hasn't been confirmed and default save history is false - i.e. in all other cases
|
||||||
|
if deleteHistoryConfirmed || (!saveHistoryConfirmed && !defaultSave) {
|
||||||
|
log.Debugf("purging conversation...")
|
||||||
// TODO: At some point in the future this needs to iterate over channels and make a decision for each on..
|
// TODO: At some point in the future this needs to iterate over channels and make a decision for each on..
|
||||||
cps.PurgeConversationChannel(conversation.ID, 0)
|
cps.PurgeConversationChannel(conversation.ID, 0)
|
||||||
}
|
}
|
||||||
|
@ -711,46 +886,50 @@ func (cps *CwtchProfileStorage) PurgeNonSavedMessages() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close closes the underlying database and prepared statements
|
// Close closes the underlying database and prepared statements
|
||||||
func (cps *CwtchProfileStorage) Close() {
|
func (cps *CwtchProfileStorage) Close(purgeAllNonSavedMessages bool) {
|
||||||
if cps.db != nil {
|
if cps.db != nil {
|
||||||
|
if purgeAllNonSavedMessages {
|
||||||
cps.PurgeNonSavedMessages()
|
cps.PurgeNonSavedMessages()
|
||||||
|
}
|
||||||
|
// We can't lock before this..
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
|
|
||||||
cps.insertProfileKeyValueStmt.Close()
|
_ = cps.insertProfileKeyValueStmt.Close()
|
||||||
cps.selectProfileKeyValueStmt.Close()
|
_ = cps.selectProfileKeyValueStmt.Close()
|
||||||
|
|
||||||
cps.insertConversationStmt.Close()
|
_ = cps.insertConversationStmt.Close()
|
||||||
cps.fetchAllConversationsStmt.Close()
|
_ = cps.fetchAllConversationsStmt.Close()
|
||||||
cps.selectConversationStmt.Close()
|
_ = cps.selectConversationStmt.Close()
|
||||||
cps.selectConversationByHandleStmt.Close()
|
_ = cps.selectConversationByHandleStmt.Close()
|
||||||
cps.acceptConversationStmt.Close()
|
_ = cps.acceptConversationStmt.Close()
|
||||||
cps.deleteConversationStmt.Close()
|
_ = cps.deleteConversationStmt.Close()
|
||||||
cps.setConversationAttributesStmt.Close()
|
_ = cps.setConversationAttributesStmt.Close()
|
||||||
cps.setConversationACLStmt.Close()
|
_ = cps.setConversationACLStmt.Close()
|
||||||
|
|
||||||
for _, v := range cps.channelInsertStmts {
|
for _, v := range cps.channelInsertStmts {
|
||||||
v.Close()
|
_ = v.Close()
|
||||||
}
|
}
|
||||||
for _, v := range cps.channelUpdateMessageStmts {
|
for _, v := range cps.channelUpdateMessageStmts {
|
||||||
v.Close()
|
_ = v.Close()
|
||||||
}
|
}
|
||||||
for _, v := range cps.channelGetMessageStmts {
|
for _, v := range cps.channelGetMessageStmts {
|
||||||
v.Close()
|
_ = v.Close()
|
||||||
}
|
}
|
||||||
for _, v := range cps.channelGetMessageBySignatureStmts {
|
for _, v := range cps.channelGetMessageBySignatureStmts {
|
||||||
v.Close()
|
_ = v.Close()
|
||||||
}
|
}
|
||||||
for _, v := range cps.channelGetCountStmts {
|
for _, v := range cps.channelGetCountStmts {
|
||||||
v.Close()
|
_ = v.Close()
|
||||||
}
|
}
|
||||||
for _, v := range cps.channelGetMostRecentMessagesStmts {
|
for _, v := range cps.channelGetMostRecentMessagesStmts {
|
||||||
v.Close()
|
_ = v.Close()
|
||||||
}
|
}
|
||||||
for _, v := range cps.channelGetMessageByContentHashStmts {
|
for _, v := range cps.channelGetMessageByContentHashStmts {
|
||||||
v.Close()
|
_ = v.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
cps.db.Close()
|
_ = cps.db.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -767,7 +946,81 @@ func (cps *CwtchProfileStorage) Delete() {
|
||||||
// **note* this is technically a very dangerous API and should only be called after
|
// **note* this is technically a very dangerous API and should only be called after
|
||||||
// checks on the current password and the derived new password.
|
// checks on the current password and the derived new password.
|
||||||
func (cps *CwtchProfileStorage) Rekey(newkey [32]byte) error {
|
func (cps *CwtchProfileStorage) Rekey(newkey [32]byte) error {
|
||||||
|
cps.mutex.Lock()
|
||||||
|
defer cps.mutex.Unlock()
|
||||||
// PRAGMA queries don't allow subs...
|
// PRAGMA queries don't allow subs...
|
||||||
_, err := cps.db.Exec(fmt.Sprintf(`PRAGMA rekey="x'%x'";`, newkey))
|
_, err := cps.db.Exec(fmt.Sprintf(`PRAGMA rekey="x'%x'";`, newkey))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Export takes in a file name and creates an exported cwtch profile file (which in reality is a compressed tarball).
|
||||||
|
func (cps *CwtchProfileStorage) Export(filename string) error {
|
||||||
|
profileDB := filepath.Join(cps.ProfileDirectory, dbFile)
|
||||||
|
profileSalt := filepath.Join(cps.ProfileDirectory, saltFile)
|
||||||
|
profileVersion := filepath.Join(cps.ProfileDirectory, versionFile)
|
||||||
|
|
||||||
|
file, err := os.Create(filename)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not create tarball file '%s', got error '%s'", filename, err.Error())
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
gzipWriter := gzip.NewWriter(file)
|
||||||
|
defer gzipWriter.Close()
|
||||||
|
|
||||||
|
tarWriter := tar.NewWriter(gzipWriter)
|
||||||
|
defer tarWriter.Close()
|
||||||
|
|
||||||
|
// We need to know the base directory so we can import it later (and prevent duplicates)...
|
||||||
|
profilePath := filepath.Base(cps.ProfileDirectory)
|
||||||
|
|
||||||
|
err = addFileToTarWriter(profilePath, profileDB, tarWriter)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not add file '%s', to tarball, got error '%s'", profileDB, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
err = addFileToTarWriter(profilePath, profileSalt, tarWriter)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not add file '%s', to tarball, got error '%s'", profileDB, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
err = addFileToTarWriter(profilePath, profileVersion, tarWriter)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not add file '%s', to tarball, got error '%s'", profileDB, err.Error())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addFileToTarWriter(profilePath string, filePath string, tarWriter *tar.Writer) error {
|
||||||
|
file, err := os.Open(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not open file '%s', got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
stat, err := file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not get stat for file '%s', got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
header := &tar.Header{
|
||||||
|
// Note: we are using strings.Join here deliberately so that we can import the profile
|
||||||
|
// in a cross platform way (e.g. using filepath here would result in different names on Windows v.s Linux)
|
||||||
|
Name: strings.Join([]string{profilePath, stat.Name()}, "/"),
|
||||||
|
Size: stat.Size(),
|
||||||
|
Mode: int64(stat.Mode()),
|
||||||
|
ModTime: stat.ModTime(),
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tarWriter.WriteHeader(header)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not write header for file '%s', got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = io.Copy(tarWriter, file)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not copy the file '%s' data to the tarball, got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,52 @@
|
||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ProfileHooks interface {
|
||||||
|
// EventsToRegister returns a set of events that the extension is interested hooking
|
||||||
|
EventsToRegister() []event.Type
|
||||||
|
|
||||||
|
// ExperimentsToRegister returns a set of experiments that the extension is interested in being notified about
|
||||||
|
ExperimentsToRegister() []string
|
||||||
|
|
||||||
|
// OnEvent is called whenever an event Registered with RegisterEvents is called
|
||||||
|
OnEvent(event event.Event, profile CwtchPeer)
|
||||||
|
|
||||||
|
// OnContactRequestValue is Hooked when a contact sends a request for the given path
|
||||||
|
OnContactRequestValue(profile CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath)
|
||||||
|
|
||||||
|
// OnContactReceiveValue is Hooked after a profile receives a response to a Get/Val Request
|
||||||
|
OnContactReceiveValue(profile CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool)
|
||||||
|
|
||||||
|
// NotifySettingsUpdate allow profile hooks to access configs e.g. download folder
|
||||||
|
NotifySettingsUpdate(settings settings.GlobalSettings)
|
||||||
|
}
|
||||||
|
|
||||||
|
type ProfileHook struct {
|
||||||
|
extension ProfileHooks
|
||||||
|
events map[event.Type]bool
|
||||||
|
experiments map[string]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func ConstructHook(extension ProfileHooks) ProfileHook {
|
||||||
|
events := make(map[event.Type]bool)
|
||||||
|
for _, e := range extension.EventsToRegister() {
|
||||||
|
events[e] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
experiments := make(map[string]bool)
|
||||||
|
for _, experiment := range extension.ExperimentsToRegister() {
|
||||||
|
experiments[experiment] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return ProfileHook{
|
||||||
|
extension,
|
||||||
|
events,
|
||||||
|
experiments,
|
||||||
|
}
|
||||||
|
}
|
|
@ -5,6 +5,8 @@ import (
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"cwtch.im/cwtch/model/attr"
|
"cwtch.im/cwtch/model/attr"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/settings"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -18,7 +20,9 @@ type ModifyPeeringState interface {
|
||||||
BlockUnknownConnections()
|
BlockUnknownConnections()
|
||||||
AllowUnknownConnections()
|
AllowUnknownConnections()
|
||||||
PeerWithOnion(string)
|
PeerWithOnion(string)
|
||||||
JoinServer(string) error
|
QueueJoinServer(string)
|
||||||
|
DisconnectFromPeer(string)
|
||||||
|
DisconnectFromServer(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ModifyContactsAndPeers is a meta-interface intended to restrict a call to reading and modifying contacts
|
// ModifyContactsAndPeers is a meta-interface intended to restrict a call to reading and modifying contacts
|
||||||
|
@ -46,8 +50,16 @@ type ModifyServers interface {
|
||||||
|
|
||||||
// SendMessages enables a caller to sender messages to a contact
|
// SendMessages enables a caller to sender messages to a contact
|
||||||
type SendMessages interface {
|
type SendMessages interface {
|
||||||
SendMessage(conversation int, message string) error
|
SendMessage(conversation int, message string) (int, error)
|
||||||
SendInviteToConversation(conversationID int, inviteConversationID int) error
|
|
||||||
|
// EnhancedSendMessage Attempts to Send a Message and Immediately Attempts to Lookup the Message in the Database
|
||||||
|
EnhancedSendMessage(conversation int, message string) string
|
||||||
|
|
||||||
|
SendInviteToConversation(conversationID int, inviteConversationID int) (int, error)
|
||||||
|
|
||||||
|
// EnhancedSendInviteMessage Attempts to Send an Invite and Immediately Attempts to Lookup the Message in the Database
|
||||||
|
EnhancedSendInviteMessage(conversation int, inviteConversationID int) string
|
||||||
|
|
||||||
SendScopedZonedGetValToContact(conversationID int, scope attr.Scope, zone attr.Zone, key string)
|
SendScopedZonedGetValToContact(conversationID int, scope attr.Scope, zone attr.Zone, key string)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -59,12 +71,16 @@ type CwtchPeer interface {
|
||||||
// most functions
|
// most functions
|
||||||
Init(event.Manager)
|
Init(event.Manager)
|
||||||
|
|
||||||
GenerateProtocolEngine(acn connectivity.ACN, bus event.Manager) (connections.Engine, error)
|
GenerateProtocolEngine(acn connectivity.ACN, bus event.Manager, engineHooks connections.EngineHooks) (connections.Engine, error)
|
||||||
|
|
||||||
AutoHandleEvents(events []event.Type)
|
AutoHandleEvents(events []event.Type)
|
||||||
Listen()
|
Listen()
|
||||||
|
StartConnections(doPeers, doServers bool)
|
||||||
|
// Deprecated in 1.10
|
||||||
StartPeersConnections()
|
StartPeersConnections()
|
||||||
|
// Deprecated in 1.10
|
||||||
StartServerConnections()
|
StartServerConnections()
|
||||||
|
|
||||||
Shutdown()
|
Shutdown()
|
||||||
|
|
||||||
// GetOnion is deprecated. If you find yourself needing to rely on this method it is time
|
// GetOnion is deprecated. If you find yourself needing to rely on this method it is time
|
||||||
|
@ -81,6 +97,9 @@ type CwtchPeer interface {
|
||||||
// scope.zone.key = value
|
// scope.zone.key = value
|
||||||
GetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool)
|
GetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool)
|
||||||
|
|
||||||
|
// GetScopedZonedAttributeKeys returns all keys associated with a given scope and zone
|
||||||
|
GetScopedZonedAttributeKeys(scope attr.Scope, zone attr.Zone) ([]string, error)
|
||||||
|
|
||||||
AccessPeeringState
|
AccessPeeringState
|
||||||
ModifyPeeringState
|
ModifyPeeringState
|
||||||
|
|
||||||
|
@ -93,15 +112,27 @@ type CwtchPeer interface {
|
||||||
|
|
||||||
// Import Bundle
|
// Import Bundle
|
||||||
ImportBundle(string) error
|
ImportBundle(string) error
|
||||||
|
EnhancedImportBundle(string) string
|
||||||
|
|
||||||
// New Unified Conversation Interfaces
|
// New Unified Conversation Interfaces
|
||||||
NewContactConversation(handle string, acl model.AccessControl, accepted bool) (int, error)
|
NewContactConversation(handle string, acl model.AccessControl, accepted bool) (int, error)
|
||||||
FetchConversations() ([]*model.Conversation, error)
|
FetchConversations() ([]*model.Conversation, error)
|
||||||
|
ArchiveConversation(conversation int)
|
||||||
GetConversationInfo(conversation int) (*model.Conversation, error)
|
GetConversationInfo(conversation int) (*model.Conversation, error)
|
||||||
FetchConversationInfo(handle string) (*model.Conversation, error)
|
FetchConversationInfo(handle string) (*model.Conversation, error)
|
||||||
|
|
||||||
|
// API-level management of conversation access control
|
||||||
|
UpdateConversationAccessControlList(id int, acl model.AccessControlList) error
|
||||||
|
EnhancedUpdateConversationAccessControlList(conversation int, acjson string) error
|
||||||
|
|
||||||
|
GetConversationAccessControlList(conversation int) (model.AccessControlList, error)
|
||||||
|
EnhancedGetConversationAccessControlList(conversation int) (string, error)
|
||||||
|
|
||||||
|
// Convieniance Functions for ACL Management
|
||||||
AcceptConversation(conversation int) error
|
AcceptConversation(conversation int) error
|
||||||
BlockConversation(conversation int) error
|
BlockConversation(conversation int) error
|
||||||
UnblockConversation(conversation int) error
|
UnblockConversation(conversation int) error
|
||||||
|
|
||||||
SetConversationAttribute(conversation int, path attr.ScopedZonedPath, value string) error
|
SetConversationAttribute(conversation int, path attr.ScopedZonedPath, value string) error
|
||||||
GetConversationAttribute(conversation int, path attr.ScopedZonedPath) (string, error)
|
GetConversationAttribute(conversation int, path attr.ScopedZonedPath) (string, error)
|
||||||
DeleteConversation(conversation int) error
|
DeleteConversation(conversation int) error
|
||||||
|
@ -110,11 +141,41 @@ type CwtchPeer interface {
|
||||||
GetChannelMessage(conversation int, channel int, id int) (string, model.Attributes, error)
|
GetChannelMessage(conversation int, channel int, id int) (string, model.Attributes, error)
|
||||||
GetChannelMessageCount(conversation int, channel int) (int, error)
|
GetChannelMessageCount(conversation int, channel int) (int, error)
|
||||||
GetChannelMessageByContentHash(conversation int, channel int, contenthash string) (int, error)
|
GetChannelMessageByContentHash(conversation int, channel int, contenthash string) (int, error)
|
||||||
GetMostRecentMessages(conversation int, channel int, offset int, limit int) ([]model.ConversationMessage, error)
|
GetMostRecentMessages(conversation int, channel int, offset int, limit uint) ([]model.ConversationMessage, error)
|
||||||
UpdateMessageAttribute(conversation int, channel int, id int, key string, value string) error
|
UpdateMessageAttribute(conversation int, channel int, id int, key string, value string) error
|
||||||
|
SearchConversations(pattern string) string
|
||||||
|
|
||||||
ShareFile(fileKey string, serializedManifest string)
|
// EnhancedGetMessageById returns a json-encoded enhanced message, suitable for rendering in a UI
|
||||||
|
EnhancedGetMessageById(conversation int, mid int) string
|
||||||
|
|
||||||
|
// EnhancedGetMessageByContentHash returns a json-encoded enhanced message, suitable for rendering in a UI
|
||||||
|
EnhancedGetMessageByContentHash(conversation int, hash string) string
|
||||||
|
|
||||||
|
// EnhancedGetMessages returns a set of json-encoded enhanced messages, suitable for rendering in a UI
|
||||||
|
EnhancedGetMessages(conversation int, index int, count uint) string
|
||||||
|
|
||||||
|
// Server Token APIS
|
||||||
|
// TODO move these to feature protected interfaces
|
||||||
|
StoreCachedTokens(tokenServer string, tokens []*privacypass.Token)
|
||||||
|
|
||||||
|
// Profile Management
|
||||||
CheckPassword(password string) bool
|
CheckPassword(password string) bool
|
||||||
ChangePassword(oldpassword string, newpassword string, newpasswordAgain string) error
|
ChangePassword(oldpassword string, newpassword string, newpasswordAgain string) error
|
||||||
|
ExportProfile(file string) error
|
||||||
Delete()
|
Delete()
|
||||||
|
PublishEvent(resp event.Event)
|
||||||
|
RegisterHook(hook ProfileHooks)
|
||||||
|
UpdateExperiments(enabled bool, experiments map[string]bool)
|
||||||
|
NotifySettingsUpdate(settings settings.GlobalSettings)
|
||||||
|
IsFeatureEnabled(featureName string) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnhancedMessage wraps a Cwtch model.Message with some additional data to reduce calls from the UI.
|
||||||
|
type EnhancedMessage struct {
|
||||||
|
model.Message
|
||||||
|
ID int // the actual ID of the message in the database (not the row number)
|
||||||
|
LocalIndex int // local index in the DB (row #). Can be empty (most calls supply it) but lookup by hash will fill it
|
||||||
|
ContentHash string
|
||||||
|
ContactImage string
|
||||||
|
Attributes map[string]string
|
||||||
}
|
}
|
||||||
|
|
174
peer/storage.go
174
peer/storage.go
|
@ -1,23 +1,27 @@
|
||||||
package peer
|
package peer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"compress/gzip"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"golang.org/x/crypto/pbkdf2"
|
"golang.org/x/crypto/pbkdf2"
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
const versionFile = "VERSION"
|
const versionFile = "VERSION"
|
||||||
const version = "2"
|
const version = "2"
|
||||||
const saltFile = "SALT"
|
const saltFile = "SALT"
|
||||||
|
const dbFile = "db"
|
||||||
|
|
||||||
// CreateKeySalt derives a key and salt from a password: returns key, salt, err
|
// CreateKeySalt derives a key and salt from a password: returns key, salt, err
|
||||||
func CreateKeySalt(password string) ([32]byte, [128]byte, error) {
|
func CreateKeySalt(password string) ([32]byte, [128]byte, error) {
|
||||||
|
@ -43,7 +47,7 @@ func createKey(password string, salt []byte) [32]byte {
|
||||||
}
|
}
|
||||||
|
|
||||||
func initV2Directory(directory, password string) ([32]byte, [128]byte, error) {
|
func initV2Directory(directory, password string) ([32]byte, [128]byte, error) {
|
||||||
os.Mkdir(directory, 0700)
|
os.MkdirAll(directory, 0700)
|
||||||
|
|
||||||
key, salt, err := CreateKeySalt(password)
|
key, salt, err := CreateKeySalt(password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -51,12 +55,12 @@ func initV2Directory(directory, password string) ([32]byte, [128]byte, error) {
|
||||||
return [32]byte{}, [128]byte{}, err
|
return [32]byte{}, [128]byte{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = ioutil.WriteFile(path.Join(directory, versionFile), []byte(version), 0600); err != nil {
|
if err = os.WriteFile(path.Join(directory, versionFile), []byte(version), 0600); err != nil {
|
||||||
log.Errorf("Could not write version file: %v", err)
|
log.Errorf("Could not write version file: %v", err)
|
||||||
return [32]byte{}, [128]byte{}, err
|
return [32]byte{}, [128]byte{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = ioutil.WriteFile(path.Join(directory, saltFile), salt[:], 0600); err != nil {
|
if err = os.WriteFile(path.Join(directory, saltFile), salt[:], 0600); err != nil {
|
||||||
log.Errorf("Could not write salt file: %v", err)
|
log.Errorf("Could not write salt file: %v", err)
|
||||||
return [32]byte{}, [128]byte{}, err
|
return [32]byte{}, [128]byte{}, err
|
||||||
}
|
}
|
||||||
|
@ -65,7 +69,7 @@ func initV2Directory(directory, password string) ([32]byte, [128]byte, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func openEncryptedDatabase(profileDirectory string, password string, createIfNotExists bool) (*sql.DB, error) {
|
func openEncryptedDatabase(profileDirectory string, password string, createIfNotExists bool) (*sql.DB, error) {
|
||||||
salt, err := ioutil.ReadFile(path.Join(profileDirectory, saltFile))
|
salt, err := os.ReadFile(path.Join(profileDirectory, saltFile))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -151,7 +155,7 @@ func CreateEncryptedStore(profileDirectory string, password string) (*CwtchProfi
|
||||||
|
|
||||||
// FromEncryptedDatabase constructs a Cwtch Profile from an existing Encrypted Database
|
// FromEncryptedDatabase constructs a Cwtch Profile from an existing Encrypted Database
|
||||||
func FromEncryptedDatabase(profileDirectory string, password string) (CwtchPeer, error) {
|
func FromEncryptedDatabase(profileDirectory string, password string) (CwtchPeer, error) {
|
||||||
log.Infof("Loading Encrypted Profile: %v", profileDirectory)
|
log.Debugf("Loading Encrypted Profile: %v", profileDirectory)
|
||||||
db, err := openEncryptedDatabase(profileDirectory, password, false)
|
db, err := openEncryptedDatabase(profileDirectory, password, false)
|
||||||
if db == nil || err != nil {
|
if db == nil || err != nil {
|
||||||
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
|
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
|
||||||
|
@ -165,3 +169,161 @@ func FromEncryptedDatabase(profileDirectory string, password string) (CwtchPeer,
|
||||||
}
|
}
|
||||||
return FromEncryptedStorage(cps), nil
|
return FromEncryptedStorage(cps), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ImportProfile(exportedCwtchFile string, profilesDir string, password string) (CwtchPeer, error) {
|
||||||
|
profileID, err := checkCwtchProfileBackupFile(exportedCwtchFile)
|
||||||
|
if profileID == "" || err != nil {
|
||||||
|
log.Errorf("%s is an invalid cwtch backup file: %s", profileID, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
log.Debugf("%s is a valid cwtch backup file", profileID)
|
||||||
|
|
||||||
|
profileDBFile := filepath.Join(profilesDir, profileID, dbFile)
|
||||||
|
log.Debugf("checking %v", profileDBFile)
|
||||||
|
if _, err := os.Stat(profileDBFile); errors.Is(err, os.ErrNotExist) {
|
||||||
|
// backup is valid and the profile hasn't been imported yet, time to extract and check the password
|
||||||
|
profileDir := filepath.Join(profilesDir, profileID)
|
||||||
|
os.MkdirAll(profileDir, 0700)
|
||||||
|
err := importCwtchProfileBackupFile(exportedCwtchFile, profilesDir)
|
||||||
|
if err == nil {
|
||||||
|
profile, err := FromEncryptedDatabase(profileDir, password)
|
||||||
|
if err == nil {
|
||||||
|
return profile, err
|
||||||
|
}
|
||||||
|
// Otherwise purge
|
||||||
|
log.Errorf("error importing profile: %v. removing %s", err, profileDir)
|
||||||
|
os.RemoveAll(profileDir)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("%s is already a profile for this app", profileID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkCwtchProfileBackupFile(srcFile string) (string, error) {
|
||||||
|
f, err := os.Open(srcFile)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
gzf, err := gzip.NewReader(f)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
tarReader := tar.NewReader(gzf)
|
||||||
|
|
||||||
|
profileName := ""
|
||||||
|
|
||||||
|
for {
|
||||||
|
header, err := tarReader.Next()
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch header.Typeflag {
|
||||||
|
case tar.TypeDir:
|
||||||
|
return "", errors.New("invalid cwtch backup file")
|
||||||
|
case tar.TypeReg:
|
||||||
|
parts := strings.Split(header.Name, "/")
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return "", errors.New("invalid header name")
|
||||||
|
}
|
||||||
|
dir := parts[0]
|
||||||
|
profileFileType := parts[1]
|
||||||
|
|
||||||
|
_, hexErr := hex.DecodeString(dir)
|
||||||
|
if dir == "." || dir == ".." || len(dir) != 32 || hexErr != nil {
|
||||||
|
return "", errors.New("invalid profile name")
|
||||||
|
}
|
||||||
|
|
||||||
|
if profileName == "" {
|
||||||
|
profileName = dir
|
||||||
|
}
|
||||||
|
if dir != profileName {
|
||||||
|
return "", errors.New("invalid cwtch backup file")
|
||||||
|
}
|
||||||
|
|
||||||
|
if profileFileType != dbFile && profileFileType != saltFile && profileFileType != versionFile {
|
||||||
|
return "", errors.New("invalid cwtch backup file")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return "", errors.New("invalid cwtch backup file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return profileName, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func importCwtchProfileBackupFile(srcFile string, profilesDir string) error {
|
||||||
|
f, err := os.Open(srcFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
gzf, err := gzip.NewReader(f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tarReader := tar.NewReader(gzf)
|
||||||
|
|
||||||
|
profileName := ""
|
||||||
|
|
||||||
|
for {
|
||||||
|
header, err := tarReader.Next()
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch header.Typeflag {
|
||||||
|
case tar.TypeDir:
|
||||||
|
return errors.New("invalid cwtch backup file")
|
||||||
|
case tar.TypeReg:
|
||||||
|
// using split here because we deliberately construct these paths in a cross-platform consistent way
|
||||||
|
parts := strings.Split(header.Name, "/")
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return errors.New("invalid header name")
|
||||||
|
}
|
||||||
|
dir := parts[0]
|
||||||
|
base := parts[1]
|
||||||
|
|
||||||
|
_, hexErr := hex.DecodeString(dir)
|
||||||
|
if dir == "." || dir == ".." || len(dir) != 32 || hexErr != nil {
|
||||||
|
return errors.New("invalid profile name")
|
||||||
|
}
|
||||||
|
|
||||||
|
if profileName == "" {
|
||||||
|
profileName = dir
|
||||||
|
}
|
||||||
|
|
||||||
|
if dir != profileName {
|
||||||
|
return errors.New("invalid cwtch backup file")
|
||||||
|
}
|
||||||
|
|
||||||
|
// here we use filepath.Join to construct a valid directory path
|
||||||
|
outFile, err := os.Create(filepath.Join(profilesDir, dir, base))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error importing cwtch profile file: %s", err)
|
||||||
|
}
|
||||||
|
defer outFile.Close()
|
||||||
|
if _, err := io.Copy(outFile, tarReader); err != nil {
|
||||||
|
return fmt.Errorf("error importing cwtch profile file: %s", err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return errors.New("invalid cwtch backup file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
@ -4,8 +4,11 @@ import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
|
@ -24,6 +27,13 @@ import (
|
||||||
"golang.org/x/crypto/ed25519"
|
"golang.org/x/crypto/ed25519"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// 32 from tor/src/app/config/config.c MaxClientCircuitsPending
|
||||||
|
// we lower a bit because there's a lot of spillage
|
||||||
|
// - just cus we get a SOCKS timeout doesn't mean tor has stopped trying as a huge sorce
|
||||||
|
// - potential multiple profiles as a huge source
|
||||||
|
// - second order connections like token service's second servers aren't tracked in our system adding a few extra periodically
|
||||||
|
const TorMaxPendingConns = 28
|
||||||
|
|
||||||
type connectionLockedService struct {
|
type connectionLockedService struct {
|
||||||
service tapir.Service
|
service tapir.Service
|
||||||
connectingLock sync.Mutex
|
connectingLock sync.Mutex
|
||||||
|
@ -40,7 +50,7 @@ type engine struct {
|
||||||
authorizations sync.Map // string(onion) => model.Authorization
|
authorizations sync.Map // string(onion) => model.Authorization
|
||||||
|
|
||||||
// Block Unknown Contacts
|
// Block Unknown Contacts
|
||||||
blockUnknownContacts bool
|
blockUnknownContacts atomic.Bool
|
||||||
|
|
||||||
// Pointer to the Global Event Manager
|
// Pointer to the Global Event Manager
|
||||||
eventManager event.Manager
|
eventManager event.Manager
|
||||||
|
@ -60,7 +70,10 @@ type engine struct {
|
||||||
// file sharing subsystem is responsible for maintaining active shares and downloads
|
// file sharing subsystem is responsible for maintaining active shares and downloads
|
||||||
filesharingSubSystem files.FileSharingSubSystem
|
filesharingSubSystem files.FileSharingSubSystem
|
||||||
|
|
||||||
shuttingDown bool
|
tokenManagers sync.Map // [tokenService][]TokenManager
|
||||||
|
|
||||||
|
shuttingDown atomic.Bool
|
||||||
|
onSendMessage func(connection tapir.Connection, message []byte) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Engine (ProtocolEngine) encapsulates the logic necessary to make and receive Cwtch connections.
|
// Engine (ProtocolEngine) encapsulates the logic necessary to make and receive Cwtch connections.
|
||||||
|
@ -74,12 +87,16 @@ type Engine interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProtocolEngine initializes a new engine that runs Cwtch using the given parameters
|
// NewProtocolEngine initializes a new engine that runs Cwtch using the given parameters
|
||||||
func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateKey, acn connectivity.ACN, eventManager event.Manager, peerAuthorizations map[string]model.Authorization) Engine {
|
func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateKey, acn connectivity.ACN, eventManager event.Manager, peerAuthorizations map[string]model.Authorization, engineHooks EngineHooks) Engine {
|
||||||
engine := new(engine)
|
engine := new(engine)
|
||||||
engine.identity = identity
|
engine.identity = identity
|
||||||
engine.privateKey = privateKey
|
engine.privateKey = privateKey
|
||||||
engine.ephemeralServices = make(map[string]*connectionLockedService)
|
engine.ephemeralServices = make(map[string]*connectionLockedService)
|
||||||
engine.queue = event.NewQueue()
|
engine.queue = event.NewQueue()
|
||||||
|
|
||||||
|
// the standard send message function
|
||||||
|
engine.onSendMessage = engineHooks.SendPeerMessage
|
||||||
|
|
||||||
go engine.eventHandler()
|
go engine.eventHandler()
|
||||||
|
|
||||||
engine.acn = acn
|
engine.acn = acn
|
||||||
|
@ -91,8 +108,8 @@ func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateK
|
||||||
engine.eventManager = eventManager
|
engine.eventManager = eventManager
|
||||||
|
|
||||||
engine.eventManager.Subscribe(event.ProtocolEngineStartListen, engine.queue)
|
engine.eventManager.Subscribe(event.ProtocolEngineStartListen, engine.queue)
|
||||||
|
engine.eventManager.Subscribe(event.ProtocolEngineShutdown, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.PeerRequest, engine.queue)
|
engine.eventManager.Subscribe(event.PeerRequest, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.RetryPeerRequest, engine.queue)
|
|
||||||
engine.eventManager.Subscribe(event.InvitePeerToGroup, engine.queue)
|
engine.eventManager.Subscribe(event.InvitePeerToGroup, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.JoinServer, engine.queue)
|
engine.eventManager.Subscribe(event.JoinServer, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.LeaveServer, engine.queue)
|
engine.eventManager.Subscribe(event.LeaveServer, engine.queue)
|
||||||
|
@ -101,17 +118,23 @@ func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateK
|
||||||
engine.eventManager.Subscribe(event.SendGetValMessageToPeer, engine.queue)
|
engine.eventManager.Subscribe(event.SendGetValMessageToPeer, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.SendRetValMessageToPeer, engine.queue)
|
engine.eventManager.Subscribe(event.SendRetValMessageToPeer, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.DeleteContact, engine.queue)
|
engine.eventManager.Subscribe(event.DeleteContact, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.DeleteGroup, engine.queue)
|
|
||||||
|
|
||||||
engine.eventManager.Subscribe(event.UpdateConversationAuthorization, engine.queue)
|
engine.eventManager.Subscribe(event.UpdateConversationAuthorization, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.BlockUnknownPeers, engine.queue)
|
engine.eventManager.Subscribe(event.BlockUnknownPeers, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.AllowUnknownPeers, engine.queue)
|
engine.eventManager.Subscribe(event.AllowUnknownPeers, engine.queue)
|
||||||
|
engine.eventManager.Subscribe(event.DisconnectPeerRequest, engine.queue)
|
||||||
|
engine.eventManager.Subscribe(event.DisconnectServerRequest, engine.queue)
|
||||||
|
|
||||||
// File Handling
|
// File Handling
|
||||||
engine.eventManager.Subscribe(event.ShareManifest, engine.queue)
|
engine.eventManager.Subscribe(event.ShareManifest, engine.queue)
|
||||||
|
engine.eventManager.Subscribe(event.StopFileShare, engine.queue)
|
||||||
|
engine.eventManager.Subscribe(event.StopAllFileShares, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.ManifestSizeReceived, engine.queue)
|
engine.eventManager.Subscribe(event.ManifestSizeReceived, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.ManifestSaved, engine.queue)
|
engine.eventManager.Subscribe(event.ManifestSaved, engine.queue)
|
||||||
|
|
||||||
|
// Token Server
|
||||||
|
engine.eventManager.Subscribe(event.MakeAntispamPayment, engine.queue)
|
||||||
|
|
||||||
for peer, authorization := range peerAuthorizations {
|
for peer, authorization := range peerAuthorizations {
|
||||||
engine.authorizations.Store(peer, authorization)
|
engine.authorizations.Store(peer, authorization)
|
||||||
}
|
}
|
||||||
|
@ -128,22 +151,21 @@ func (e *engine) EventManager() event.Manager {
|
||||||
|
|
||||||
// eventHandler process events from other subsystems
|
// eventHandler process events from other subsystems
|
||||||
func (e *engine) eventHandler() {
|
func (e *engine) eventHandler() {
|
||||||
|
log.Debugf("restartFlow Launching ProtocolEngine listener")
|
||||||
for {
|
for {
|
||||||
ev := e.queue.Next()
|
ev := e.queue.Next()
|
||||||
|
// optimistic shutdown...
|
||||||
|
if e.shuttingDown.Load() {
|
||||||
|
return
|
||||||
|
}
|
||||||
switch ev.EventType {
|
switch ev.EventType {
|
||||||
case event.StatusRequest:
|
case event.StatusRequest:
|
||||||
e.eventManager.Publish(event.Event{EventType: event.ProtocolEngineStatus, EventID: ev.EventID})
|
e.eventManager.Publish(event.Event{EventType: event.ProtocolEngineStatus, EventID: ev.EventID})
|
||||||
case event.PeerRequest:
|
case event.PeerRequest:
|
||||||
|
log.Debugf("restartFlow Handling Peer Request")
|
||||||
if torProvider.IsValidHostname(ev.Data[event.RemotePeer]) {
|
if torProvider.IsValidHostname(ev.Data[event.RemotePeer]) {
|
||||||
go e.peerWithOnion(ev.Data[event.RemotePeer])
|
go e.peerWithOnion(ev.Data[event.RemotePeer])
|
||||||
}
|
}
|
||||||
case event.RetryPeerRequest:
|
|
||||||
// This event allows engine to treat (automated) retry peering requests differently to user-specified
|
|
||||||
// peer events
|
|
||||||
if torProvider.IsValidHostname(ev.Data[event.RemotePeer]) {
|
|
||||||
log.Debugf("Retrying Peer Request: %v", ev.Data[event.RemotePeer])
|
|
||||||
go e.peerWithOnion(ev.Data[event.RemotePeer])
|
|
||||||
}
|
|
||||||
case event.InvitePeerToGroup:
|
case event.InvitePeerToGroup:
|
||||||
err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: event.ContextInvite, Data: []byte(ev.Data[event.GroupInvite])})
|
err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: event.ContextInvite, Data: []byte(ev.Data[event.GroupInvite])})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -155,7 +177,18 @@ func (e *engine) eventHandler() {
|
||||||
// will result in a full sync
|
// will result in a full sync
|
||||||
signature = []byte{}
|
signature = []byte{}
|
||||||
}
|
}
|
||||||
go e.peerWithTokenServer(ev.Data[event.GroupServer], ev.Data[event.ServerTokenOnion], ev.Data[event.ServerTokenY], signature)
|
// if we have been sent cached tokens, also deserialize them
|
||||||
|
cachedTokensJson := ev.Data[event.CachedTokens]
|
||||||
|
var cachedTokens []*privacypass.Token
|
||||||
|
if len(cachedTokensJson) != 0 {
|
||||||
|
json.Unmarshal([]byte(cachedTokensJson), &cachedTokens)
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a new token handler...
|
||||||
|
e.NewTokenHandler(ev.Data[event.ServerTokenOnion], cachedTokens)
|
||||||
|
go e.peerWithTokenServer(ev.Data[event.GroupServer], ev.Data[event.ServerTokenOnion], ev.Data[event.ServerTokenY], signature, cachedTokens)
|
||||||
|
case event.MakeAntispamPayment:
|
||||||
|
go e.makeAntispamPayment(ev.Data[event.GroupServer])
|
||||||
case event.LeaveServer:
|
case event.LeaveServer:
|
||||||
e.leaveServer(ev.Data[event.GroupServer])
|
e.leaveServer(ev.Data[event.GroupServer])
|
||||||
case event.DeleteContact:
|
case event.DeleteContact:
|
||||||
|
@ -163,11 +196,15 @@ func (e *engine) eventHandler() {
|
||||||
// We remove this peer from out blocklist which will prevent them from contacting us if we have "block unknown peers" turned on.
|
// We remove this peer from out blocklist which will prevent them from contacting us if we have "block unknown peers" turned on.
|
||||||
e.authorizations.Delete(ev.Data[event.RemotePeer])
|
e.authorizations.Delete(ev.Data[event.RemotePeer])
|
||||||
e.deleteConnection(onion)
|
e.deleteConnection(onion)
|
||||||
case event.DeleteGroup:
|
case event.DisconnectPeerRequest:
|
||||||
// TODO: There isn't a way here to determine if other Groups are using a server connection...
|
e.deleteConnection(ev.Data[event.RemotePeer])
|
||||||
|
case event.DisconnectServerRequest:
|
||||||
|
e.leaveServer(ev.Data[event.GroupServer])
|
||||||
case event.SendMessageToGroup:
|
case event.SendMessageToGroup:
|
||||||
ciphertext, _ := base64.StdEncoding.DecodeString(ev.Data[event.Ciphertext])
|
ciphertext, _ := base64.StdEncoding.DecodeString(ev.Data[event.Ciphertext])
|
||||||
signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
||||||
|
|
||||||
|
// launch a goroutine to post to the server
|
||||||
go e.sendMessageToGroup(ev.Data[event.GroupID], ev.Data[event.GroupServer], ciphertext, signature, 0)
|
go e.sendMessageToGroup(ev.Data[event.GroupID], ev.Data[event.GroupServer], ciphertext, signature, 0)
|
||||||
case event.SendMessageToPeer:
|
case event.SendMessageToPeer:
|
||||||
// TODO: remove this passthrough once the UI is integrated.
|
// TODO: remove this passthrough once the UI is integrated.
|
||||||
|
@ -208,14 +245,18 @@ func (e *engine) eventHandler() {
|
||||||
}
|
}
|
||||||
case event.AllowUnknownPeers:
|
case event.AllowUnknownPeers:
|
||||||
log.Debugf("%v now allows unknown connections", e.identity.Hostname())
|
log.Debugf("%v now allows unknown connections", e.identity.Hostname())
|
||||||
e.blockUnknownContacts = false
|
e.blockUnknownContacts.Store(false)
|
||||||
case event.BlockUnknownPeers:
|
case event.BlockUnknownPeers:
|
||||||
log.Debugf("%v now forbids unknown connections", e.identity.Hostname())
|
log.Debugf("%v now forbids unknown connections", e.identity.Hostname())
|
||||||
e.blockUnknownContacts = true
|
e.blockUnknownContacts.Store(true)
|
||||||
case event.ProtocolEngineStartListen:
|
case event.ProtocolEngineStartListen:
|
||||||
go e.listenFn()
|
go e.listenFn()
|
||||||
case event.ShareManifest:
|
case event.ShareManifest:
|
||||||
e.filesharingSubSystem.ShareFile(ev.Data[event.FileKey], ev.Data[event.SerializedManifest])
|
e.filesharingSubSystem.ShareFile(ev.Data[event.FileKey], ev.Data[event.SerializedManifest])
|
||||||
|
case event.StopFileShare:
|
||||||
|
e.filesharingSubSystem.StopFileShare(ev.Data[event.FileKey])
|
||||||
|
case event.StopAllFileShares:
|
||||||
|
e.filesharingSubSystem.StopAllFileShares()
|
||||||
case event.ManifestSizeReceived:
|
case event.ManifestSizeReceived:
|
||||||
handle := ev.Data[event.Handle]
|
handle := ev.Data[event.Handle]
|
||||||
key := ev.Data[event.FileKey]
|
key := ev.Data[event.FileKey]
|
||||||
|
@ -229,6 +270,14 @@ func (e *engine) eventHandler() {
|
||||||
serializedManifest := ev.Data[event.SerializedManifest]
|
serializedManifest := ev.Data[event.SerializedManifest]
|
||||||
tempFile := ev.Data[event.TempFile]
|
tempFile := ev.Data[event.TempFile]
|
||||||
title := ev.Data[event.NameSuggestion]
|
title := ev.Data[event.NameSuggestion]
|
||||||
|
|
||||||
|
// Another optimistic check here. Technically Cwtch profile should not request manifest on a download files
|
||||||
|
// but if they do then we should check if it exists up front. If it does then announce that the download
|
||||||
|
// is complete.
|
||||||
|
if _, filePath, success := e.filesharingSubSystem.VerifyFile(key); success {
|
||||||
|
log.Debugf("file verified and downloaded!")
|
||||||
|
e.eventManager.Publish(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: key, event.FilePath: filePath, event.TempFile: tempFile}))
|
||||||
|
} else {
|
||||||
// NOTE: for now there will probably only ever be a single chunk request. When we enable group
|
// NOTE: for now there will probably only ever be a single chunk request. When we enable group
|
||||||
// sharing and rehosting then this loop will serve as a a way of splitting the request among multiple
|
// sharing and rehosting then this loop will serve as a a way of splitting the request among multiple
|
||||||
// contacts
|
// contacts
|
||||||
|
@ -237,6 +286,9 @@ func (e *engine) eventHandler() {
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
case event.ProtocolEngineShutdown:
|
||||||
|
return
|
||||||
default:
|
default:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -247,7 +299,7 @@ func (e *engine) isBlocked(onion string) bool {
|
||||||
authorization, known := e.authorizations.Load(onion)
|
authorization, known := e.authorizations.Load(onion)
|
||||||
if !known {
|
if !known {
|
||||||
// if we block unknown peers we will block this contact
|
// if we block unknown peers we will block this contact
|
||||||
return e.blockUnknownContacts
|
return e.blockUnknownContacts.Load()
|
||||||
}
|
}
|
||||||
return authorization.(model.Authorization) == model.AuthBlocked
|
return authorization.(model.Authorization) == model.AuthBlocked
|
||||||
}
|
}
|
||||||
|
@ -258,7 +310,7 @@ func (e *engine) isAllowed(onion string) bool {
|
||||||
log.Errorf("attempted to lookup authorization of onion not in map...that should never happen")
|
log.Errorf("attempted to lookup authorization of onion not in map...that should never happen")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if e.blockUnknownContacts {
|
if e.blockUnknownContacts.Load() {
|
||||||
return authorization.(model.Authorization) == model.AuthApproved
|
return authorization.(model.Authorization) == model.AuthApproved
|
||||||
}
|
}
|
||||||
return authorization.(model.Authorization) != model.AuthBlocked
|
return authorization.(model.Authorization) != model.AuthBlocked
|
||||||
|
@ -273,27 +325,38 @@ func (e *engine) createPeerTemplate() *PeerApp {
|
||||||
peerAppTemplate.OnAuth = e.ignoreOnShutdown(e.peerAuthed)
|
peerAppTemplate.OnAuth = e.ignoreOnShutdown(e.peerAuthed)
|
||||||
peerAppTemplate.OnConnecting = e.ignoreOnShutdown(e.peerConnecting)
|
peerAppTemplate.OnConnecting = e.ignoreOnShutdown(e.peerConnecting)
|
||||||
peerAppTemplate.OnClose = e.ignoreOnShutdown(e.peerDisconnected)
|
peerAppTemplate.OnClose = e.ignoreOnShutdown(e.peerDisconnected)
|
||||||
|
peerAppTemplate.OnSendMessage = e.onSendMessage
|
||||||
return peerAppTemplate
|
return peerAppTemplate
|
||||||
}
|
}
|
||||||
|
|
||||||
// Listen sets up an onion listener to process incoming cwtch messages
|
// Listen sets up an onion listener to process incoming cwtch messages
|
||||||
func (e *engine) listenFn() {
|
func (e *engine) listenFn() {
|
||||||
err := e.service.Listen(e.createPeerTemplate())
|
err := e.service.Listen(e.createPeerTemplate())
|
||||||
if !e.shuttingDown {
|
if !e.shuttingDown.Load() {
|
||||||
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineStopped, map[event.Field]string{event.Identity: e.identity.Hostname(), event.Error: err.Error()}))
|
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineStopped, map[event.Field]string{event.Identity: e.identity.Hostname(), event.Error: err.Error()}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown tears down the eventHandler goroutine
|
// Shutdown tears down the eventHandler goroutine
|
||||||
func (e *engine) Shutdown() {
|
func (e *engine) Shutdown() {
|
||||||
e.shuttingDown = true
|
// don't accept any more events...
|
||||||
|
e.queue.Publish(event.NewEvent(event.ProtocolEngineShutdown, map[event.Field]string{}))
|
||||||
|
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineShutdown, map[event.Field]string{}))
|
||||||
e.service.Shutdown()
|
e.service.Shutdown()
|
||||||
|
e.shuttingDown.Store(true)
|
||||||
e.ephemeralServicesLock.Lock()
|
e.ephemeralServicesLock.Lock()
|
||||||
defer e.ephemeralServicesLock.Unlock()
|
defer e.ephemeralServicesLock.Unlock()
|
||||||
for _, connection := range e.ephemeralServices {
|
for _, connection := range e.ephemeralServices {
|
||||||
log.Infof("shutting down ephemeral service")
|
log.Infof("shutting down ephemeral service")
|
||||||
connection.service.Shutdown()
|
// work around: service.shutdown() can block for a long time if it is Open()ing a new connection, putting it in a
|
||||||
|
// goroutine means we can perform this operation and let the per service shutdown in their own time or until the app exits
|
||||||
|
conn := connection // don't capture loop variable
|
||||||
|
go func() {
|
||||||
|
conn.connectingLock.Lock()
|
||||||
|
conn.service.Shutdown()
|
||||||
|
conn.connectingLock.Unlock()
|
||||||
|
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
e.queue.Shutdown()
|
e.queue.Shutdown()
|
||||||
}
|
}
|
||||||
|
@ -305,48 +368,75 @@ func (e *engine) peerWithOnion(onion string) {
|
||||||
if !e.isBlocked(onion) {
|
if !e.isBlocked(onion) {
|
||||||
e.ignoreOnShutdown(e.peerConnecting)(onion)
|
e.ignoreOnShutdown(e.peerConnecting)(onion)
|
||||||
connected, err := e.service.Connect(onion, e.createPeerTemplate())
|
connected, err := e.service.Connect(onion, e.createPeerTemplate())
|
||||||
|
if connected && err == nil {
|
||||||
|
// on success CwtchPeer will handle Auth and other status updates
|
||||||
|
// early exit from this function...
|
||||||
|
return
|
||||||
|
}
|
||||||
// If we are already connected...check if we are authed and issue an auth event
|
// If we are already connected...check if we are authed and issue an auth event
|
||||||
// (This allows the ui to be stateless)
|
// (This allows the ui to be stateless)
|
||||||
if connected && err != nil {
|
if connected && err != nil {
|
||||||
conn, err := e.service.GetConnection(onion)
|
conn, err := e.service.WaitForCapabilityOrClose(onion, cwtchCapability)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if conn.HasCapability(cwtchCapability) {
|
if conn.HasCapability(cwtchCapability) {
|
||||||
e.ignoreOnShutdown(e.peerAuthed)(onion)
|
e.ignoreOnShutdown(e.peerAuthed)(onion)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
log.Errorf("PeerWithOnion something went very wrong...%v %v", onion, err)
|
||||||
|
if conn != nil {
|
||||||
|
conn.Close()
|
||||||
}
|
}
|
||||||
|
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
||||||
|
} else {
|
||||||
|
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *engine) makeAntispamPayment(onion string) {
|
||||||
|
log.Debugf("making antispam payment")
|
||||||
|
e.ephemeralServicesLock.Lock()
|
||||||
|
ephemeralService, ok := e.ephemeralServices[onion]
|
||||||
|
e.ephemeralServicesLock.Unlock()
|
||||||
|
|
||||||
|
if ephemeralService == nil || !ok {
|
||||||
|
log.Debugf("could not find associated group for antispam payment")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Only issue a disconnected error if we are disconnected (Connect will fail if a connection already exists)
|
// Before doing anything, send and event with the current number of token
|
||||||
if !connected && err != nil {
|
// This may unblock downstream processes who don't have an accurate token count
|
||||||
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
e.PokeTokenCount(onion)
|
||||||
|
|
||||||
|
conn, err := ephemeralService.service.GetConnection(onion)
|
||||||
|
if err == nil {
|
||||||
|
tokenApp, ok := (conn.App()).(*TokenBoardClient)
|
||||||
|
if ok {
|
||||||
|
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenApp.tokenServiceOnion, NewTokenManager())
|
||||||
|
tokenManager := tokenManagerPointer.(*TokenManager)
|
||||||
|
log.Debugf("checking antispam tokens %v", tokenManager.NumTokens())
|
||||||
|
if tokenManager.NumTokens() < 5 {
|
||||||
|
go tokenApp.PurchaseTokens()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// peerWithTokenServer is the entry point for cwtchPeer - server relationships
|
// peerWithTokenServer is the entry point for cwtchPeer - server relationships
|
||||||
// needs to be run in a goroutine as will block on Open.
|
// needs to be run in a goroutine as will block on Open.
|
||||||
func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, tokenServerY string, lastKnownSignature []byte) {
|
func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, tokenServerY string, lastKnownSignature []byte, cachedTokens []*privacypass.Token) {
|
||||||
e.ephemeralServicesLock.Lock()
|
e.ephemeralServicesLock.Lock()
|
||||||
connectionService, exists := e.ephemeralServices[onion]
|
_, exists := e.ephemeralServices[onion]
|
||||||
|
|
||||||
if exists && connectionService.service != nil {
|
if exists {
|
||||||
if conn, err := connectionService.service.GetConnection(onion); err == nil {
|
|
||||||
// We are already peered and synced so return...
|
|
||||||
// This will only not-trigger if lastKnownSignature has been wiped, which only happens when ResyncServer is called
|
|
||||||
// in CwtchPeer.
|
|
||||||
if !conn.IsClosed() && len(lastKnownSignature) != 0 {
|
|
||||||
e.ephemeralServicesLock.Unlock()
|
e.ephemeralServicesLock.Unlock()
|
||||||
|
log.Debugf("attempted to join a server with an active connection")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Otherwise...we are going to rebuild the connection(which will result in a bandwidth heavy resync)...
|
|
||||||
connectionService.service.Shutdown()
|
|
||||||
}
|
|
||||||
// Otherwise...let's reconnect
|
|
||||||
}
|
|
||||||
|
|
||||||
connectionService = &connectionLockedService{service: new(tor.BaseOnionService)}
|
connectionService := &connectionLockedService{service: new(tor.BaseOnionService)}
|
||||||
e.ephemeralServices[onion] = connectionService
|
e.ephemeralServices[onion] = connectionService
|
||||||
|
|
||||||
connectionService.connectingLock.Lock()
|
connectionService.connectingLock.Lock()
|
||||||
|
@ -359,9 +449,9 @@ func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, toke
|
||||||
eid, epk := primitives.InitializeEphemeralIdentity()
|
eid, epk := primitives.InitializeEphemeralIdentity()
|
||||||
connectionService.service.Init(e.acn, epk, &eid)
|
connectionService.service.Init(e.acn, epk, &eid)
|
||||||
|
|
||||||
Y := ristretto255.NewElement()
|
Y := new(ristretto255.Element)
|
||||||
Y.UnmarshalText([]byte(tokenServerY))
|
Y.UnmarshalText([]byte(tokenServerY))
|
||||||
connected, err := connectionService.service.Connect(onion, NewTokenBoardClient(e.acn, Y, tokenServerOnion, lastKnownSignature, e.receiveGroupMessage, e.serverAuthed, e.serverSynced, e.ignoreOnShutdown(e.serverDisconnected)))
|
connected, err := connectionService.service.Connect(onion, NewTokenBoardClient(e.acn, Y, tokenServerOnion, lastKnownSignature, e))
|
||||||
// If we are already connected...check if we are authed and issue an auth event
|
// If we are already connected...check if we are authed and issue an auth event
|
||||||
// (This allows the ui to be stateless)
|
// (This allows the ui to be stateless)
|
||||||
if connected && err != nil {
|
if connected && err != nil {
|
||||||
|
@ -380,6 +470,10 @@ func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, toke
|
||||||
e.ignoreOnShutdown(e.serverAuthed)(onion)
|
e.ignoreOnShutdown(e.serverAuthed)(onion)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// if we are not authed or synced then we are stuck...
|
||||||
|
e.ignoreOnShutdown(e.serverConnecting)(onion)
|
||||||
|
log.Errorf("server connection attempt issued to active connection")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -391,7 +485,7 @@ func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, toke
|
||||||
|
|
||||||
func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
|
func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
|
||||||
return func(x string) {
|
return func(x string) {
|
||||||
if !e.shuttingDown {
|
if !e.shuttingDown.Load() {
|
||||||
f(x)
|
f(x)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -399,7 +493,7 @@ func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
|
||||||
|
|
||||||
func (e *engine) ignoreOnShutdown2(f func(string, string)) func(string, string) {
|
func (e *engine) ignoreOnShutdown2(f func(string, string)) func(string, string) {
|
||||||
return func(x, y string) {
|
return func(x, y string) {
|
||||||
if !e.shuttingDown {
|
if !e.shuttingDown.Load() {
|
||||||
f(x, y)
|
f(x, y)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -411,16 +505,24 @@ func (e *engine) peerAuthed(onion string) {
|
||||||
e.authorizations.Store(onion, model.AuthUnknown)
|
e.authorizations.Store(onion, model.AuthUnknown)
|
||||||
}
|
}
|
||||||
|
|
||||||
details, err := e.acn.GetInfo(onion)
|
// FIXME: This call uses WAY too much memory, and was responsible for the vast majority
|
||||||
if err == nil {
|
// of allocations in the UI
|
||||||
if hops, exists := details["circuit"]; exists {
|
// This is because Bine ends up reading the entire response into memory and then passes that back
|
||||||
e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
|
// into Connectivity which eventually extracts just what it needs.
|
||||||
event.Handle: onion,
|
// Ideally we would just read from the control stream directly into reusable buffers.
|
||||||
event.Key: "circuit",
|
|
||||||
event.Data: hops,
|
//details, err := e.acn.GetInfo(onion)
|
||||||
}))
|
//if err == nil {
|
||||||
}
|
// if hops, exists := details["circuit"]; exists {
|
||||||
}
|
// e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
|
||||||
|
// event.Handle: onion,
|
||||||
|
// event.Key: "circuit",
|
||||||
|
// event.Data: hops,
|
||||||
|
// }))
|
||||||
|
// }
|
||||||
|
//} else {
|
||||||
|
// log.Errorf("error getting info for onion %v", err)
|
||||||
|
//}
|
||||||
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||||
event.RemotePeer: string(onion),
|
event.RemotePeer: string(onion),
|
||||||
|
@ -430,14 +532,14 @@ func (e *engine) peerAuthed(onion string) {
|
||||||
|
|
||||||
func (e *engine) peerConnecting(onion string) {
|
func (e *engine) peerConnecting(onion string) {
|
||||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||||
event.RemotePeer: string(onion),
|
event.RemotePeer: onion,
|
||||||
event.ConnectionState: ConnectionStateName[CONNECTING],
|
event.ConnectionState: ConnectionStateName[CONNECTING],
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) serverConnecting(onion string) {
|
func (e *engine) serverConnecting(onion string) {
|
||||||
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
||||||
event.GroupServer: string(onion),
|
event.GroupServer: onion,
|
||||||
event.ConnectionState: ConnectionStateName[CONNECTING],
|
event.ConnectionState: ConnectionStateName[CONNECTING],
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
@ -457,6 +559,8 @@ func (e *engine) serverSynced(onion string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) serverDisconnected(onion string) {
|
func (e *engine) serverDisconnected(onion string) {
|
||||||
|
e.leaveServer(onion)
|
||||||
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
||||||
event.GroupServer: onion,
|
event.GroupServer: onion,
|
||||||
event.ConnectionState: ConnectionStateName[DISCONNECTED],
|
event.ConnectionState: ConnectionStateName[DISCONNECTED],
|
||||||
|
@ -472,6 +576,15 @@ func (e *engine) peerAck(onion string, eventID string) {
|
||||||
|
|
||||||
func (e *engine) peerDisconnected(onion string) {
|
func (e *engine) peerDisconnected(onion string) {
|
||||||
|
|
||||||
|
// Clean up any existing get value requests...
|
||||||
|
e.getValRequests.Range(func(key, value interface{}) bool {
|
||||||
|
keyString := key.(string)
|
||||||
|
if strings.HasPrefix(keyString, onion) {
|
||||||
|
e.getValRequests.Delete(keyString)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
// Purge circuit information...
|
// Purge circuit information...
|
||||||
e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
|
||||||
event.Handle: onion,
|
event.Handle: onion,
|
||||||
|
@ -493,8 +606,13 @@ func (e *engine) sendGetValToPeer(eventID, onion, scope, path string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
e.getValRequests.Store(onion+eventID, message)
|
key := onion + eventID
|
||||||
return e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextGetVal, Data: message})
|
e.getValRequests.Store(key, message)
|
||||||
|
err = e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextGetVal, Data: message})
|
||||||
|
if err != nil {
|
||||||
|
e.getValRequests.Delete(key)
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
|
func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
|
||||||
|
@ -540,6 +658,7 @@ func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, si
|
||||||
e.ephemeralServicesLock.Unlock()
|
e.ephemeralServicesLock.Unlock()
|
||||||
|
|
||||||
if ephemeralService == nil || !ok {
|
if ephemeralService == nil || !ok {
|
||||||
|
log.Debugf("could not send message to group: serve not found")
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-not-found", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-not-found", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -548,22 +667,24 @@ func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, si
|
||||||
if err == nil {
|
if err == nil {
|
||||||
tokenApp, ok := (conn.App()).(*TokenBoardClient)
|
tokenApp, ok := (conn.App()).(*TokenBoardClient)
|
||||||
if ok {
|
if ok {
|
||||||
if spent, numtokens := tokenApp.Post(ct, sig); !spent {
|
if spent, numtokens := tokenApp.Post(groupID, ct, sig); !spent {
|
||||||
// we failed to post, probably because we ran out of tokens... so make a payment
|
// we failed to post, probably because we ran out of tokens... so make a payment
|
||||||
go tokenApp.MakePayment()
|
go tokenApp.PurchaseTokens()
|
||||||
// backoff
|
// backoff
|
||||||
time.Sleep(time.Second * 5)
|
time.Sleep(time.Second * 5)
|
||||||
// try again
|
// try again
|
||||||
|
log.Debugf("sending message to group error attempt: %v", attempts)
|
||||||
e.sendMessageToGroup(groupID, server, ct, sig, attempts+1)
|
e.sendMessageToGroup(groupID, server, ct, sig, attempts+1)
|
||||||
} else {
|
} else {
|
||||||
if numtokens < 5 {
|
if numtokens < 5 {
|
||||||
go tokenApp.MakePayment()
|
go tokenApp.PurchaseTokens()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// regardless we return....
|
// regardless we return....
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
log.Debugf("could not send message to group")
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-connection-not-valid", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-connection-not-valid", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -628,8 +749,18 @@ func (e *engine) handlePeerMessage(hostname string, eventID string, context stri
|
||||||
// Fall through handler for the default text conversation.
|
// Fall through handler for the default text conversation.
|
||||||
e.eventManager.Publish(event.NewEvent(event.NewMessageFromPeerEngine, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.Data: string(message)}))
|
e.eventManager.Publish(event.NewEvent(event.NewMessageFromPeerEngine, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.Data: string(message)}))
|
||||||
|
|
||||||
|
// Don't ack messages in channel 7
|
||||||
|
// Note: this code explictly doesn't care about malformed messages, we deal with them
|
||||||
|
// later on...we still want to ack the original send...(as some "malformed" messages
|
||||||
|
// may be future-ok)
|
||||||
|
if cm, err := model.DeserializeMessage(string(message)); err == nil {
|
||||||
|
if cm.IsStream() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Send an explicit acknowledgement
|
// Send an explicit acknowledgement
|
||||||
// Every other protocol should have a explicit acknowledgement message e.g. value lookups have responses, and file handling has an explicit flow
|
// Every other protocol should have an explicit acknowledgement message e.g. value lookups have responses, and file handling has an explicit flow
|
||||||
if err := e.sendPeerMessage(hostname, pmodel.PeerMessage{ID: eventID, Context: event.ContextAck, Data: []byte{}}); err != nil {
|
if err := e.sendPeerMessage(hostname, pmodel.PeerMessage{ID: eventID, Context: event.ContextAck, Data: []byte{}}); err != nil {
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,59 @@
|
||||||
|
package connections
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
|
"encoding/base64"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Implement Token Service Handler for Engine
|
||||||
|
|
||||||
|
// GroupMessageHandler receives a server and an encrypted group message
|
||||||
|
func (e *engine) GroupMessageHandler(server string, gm *groups.EncryptedGroupMessage) {
|
||||||
|
e.receiveGroupMessage(server, gm)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostingFailed notifies a peer that a message failed to post
|
||||||
|
func (e *engine) PostingFailed(group string, sig []byte) {
|
||||||
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: group, event.Error: "failed to post message", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerAuthedHandler is notified when a server has successfully authed
|
||||||
|
func (e *engine) ServerAuthedHandler(server string) {
|
||||||
|
e.serverAuthed(server)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerSyncedHandler is notified when a server has successfully synced
|
||||||
|
func (e *engine) ServerSyncedHandler(server string) {
|
||||||
|
e.serverSynced(server)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerClosedHandler is notified when a server connection has closed, the result is ignored during shutdown...
|
||||||
|
func (e *engine) ServerClosedHandler(server string) {
|
||||||
|
e.ignoreOnShutdown(e.serverDisconnected)(server)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTokenHandler is notified after a successful token acquisition
|
||||||
|
func (e *engine) NewTokenHandler(tokenService string, tokens []*privacypass.Token) {
|
||||||
|
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
|
||||||
|
tokenManager := tokenManagerPointer.(*TokenManager)
|
||||||
|
tokenManager.StoreNewTokens(tokens)
|
||||||
|
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(tokenManager.NumTokens())}))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchToken is notified when a server requires a new token from the client
|
||||||
|
func (e *engine) FetchToken(tokenService string) (*privacypass.Token, int, error) {
|
||||||
|
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
|
||||||
|
tokenManager := tokenManagerPointer.(*TokenManager)
|
||||||
|
token, numTokens, err := tokenManager.FetchToken()
|
||||||
|
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(numTokens)}))
|
||||||
|
return token, numTokens, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *engine) PokeTokenCount(tokenService string) {
|
||||||
|
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
|
||||||
|
tokenManager := tokenManagerPointer.(*TokenManager)
|
||||||
|
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(tokenManager.NumTokens())}))
|
||||||
|
}
|
|
@ -0,0 +1,14 @@
|
||||||
|
package connections
|
||||||
|
|
||||||
|
import "git.openprivacy.ca/cwtch.im/tapir"
|
||||||
|
|
||||||
|
type EngineHooks interface {
|
||||||
|
SendPeerMessage(connection tapir.Connection, message []byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type DefaultEngineHooks struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (deh DefaultEngineHooks) SendPeerMessage(connection tapir.Connection, message []byte) error {
|
||||||
|
return connection.Send(message)
|
||||||
|
}
|
|
@ -0,0 +1,59 @@
|
||||||
|
package connections
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/utils"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MakePayment uses the PoW based token protocol to obtain more tokens
|
||||||
|
func MakePayment(tokenServiceOnion string, tokenService *privacypass.TokenServer, acn connectivity.ACN, handler TokenBoardHandler) error {
|
||||||
|
log.Debugf("making a payment")
|
||||||
|
id, sk := primitives.InitializeEphemeralIdentity()
|
||||||
|
client := new(tor.BaseOnionService)
|
||||||
|
client.Init(acn, sk, &id)
|
||||||
|
defer client.Shutdown()
|
||||||
|
|
||||||
|
tokenApplication := new(applications.TokenApplication)
|
||||||
|
tokenApplication.TokenService = tokenService
|
||||||
|
powTokenApp := new(applications.ApplicationChain).
|
||||||
|
ChainApplication(new(applications.ProofOfWorkApplication), applications.SuccessfulProofOfWorkCapability).
|
||||||
|
ChainApplication(tokenApplication, applications.HasTokensCapability)
|
||||||
|
|
||||||
|
log.Debugf("waiting for successful PoW auth...")
|
||||||
|
tp := utils.TimeoutPolicy(time.Second * 30)
|
||||||
|
err := tp.ExecuteAction(func() error {
|
||||||
|
connected, err := client.Connect(tokenServiceOnion, powTokenApp)
|
||||||
|
if connected && err == nil {
|
||||||
|
log.Debugf("waiting for successful token acquisition...")
|
||||||
|
conn, err := client.WaitForCapabilityOrClose(tokenServiceOnion, applications.HasTokensCapability)
|
||||||
|
if err == nil {
|
||||||
|
powtapp, ok := conn.App().(*applications.TokenApplication)
|
||||||
|
if ok {
|
||||||
|
log.Debugf("updating tokens")
|
||||||
|
handler.NewTokenHandler(tokenServiceOnion, powtapp.Tokens)
|
||||||
|
log.Debugf("transcript: %v", powtapp.Transcript().OutputTranscriptToAudit())
|
||||||
|
conn.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
log.Errorf("invalid cast of powapp. this should never happen %v %v", powtapp, reflect.TypeOf(conn.App()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
|
||||||
|
// we timed out
|
||||||
|
if err != nil {
|
||||||
|
log.Debugf("make payment timeout...")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
|
@ -1,11 +1,15 @@
|
||||||
package connections
|
package connections
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
model2 "cwtch.im/cwtch/protocol/model"
|
model2 "cwtch.im/cwtch/protocol/model"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir"
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const cwtchCapability = tapir.Capability("cwtchCapability")
|
const cwtchCapability = tapir.Capability("cwtchCapability")
|
||||||
|
@ -21,6 +25,8 @@ type PeerApp struct {
|
||||||
OnAuth func(string)
|
OnAuth func(string)
|
||||||
OnClose func(string)
|
OnClose func(string)
|
||||||
OnConnecting func(string)
|
OnConnecting func(string)
|
||||||
|
OnSendMessage func(connection tapir.Connection, message []byte) error
|
||||||
|
version atomic.Value
|
||||||
}
|
}
|
||||||
|
|
||||||
type peerGetVal struct {
|
type peerGetVal struct {
|
||||||
|
@ -32,6 +38,9 @@ type peerRetVal struct {
|
||||||
Exists bool
|
Exists bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const Version1 = 0x01
|
||||||
|
const Version2 = 0x02
|
||||||
|
|
||||||
// NewInstance should always return a new instantiation of the application.
|
// NewInstance should always return a new instantiation of the application.
|
||||||
func (pa *PeerApp) NewInstance() tapir.Application {
|
func (pa *PeerApp) NewInstance() tapir.Application {
|
||||||
newApp := new(PeerApp)
|
newApp := new(PeerApp)
|
||||||
|
@ -42,6 +51,8 @@ func (pa *PeerApp) NewInstance() tapir.Application {
|
||||||
newApp.OnAuth = pa.OnAuth
|
newApp.OnAuth = pa.OnAuth
|
||||||
newApp.OnClose = pa.OnClose
|
newApp.OnClose = pa.OnClose
|
||||||
newApp.OnConnecting = pa.OnConnecting
|
newApp.OnConnecting = pa.OnConnecting
|
||||||
|
newApp.OnSendMessage = pa.OnSendMessage
|
||||||
|
newApp.version.Store(Version1)
|
||||||
return newApp
|
return newApp
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -59,11 +70,28 @@ func (pa *PeerApp) Init(connection tapir.Connection) {
|
||||||
pa.connection.Close()
|
pa.connection.Close()
|
||||||
pa.OnClose(connection.Hostname())
|
pa.OnClose(connection.Hostname())
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
|
// we are authenticated
|
||||||
|
// attempt to negotiate a more efficient packet format...
|
||||||
|
// we are abusing the context here slightly by sending a "malformed" GetVal request.
|
||||||
|
// as a rule cwtch ignores getval requests that it cannot deserialize so older clients will ignore this
|
||||||
|
// message.
|
||||||
|
// version *must* be the first message sent to prevent race conditions for other events fired after-auth
|
||||||
|
// (e.g. getVal requests)
|
||||||
|
// as such, we send this message before we update the rest of the system
|
||||||
|
_ = pa.SendMessage(model2.PeerMessage{
|
||||||
|
ID: event.ContextVersion,
|
||||||
|
Context: event.ContextGetVal,
|
||||||
|
Data: []byte{Version2},
|
||||||
|
})
|
||||||
|
|
||||||
pa.OnAuth(connection.Hostname())
|
pa.OnAuth(connection.Hostname())
|
||||||
go pa.listen()
|
go pa.listen()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// The auth protocol wasn't completed, we can safely shutdown the connection
|
// The auth protocol wasn't completed, we can safely shutdown the connection
|
||||||
|
// send an onclose here because we *may* have triggered this and we want to retry later...
|
||||||
|
pa.OnClose(connection.Hostname())
|
||||||
connection.Close()
|
connection.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -76,11 +104,47 @@ func (pa *PeerApp) listen() {
|
||||||
pa.OnClose(pa.connection.Hostname())
|
pa.OnClose(pa.connection.Hostname())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
var peerMessage model2.PeerMessage
|
|
||||||
err := json.Unmarshal(message, &peerMessage)
|
var packet model2.PeerMessage
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if pa.version.Load() == Version1 {
|
||||||
|
err = json.Unmarshal(message, &packet)
|
||||||
|
} else if pa.version.Load() == Version2 {
|
||||||
|
parsePacket, parseErr := model2.ParsePeerMessage(message)
|
||||||
|
// if all else fails...attempt to process this message as a version 1 message
|
||||||
|
if parseErr != nil {
|
||||||
|
err = json.Unmarshal(message, &packet)
|
||||||
|
} else {
|
||||||
|
packet = *parsePacket
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
log.Errorf("invalid version")
|
||||||
|
pa.OnClose(pa.connection.Hostname())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if pa.IsAllowed(pa.connection.Hostname()) {
|
if pa.IsAllowed(pa.connection.Hostname()) {
|
||||||
pa.MessageHandler(pa.connection.Hostname(), peerMessage.ID, peerMessage.Context, peerMessage.Data)
|
// we don't expose im.cwtch.version messages outside of PeerApp (ideally at some point in the future we
|
||||||
|
// can remove this check all together)
|
||||||
|
if packet.ID == event.ContextVersion {
|
||||||
|
if pa.version.Load() == Version1 && len(packet.Data) == 1 && packet.Data[0] == Version2 {
|
||||||
|
log.Debugf("switching to protocol version 2")
|
||||||
|
pa.version.Store(Version2)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if cm, err := model.DeserializeMessage(string(packet.Data)); err == nil {
|
||||||
|
if cm.TransitTime != nil {
|
||||||
|
rt := time.Now().UTC()
|
||||||
|
cm.RecvTime = &rt
|
||||||
|
data, _ := json.Marshal(cm)
|
||||||
|
packet.Data = data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pa.MessageHandler(pa.connection.Hostname(), packet.ID, packet.Context, packet.Data)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Errorf("Error unmarshalling PeerMessage package: %x %v", message, err)
|
log.Errorf("Error unmarshalling PeerMessage package: %x %v", message, err)
|
||||||
|
@ -91,9 +155,41 @@ func (pa *PeerApp) listen() {
|
||||||
// SendMessage sends the peer a preformatted message
|
// SendMessage sends the peer a preformatted message
|
||||||
// NOTE: This is a stub, we will likely want to extend this to better reflect the desired protocol
|
// NOTE: This is a stub, we will likely want to extend this to better reflect the desired protocol
|
||||||
func (pa *PeerApp) SendMessage(message model2.PeerMessage) error {
|
func (pa *PeerApp) SendMessage(message model2.PeerMessage) error {
|
||||||
serialized, err := json.Marshal(message)
|
var serialized []byte
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if cm, err := model.DeserializeMessage(string(message.Data)); err == nil {
|
||||||
|
if cm.SendTime != nil {
|
||||||
|
tt := time.Now().UTC()
|
||||||
|
cm.TransitTime = &tt
|
||||||
|
data, _ := json.Marshal(cm)
|
||||||
|
message.Data = data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pa.version.Load() == Version2 {
|
||||||
|
// treat data as a pre-serialized string, not as a byte array (which will be base64 encoded and bloat the packet size)
|
||||||
|
serialized = message.Serialize()
|
||||||
|
} else {
|
||||||
|
serialized, err = json.Marshal(message)
|
||||||
|
}
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return pa.connection.Send(serialized)
|
err = pa.OnSendMessage(pa.connection, serialized)
|
||||||
|
|
||||||
|
// at this point we have tried to send a message to a peer only to find that something went wrong.
|
||||||
|
// we don't know *what* went wrong - the most likely explanation is the peer went offline in the time between
|
||||||
|
// sending the message and it arriving in the engine to be sent. Other explanations include problems with Tor,
|
||||||
|
// a dropped wifi connection.
|
||||||
|
// Regardless, we error out this message and close this peer app assuming it cannot be used again.
|
||||||
|
// We expect that cwtch will eventually recreate this connection and the app.
|
||||||
|
if err != nil {
|
||||||
|
// close any associated sockets
|
||||||
|
pa.connection.Close()
|
||||||
|
// tell cwtch this connection is no longer valid
|
||||||
|
pa.OnClose(err.Error())
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,54 @@
|
||||||
|
package connections
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TokenManager maintains a list of tokens associated with a single TokenServer
|
||||||
|
type TokenManager struct {
|
||||||
|
lock sync.Mutex
|
||||||
|
tokens map[string]*privacypass.Token
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTokenManager() *TokenManager {
|
||||||
|
tm := new(TokenManager)
|
||||||
|
tm.tokens = make(map[string]*privacypass.Token)
|
||||||
|
return tm
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreNewTokens adds tokens to the internal list managed by this TokenManager
|
||||||
|
func (tm *TokenManager) StoreNewTokens(tokens []*privacypass.Token) {
|
||||||
|
tm.lock.Lock()
|
||||||
|
defer tm.lock.Unlock()
|
||||||
|
log.Debugf("acquired %v new tokens", tokens)
|
||||||
|
for _, token := range tokens {
|
||||||
|
serialized, _ := json.Marshal(token)
|
||||||
|
tm.tokens[string(serialized)] = token
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NumTokens returns the current number of tokens
|
||||||
|
func (tm *TokenManager) NumTokens() int {
|
||||||
|
tm.lock.Lock()
|
||||||
|
defer tm.lock.Unlock()
|
||||||
|
return len(tm.tokens)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchToken removes a token from the internal list and returns it, along with a count of the remaining tokens.
|
||||||
|
// Errors if no tokens available.
|
||||||
|
func (tm *TokenManager) FetchToken() (*privacypass.Token, int, error) {
|
||||||
|
tm.lock.Lock()
|
||||||
|
defer tm.lock.Unlock()
|
||||||
|
if len(tm.tokens) == 0 {
|
||||||
|
return nil, 0, errors.New("no more tokens")
|
||||||
|
}
|
||||||
|
for serializedToken, token := range tm.tokens {
|
||||||
|
delete(tm.tokens, serializedToken)
|
||||||
|
return token, len(tm.tokens), nil
|
||||||
|
}
|
||||||
|
return nil, 0, errors.New("no more tokens")
|
||||||
|
}
|
|
@ -3,31 +3,35 @@ package connections
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/protocol/groups"
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir"
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"github.com/gtank/ristretto255"
|
"github.com/gtank/ristretto255"
|
||||||
"reflect"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TokenBoardHandler encapsulates all the various handlers a client needs to interact with a token board
|
||||||
|
// this includes handlers to receive new messages, as well as handlers to manage tokens.
|
||||||
|
type TokenBoardHandler interface {
|
||||||
|
GroupMessageHandler(server string, gm *groups.EncryptedGroupMessage)
|
||||||
|
ServerAuthedHandler(server string)
|
||||||
|
ServerSyncedHandler(server string)
|
||||||
|
ServerClosedHandler(server string)
|
||||||
|
NewTokenHandler(tokenService string, tokens []*privacypass.Token)
|
||||||
|
PostingFailed(server string, sig []byte)
|
||||||
|
FetchToken(tokenService string) (*privacypass.Token, int, error)
|
||||||
|
}
|
||||||
|
|
||||||
// NewTokenBoardClient generates a new Client for Token Board
|
// NewTokenBoardClient generates a new Client for Token Board
|
||||||
func NewTokenBoardClient(acn connectivity.ACN, Y *ristretto255.Element, tokenServiceOnion string, lastKnownSignature []byte, groupMessageHandler func(server string, gm *groups.EncryptedGroupMessage), serverAuthedHandler func(server string), serverSyncedHandler func(server string), serverClosedHandler func(server string)) tapir.Application {
|
func NewTokenBoardClient(acn connectivity.ACN, Y *ristretto255.Element, tokenServiceOnion string, lastKnownSignature []byte, tokenBoardHandler TokenBoardHandler) tapir.Application {
|
||||||
tba := new(TokenBoardClient)
|
tba := new(TokenBoardClient)
|
||||||
tba.acn = acn
|
tba.acn = acn
|
||||||
tba.tokenService = privacypass.NewTokenServer()
|
tba.tokenService = privacypass.NewTokenServer()
|
||||||
tba.tokenService.Y = Y
|
tba.tokenService.Y = Y
|
||||||
tba.tokenServiceOnion = tokenServiceOnion
|
tba.tokenServiceOnion = tokenServiceOnion
|
||||||
tba.receiveGroupMessageHandler = groupMessageHandler
|
tba.tokenBoardHandler = tokenBoardHandler
|
||||||
tba.serverAuthedHandler = serverAuthedHandler
|
|
||||||
tba.serverSyncedHandler = serverSyncedHandler
|
|
||||||
tba.serverClosedHandler = serverClosedHandler
|
|
||||||
tba.lastKnownSignature = lastKnownSignature
|
tba.lastKnownSignature = lastKnownSignature
|
||||||
return tba
|
return tba
|
||||||
}
|
}
|
||||||
|
@ -36,27 +40,23 @@ func NewTokenBoardClient(acn connectivity.ACN, Y *ristretto255.Element, tokenSer
|
||||||
type TokenBoardClient struct {
|
type TokenBoardClient struct {
|
||||||
applications.AuthApp
|
applications.AuthApp
|
||||||
connection tapir.Connection
|
connection tapir.Connection
|
||||||
receiveGroupMessageHandler func(server string, gm *groups.EncryptedGroupMessage)
|
tokenBoardHandler TokenBoardHandler
|
||||||
serverAuthedHandler func(server string)
|
|
||||||
serverSyncedHandler func(server string)
|
|
||||||
serverClosedHandler func(server string)
|
|
||||||
|
|
||||||
// Token service handling
|
// Token service handling
|
||||||
acn connectivity.ACN
|
acn connectivity.ACN
|
||||||
tokens []*privacypass.Token
|
|
||||||
tokenLock sync.Mutex
|
|
||||||
tokenService *privacypass.TokenServer
|
tokenService *privacypass.TokenServer
|
||||||
tokenServiceOnion string
|
tokenServiceOnion string
|
||||||
lastKnownSignature []byte
|
lastKnownSignature []byte
|
||||||
|
|
||||||
|
postLock sync.Mutex
|
||||||
|
postQueue []groups.CachedEncryptedGroupMessage
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewInstance Client a new TokenBoardApp
|
// NewInstance Client a new TokenBoardApp
|
||||||
func (ta *TokenBoardClient) NewInstance() tapir.Application {
|
func (ta *TokenBoardClient) NewInstance() tapir.Application {
|
||||||
tba := new(TokenBoardClient)
|
tba := new(TokenBoardClient)
|
||||||
tba.serverAuthedHandler = ta.serverAuthedHandler
|
tba.tokenBoardHandler = ta.tokenBoardHandler
|
||||||
tba.serverSyncedHandler = ta.serverSyncedHandler
|
|
||||||
tba.serverClosedHandler = ta.serverClosedHandler
|
|
||||||
tba.receiveGroupMessageHandler = ta.receiveGroupMessageHandler
|
|
||||||
tba.acn = ta.acn
|
tba.acn = ta.acn
|
||||||
tba.tokenService = ta.tokenService
|
tba.tokenService = ta.tokenService
|
||||||
tba.tokenServiceOnion = ta.tokenServiceOnion
|
tba.tokenServiceOnion = ta.tokenServiceOnion
|
||||||
|
@ -66,17 +66,22 @@ func (ta *TokenBoardClient) NewInstance() tapir.Application {
|
||||||
|
|
||||||
// Init initializes the cryptographic TokenBoardApp
|
// Init initializes the cryptographic TokenBoardApp
|
||||||
func (ta *TokenBoardClient) Init(connection tapir.Connection) {
|
func (ta *TokenBoardClient) Init(connection tapir.Connection) {
|
||||||
|
// connection.Hostname is always valid because we are ALWAYS the initiating party
|
||||||
|
log.Debugf("connecting to server: %v", connection.Hostname())
|
||||||
ta.AuthApp.Init(connection)
|
ta.AuthApp.Init(connection)
|
||||||
|
log.Debugf("server protocol complete: %v", connection.Hostname())
|
||||||
if connection.HasCapability(applications.AuthCapability) {
|
if connection.HasCapability(applications.AuthCapability) {
|
||||||
|
log.Debugf("Successfully Initialized Connection to %v", connection.Hostname())
|
||||||
ta.connection = connection
|
ta.connection = connection
|
||||||
ta.serverAuthedHandler(ta.connection.Hostname())
|
ta.tokenBoardHandler.ServerAuthedHandler(connection.Hostname())
|
||||||
log.Debugf("Successfully Initialized Connection")
|
|
||||||
go ta.Listen()
|
go ta.Listen()
|
||||||
// Optimistically acquire many tokens for this server...
|
// Optimistically acquire many tokens for this server...
|
||||||
go ta.MakePayment()
|
go ta.PurchaseTokens()
|
||||||
go ta.MakePayment()
|
go ta.PurchaseTokens()
|
||||||
ta.Replay()
|
ta.Replay()
|
||||||
} else {
|
} else {
|
||||||
|
log.Debugf("Error Connecting to %v", connection.Hostname())
|
||||||
|
ta.tokenBoardHandler.ServerClosedHandler(connection.Hostname())
|
||||||
connection.Close()
|
connection.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -88,7 +93,7 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
data := ta.connection.Expect()
|
data := ta.connection.Expect()
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
log.Debugf("Server closed the connection...")
|
log.Debugf("Server closed the connection...")
|
||||||
ta.serverClosedHandler(ta.connection.Hostname())
|
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
||||||
return // connection is closed
|
return // connection is closed
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -96,7 +101,7 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
var message groups.Message
|
var message groups.Message
|
||||||
if err := json.Unmarshal(data, &message); err != nil {
|
if err := json.Unmarshal(data, &message); err != nil {
|
||||||
log.Debugf("Server sent an unexpected message, closing the connection: %v", err)
|
log.Debugf("Server sent an unexpected message, closing the connection: %v", err)
|
||||||
ta.serverClosedHandler(ta.connection.Hostname())
|
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -104,15 +109,28 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
switch message.MessageType {
|
switch message.MessageType {
|
||||||
case groups.NewMessageMessage:
|
case groups.NewMessageMessage:
|
||||||
if message.NewMessage != nil {
|
if message.NewMessage != nil {
|
||||||
ta.receiveGroupMessageHandler(ta.connection.Hostname(), &message.NewMessage.EGM)
|
ta.tokenBoardHandler.GroupMessageHandler(ta.connection.Hostname(), &message.NewMessage.EGM)
|
||||||
} else {
|
} else {
|
||||||
log.Debugf("Server sent an unexpected NewMessage, closing the connection: %s", data)
|
log.Debugf("Server sent an unexpected NewMessage, closing the connection: %s", data)
|
||||||
ta.serverClosedHandler(ta.connection.Hostname())
|
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
case groups.PostResultMessage:
|
case groups.PostResultMessage:
|
||||||
// TODO handle failure
|
ta.postLock.Lock()
|
||||||
|
egm := ta.postQueue[0]
|
||||||
|
ta.postQueue = ta.postQueue[1:]
|
||||||
|
ta.postLock.Unlock()
|
||||||
|
if !message.PostResult.Success {
|
||||||
|
log.Debugf("post result message: %v", message.PostResult)
|
||||||
|
// Retry using another token
|
||||||
|
posted, _ := ta.Post(egm.Group, egm.Ciphertext, egm.Signature)
|
||||||
|
// if posting failed...
|
||||||
|
if !posted {
|
||||||
|
log.Errorf("error posting message")
|
||||||
|
ta.tokenBoardHandler.PostingFailed(egm.Group, egm.Signature)
|
||||||
|
}
|
||||||
|
}
|
||||||
case groups.ReplayResultMessage:
|
case groups.ReplayResultMessage:
|
||||||
if message.ReplayResult != nil {
|
if message.ReplayResult != nil {
|
||||||
log.Debugf("Replaying %v Messages...", message.ReplayResult.NumMessages)
|
log.Debugf("Replaying %v Messages...", message.ReplayResult.NumMessages)
|
||||||
|
@ -121,23 +139,23 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
|
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection")
|
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection")
|
||||||
ta.serverClosedHandler(ta.connection.Hostname())
|
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
egm := &groups.EncryptedGroupMessage{}
|
egm := &groups.EncryptedGroupMessage{}
|
||||||
if err := json.Unmarshal(data, egm); err == nil {
|
if err := json.Unmarshal(data, egm); err == nil {
|
||||||
ta.receiveGroupMessageHandler(ta.connection.Hostname(), egm)
|
ta.tokenBoardHandler.GroupMessageHandler(ta.connection.Hostname(), egm)
|
||||||
ta.lastKnownSignature = egm.Signature
|
ta.lastKnownSignature = egm.Signature
|
||||||
} else {
|
} else {
|
||||||
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection: %v", err)
|
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection: %v", err)
|
||||||
ta.serverClosedHandler(ta.connection.Hostname())
|
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ta.serverSyncedHandler(ta.connection.Hostname())
|
ta.tokenBoardHandler.ServerSyncedHandler(ta.connection.Hostname())
|
||||||
ta.connection.SetCapability(groups.CwtchServerSyncedCapability)
|
ta.connection.SetCapability(groups.CwtchServerSyncedCapability)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -152,17 +170,21 @@ func (ta *TokenBoardClient) Replay() {
|
||||||
|
|
||||||
// PurchaseTokens purchases the given number of tokens from the server (using the provided payment handler)
|
// PurchaseTokens purchases the given number of tokens from the server (using the provided payment handler)
|
||||||
func (ta *TokenBoardClient) PurchaseTokens() {
|
func (ta *TokenBoardClient) PurchaseTokens() {
|
||||||
ta.MakePayment()
|
MakePayment(ta.tokenServiceOnion, ta.tokenService, ta.acn, ta.tokenBoardHandler)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Post sends a Post Request to the server
|
// Post sends a Post Request to the server
|
||||||
func (ta *TokenBoardClient) Post(ct []byte, sig []byte) (bool, int) {
|
func (ta *TokenBoardClient) Post(group string, ct []byte, sig []byte) (bool, int) {
|
||||||
egm := groups.EncryptedGroupMessage{Ciphertext: ct, Signature: sig}
|
egm := groups.EncryptedGroupMessage{Ciphertext: ct, Signature: sig}
|
||||||
token, numTokens, err := ta.NextToken(egm.ToBytes(), ta.connection.Hostname())
|
token, numTokens, err := ta.NextToken(egm.ToBytes(), ta.connection.Hostname())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
data, _ := json.Marshal(groups.Message{MessageType: groups.PostRequestMessage, PostRequest: &groups.PostRequest{EGM: egm, Token: token}})
|
data, _ := json.Marshal(groups.Message{MessageType: groups.PostRequestMessage, PostRequest: &groups.PostRequest{EGM: egm, Token: token}})
|
||||||
|
ta.postLock.Lock()
|
||||||
|
// ONLY put group in the EGM as a cache / for error reporting...
|
||||||
|
ta.postQueue = append(ta.postQueue, groups.CachedEncryptedGroupMessage{Group: group, EncryptedGroupMessage: egm})
|
||||||
log.Debugf("Message Length: %s %v", data, len(data))
|
log.Debugf("Message Length: %s %v", data, len(data))
|
||||||
err := ta.connection.Send(data)
|
err := ta.connection.Send(data)
|
||||||
|
ta.postLock.Unlock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, numTokens
|
return false, numTokens
|
||||||
}
|
}
|
||||||
|
@ -172,62 +194,11 @@ func (ta *TokenBoardClient) Post(ct []byte, sig []byte) (bool, int) {
|
||||||
return false, numTokens
|
return false, numTokens
|
||||||
}
|
}
|
||||||
|
|
||||||
// MakePayment uses the PoW based token protocol to obtain more tokens
|
|
||||||
func (ta *TokenBoardClient) MakePayment() error {
|
|
||||||
log.Debugf("Making a Payment")
|
|
||||||
id, sk := primitives.InitializeEphemeralIdentity()
|
|
||||||
client := new(tor.BaseOnionService)
|
|
||||||
client.Init(ta.acn, sk, &id)
|
|
||||||
|
|
||||||
tokenApplication := new(applications.TokenApplication)
|
|
||||||
tokenApplication.TokenService = ta.tokenService
|
|
||||||
powTokenApp := new(applications.ApplicationChain).
|
|
||||||
ChainApplication(new(applications.ProofOfWorkApplication), applications.SuccessfulProofOfWorkCapability).
|
|
||||||
ChainApplication(tokenApplication, applications.HasTokensCapability)
|
|
||||||
|
|
||||||
log.Debugf("Waiting for successful PoW Auth...")
|
|
||||||
|
|
||||||
connected, err := client.Connect(ta.tokenServiceOnion, powTokenApp)
|
|
||||||
if connected && err == nil {
|
|
||||||
log.Debugf("Waiting for successful Token Acquisition...")
|
|
||||||
conn, err := client.WaitForCapabilityOrClose(ta.tokenServiceOnion, applications.HasTokensCapability)
|
|
||||||
if err == nil {
|
|
||||||
powtapp, ok := conn.App().(*applications.TokenApplication)
|
|
||||||
if ok {
|
|
||||||
// Update tokens...we need a lock here to prevent SpendToken from modifying the tokens
|
|
||||||
// during this process..
|
|
||||||
log.Debugf("Updating Tokens")
|
|
||||||
ta.tokenLock.Lock()
|
|
||||||
ta.tokens = append(ta.tokens, powtapp.Tokens...)
|
|
||||||
ta.tokenLock.Unlock()
|
|
||||||
log.Debugf("Transcript: %v", powtapp.Transcript().OutputTranscriptToAudit())
|
|
||||||
conn.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
log.Errorf("invalid cast of powapp. this should never happen %v %v", powtapp, reflect.TypeOf(conn.App()))
|
|
||||||
return errors.New("invalid cast of powapp. this should never happen")
|
|
||||||
}
|
|
||||||
log.Debugf("could not connect to payment server %v..trying again")
|
|
||||||
return ta.MakePayment()
|
|
||||||
} else if connected && err != nil {
|
|
||||||
log.Debugf("inexplicable error: %v", err)
|
|
||||||
}
|
|
||||||
log.Debugf("failed to make a connection. trying again...")
|
|
||||||
// it doesn't actually take that long to make a payment, so waiting a small amount of time should suffice
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
return ta.MakePayment()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextToken retrieves the next token
|
// NextToken retrieves the next token
|
||||||
func (ta *TokenBoardClient) NextToken(data []byte, hostname string) (privacypass.SpentToken, int, error) {
|
func (ta *TokenBoardClient) NextToken(data []byte, hostname string) (privacypass.SpentToken, int, error) {
|
||||||
// Taken the first new token, we need a lock here because tokens can be appended by MakePayment
|
token, numtokens, err := ta.tokenBoardHandler.FetchToken(ta.tokenServiceOnion)
|
||||||
// which could result in weird behaviour...
|
if err != nil {
|
||||||
ta.tokenLock.Lock()
|
return privacypass.SpentToken{}, numtokens, err
|
||||||
defer ta.tokenLock.Unlock()
|
|
||||||
if len(ta.tokens) == 0 {
|
|
||||||
return privacypass.SpentToken{}, len(ta.tokens), errors.New("no more tokens")
|
|
||||||
}
|
}
|
||||||
token := ta.tokens[0]
|
return token.SpendToken(append(data, hostname...)), numtokens, nil
|
||||||
ta.tokens = ta.tokens[1:]
|
|
||||||
return token.SpendToken(append(data, hostname...)), len(ta.tokens), nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,7 +13,7 @@ type ChunkSpec []uint64
|
||||||
// CreateChunkSpec given a full list of chunks with their downloaded status (true for downloaded, false otherwise)
|
// CreateChunkSpec given a full list of chunks with their downloaded status (true for downloaded, false otherwise)
|
||||||
// derives a list of identifiers of chunks that have not been downloaded yet
|
// derives a list of identifiers of chunks that have not been downloaded yet
|
||||||
func CreateChunkSpec(progress []bool) ChunkSpec {
|
func CreateChunkSpec(progress []bool) ChunkSpec {
|
||||||
var chunks ChunkSpec
|
chunks := ChunkSpec{}
|
||||||
for i, p := range progress {
|
for i, p := range progress {
|
||||||
if !p {
|
if !p {
|
||||||
chunks = append(chunks, uint64(i))
|
chunks = append(chunks, uint64(i))
|
||||||
|
|
|
@ -15,7 +15,6 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// FileSharingSubSystem encapsulates the functionality necessary to share and download files via Cwtch
|
// FileSharingSubSystem encapsulates the functionality necessary to share and download files via Cwtch
|
||||||
//
|
|
||||||
type FileSharingSubSystem struct {
|
type FileSharingSubSystem struct {
|
||||||
|
|
||||||
// for sharing files
|
// for sharing files
|
||||||
|
@ -35,9 +34,24 @@ func (fsss *FileSharingSubSystem) ShareFile(fileKey string, serializedManifest s
|
||||||
log.Errorf("could not share file %v", err)
|
log.Errorf("could not share file %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
log.Debugf("sharing file: %v %v", fileKey, serializedManifest)
|
||||||
fsss.activeShares.Store(fileKey, &manifest)
|
fsss.activeShares.Store(fileKey, &manifest)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StopFileShare given a file key removes the serialized manifest from consideration by the file sharing
|
||||||
|
// subsystem. Future requests on this manifest will fail, as will any in-progress chunk requests.
|
||||||
|
func (fsss *FileSharingSubSystem) StopFileShare(fileKey string) {
|
||||||
|
fsss.activeShares.Delete(fileKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StopAllFileShares removes all active file shares from consideration
|
||||||
|
func (fsss *FileSharingSubSystem) StopAllFileShares() {
|
||||||
|
fsss.activeShares.Range(func(key, value interface{}) bool {
|
||||||
|
fsss.activeShares.Delete(key)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// FetchManifest given a file key and knowledge of the manifest size in chunks (obtained via an attribute lookup)
|
// FetchManifest given a file key and knowledge of the manifest size in chunks (obtained via an attribute lookup)
|
||||||
// construct a request to download the manifest.
|
// construct a request to download the manifest.
|
||||||
func (fsss *FileSharingSubSystem) FetchManifest(fileKey string, manifestSize uint64) model.PeerMessage {
|
func (fsss *FileSharingSubSystem) FetchManifest(fileKey string, manifestSize uint64) model.PeerMessage {
|
||||||
|
@ -82,7 +96,7 @@ func (fsss *FileSharingSubSystem) RequestManifestParts(fileKey string) []model.P
|
||||||
if exists {
|
if exists {
|
||||||
oldManifest := manifestI.(*Manifest)
|
oldManifest := manifestI.(*Manifest)
|
||||||
serializedOldManifest := oldManifest.Serialize()
|
serializedOldManifest := oldManifest.Serialize()
|
||||||
log.Debugf("found serialized manifest: %s", serializedOldManifest)
|
log.Debugf("found serialized manifest")
|
||||||
|
|
||||||
// copy so we dont get threading issues by modifying the original
|
// copy so we dont get threading issues by modifying the original
|
||||||
// and then redact the file path before sending
|
// and then redact the file path before sending
|
||||||
|
@ -130,6 +144,7 @@ func (fsss *FileSharingSubSystem) ReceiveManifestPart(manifestKey string, part [
|
||||||
|
|
||||||
log.Debugf("storing manifest part %v %v", offset, end)
|
log.Debugf("storing manifest part %v %v", offset, end)
|
||||||
serializedManifestBytes := []byte(serializedManifest)
|
serializedManifestBytes := []byte(serializedManifest)
|
||||||
|
if len(serializedManifestBytes) > offset && len(serializedManifestBytes) >= end {
|
||||||
copy(serializedManifestBytes[offset:end], part[:])
|
copy(serializedManifestBytes[offset:end], part[:])
|
||||||
|
|
||||||
if len(part) < DefaultChunkSize {
|
if len(part) < DefaultChunkSize {
|
||||||
|
@ -148,6 +163,7 @@ func (fsss *FileSharingSubSystem) ReceiveManifestPart(manifestKey string, part [
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
return "", ""
|
return "", ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -8,8 +8,8 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
@ -122,7 +122,7 @@ func (m *Manifest) GetChunkBytes(id uint64) ([]byte, error) {
|
||||||
|
|
||||||
// LoadManifest reads in a json serialized Manifest from a file
|
// LoadManifest reads in a json serialized Manifest from a file
|
||||||
func LoadManifest(filename string) (*Manifest, error) {
|
func LoadManifest(filename string) (*Manifest, error) {
|
||||||
bytes, err := ioutil.ReadFile(filename)
|
bytes, err := os.ReadFile(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -232,12 +232,15 @@ func (m *Manifest) GetChunkRequest() ChunkSpec {
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrepareDownload creates an empty file of the expected size of the file described by the manifest
|
// PrepareDownload creates an empty file of the expected size of the file described by the manifest
|
||||||
// If the file already exists it assume it is the correct file and that it is resuming from when it left off.
|
// If the file already exists it assumes it is the correct file and that it is resuming from when it left off.
|
||||||
func (m *Manifest) PrepareDownload() error {
|
func (m *Manifest) PrepareDownload() error {
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
defer m.lock.Unlock()
|
defer m.lock.Unlock()
|
||||||
|
|
||||||
m.chunkComplete = make([]bool, len(m.Chunks))
|
m.chunkComplete = make([]bool, len(m.Chunks))
|
||||||
|
if m.ChunkSizeInBytes == 0 || m.FileSizeInBytes == 0 {
|
||||||
|
return fmt.Errorf("manifest is invalid")
|
||||||
|
}
|
||||||
|
|
||||||
if info, err := os.Stat(m.FileName); os.IsNotExist(err) {
|
if info, err := os.Stat(m.FileName); os.IsNotExist(err) {
|
||||||
useFileName := m.FileName
|
useFileName := m.FileName
|
||||||
|
@ -294,6 +297,12 @@ func (m *Manifest) PrepareDownload() error {
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if chunkI >= len(m.Chunks) {
|
||||||
|
log.Errorf("file is larger than the number of chunks assigned. Assuming manifest was corrupted.")
|
||||||
|
return fmt.Errorf("file is larger than the number of chunks assigned. Assuming manifest was corrupted")
|
||||||
|
}
|
||||||
|
|
||||||
hash := sha512.New()
|
hash := sha512.New()
|
||||||
hash.Write(buf[0:n])
|
hash.Write(buf[0:n])
|
||||||
chunkHash := hash.Sum(nil)
|
chunkHash := hash.Sum(nil)
|
||||||
|
@ -319,7 +328,7 @@ func (m *Manifest) Close() {
|
||||||
|
|
||||||
// Save writes a JSON encoded byte array version of the manifest to path
|
// Save writes a JSON encoded byte array version of the manifest to path
|
||||||
func (m *Manifest) Save(path string) error {
|
func (m *Manifest) Save(path string) error {
|
||||||
return ioutil.WriteFile(path, m.Serialize(), 0600)
|
return os.WriteFile(path, m.Serialize(), 0600)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Serialize returns the manifest as a JSON encoded byte array
|
// Serialize returns the manifest as a JSON encoded byte array
|
||||||
|
|
|
@ -3,8 +3,8 @@ package files
|
||||||
import (
|
import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io/ioutil"
|
|
||||||
"math"
|
"math"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -77,7 +77,7 @@ func TestManifestLarge(t *testing.T) {
|
||||||
t.Logf("%v %s", len(json), json)
|
t.Logf("%v %s", len(json), json)
|
||||||
|
|
||||||
// Pretend we downloaded the manifest
|
// Pretend we downloaded the manifest
|
||||||
ioutil.WriteFile("testdata/cwtch.png.manifest", json, 0600)
|
os.WriteFile("testdata/cwtch.png.manifest", json, 0600)
|
||||||
|
|
||||||
// Load the manifest from a file
|
// Load the manifest from a file
|
||||||
cwtchPngManifest, err := LoadManifest("testdata/cwtch.png.manifest")
|
cwtchPngManifest, err := LoadManifest("testdata/cwtch.png.manifest")
|
||||||
|
@ -93,7 +93,12 @@ func TestManifestLarge(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prepare Download
|
// Prepare Download
|
||||||
cwtchPngOutManifest, _ := LoadManifest("testdata/cwtch.png.manifest")
|
cwtchPngOutManifest, err := LoadManifest("testdata/cwtch.png.manifest")
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not prepare download %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
cwtchPngOutManifest.FileName = "testdata/cwtch.out.png"
|
cwtchPngOutManifest.FileName = "testdata/cwtch.out.png"
|
||||||
|
|
||||||
defer cwtchPngOutManifest.Close()
|
defer cwtchPngOutManifest.Close()
|
||||||
|
|
|
@ -39,6 +39,12 @@ type EncryptedGroupMessage struct {
|
||||||
Signature []byte
|
Signature []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CachedEncryptedGroupMessage provides an encapsulation of the encrypted group message for local caching / error reporting
|
||||||
|
type CachedEncryptedGroupMessage struct {
|
||||||
|
EncryptedGroupMessage
|
||||||
|
Group string
|
||||||
|
}
|
||||||
|
|
||||||
// ToBytes converts the encrypted group message to a set of bytes for serialization
|
// ToBytes converts the encrypted group message to a set of bytes for serialization
|
||||||
func (egm EncryptedGroupMessage) ToBytes() []byte {
|
func (egm EncryptedGroupMessage) ToBytes() []byte {
|
||||||
data, _ := json.Marshal(egm)
|
data, _ := json.Marshal(egm)
|
||||||
|
|
|
@ -0,0 +1,53 @@
|
||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PeerMessage is an encapsulation that can be used by higher level applications
|
||||||
|
type PeerMessage struct {
|
||||||
|
// ID **must** only contain alphanumeric characters separated by period.
|
||||||
|
ID string // A unique Message ID (primarily used for acknowledgments)
|
||||||
|
|
||||||
|
// Context **must** only contain alphanumeric characters separated by period.
|
||||||
|
Context string // A unique context identifier i.e. im.cwtch.chat
|
||||||
|
|
||||||
|
// Data can contain anything
|
||||||
|
Data []byte // A data packet.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize constructs an efficient serialized representation
|
||||||
|
// Format: [ID String] | [Context String] | Binary Data
|
||||||
|
func (m *PeerMessage) Serialize() []byte {
|
||||||
|
return append(append([]byte(m.ID+"|"), []byte(m.Context+"|")...), m.Data...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParsePeerMessage returns either a deserialized PeerMessage or an error if it is malformed
|
||||||
|
func ParsePeerMessage(message []byte) (*PeerMessage, error) {
|
||||||
|
|
||||||
|
// find the identifier prefix
|
||||||
|
idTerminator := bytes.IndexByte(message, '|')
|
||||||
|
if idTerminator != -1 && idTerminator+1 < len(message) {
|
||||||
|
// find the context terminator prefix
|
||||||
|
contextbegin := idTerminator + 1
|
||||||
|
contextTerminator := bytes.IndexByte(message[contextbegin:], '|')
|
||||||
|
if contextTerminator != -1 {
|
||||||
|
|
||||||
|
// check that we have data
|
||||||
|
dataBegin := contextbegin + contextTerminator + 1
|
||||||
|
var data []byte
|
||||||
|
if dataBegin < len(message) {
|
||||||
|
data = message[dataBegin:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// compile the message
|
||||||
|
return &PeerMessage{
|
||||||
|
ID: string(message[0:idTerminator]),
|
||||||
|
Context: string(message[contextbegin : contextbegin+contextTerminator]),
|
||||||
|
Data: data,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, errors.New("invalid message")
|
||||||
|
}
|
|
@ -1,8 +0,0 @@
|
||||||
package model
|
|
||||||
|
|
||||||
// PeerMessage is an encapsulation that can be used by higher level applications
|
|
||||||
type PeerMessage struct {
|
|
||||||
ID string // A unique Message ID (primarily used for acknowledgments)
|
|
||||||
Context string // A unique context identifier i.e. im.cwtch.chat
|
|
||||||
Data []byte // The serialized data packet.
|
|
||||||
}
|
|
|
@ -0,0 +1,160 @@
|
||||||
|
package settings
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model/constants"
|
||||||
|
"cwtch.im/cwtch/storage/v1"
|
||||||
|
"encoding/json"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
path "path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
CwtchStarted = event.Type("CwtchStarted")
|
||||||
|
CwtchStartError = event.Type("CwtchStartError")
|
||||||
|
UpdateGlobalSettings = event.Type("UpdateGlobalSettings")
|
||||||
|
)
|
||||||
|
|
||||||
|
const GlobalSettingsFilename = "ui.globals"
|
||||||
|
const saltFile = "SALT"
|
||||||
|
|
||||||
|
type NotificationPolicy string
|
||||||
|
|
||||||
|
const (
|
||||||
|
NotificationPolicyMute = NotificationPolicy("NotificationPolicy.Mute")
|
||||||
|
NotificationPolicyOptIn = NotificationPolicy("NotificationPolicy.OptIn")
|
||||||
|
NotificationPolicyDefaultAll = NotificationPolicy("NotificationPolicy.DefaultAll")
|
||||||
|
)
|
||||||
|
|
||||||
|
type GlobalSettingsFile struct {
|
||||||
|
v1.FileStore
|
||||||
|
}
|
||||||
|
|
||||||
|
type GlobalSettings struct {
|
||||||
|
Locale string
|
||||||
|
Theme string
|
||||||
|
ThemeMode string
|
||||||
|
ThemeImages bool
|
||||||
|
PreviousPid int64
|
||||||
|
ExperimentsEnabled bool
|
||||||
|
Experiments map[string]bool
|
||||||
|
BlockUnknownConnections bool
|
||||||
|
NotificationPolicy NotificationPolicy
|
||||||
|
NotificationContent string
|
||||||
|
StreamerMode bool
|
||||||
|
StateRootPane int
|
||||||
|
FirstTime bool
|
||||||
|
UIColumnModePortrait string
|
||||||
|
UIColumnModeLandscape string
|
||||||
|
DownloadPath string
|
||||||
|
AllowAdvancedTorConfig bool
|
||||||
|
CustomTorrc string
|
||||||
|
UseCustomTorrc bool
|
||||||
|
UseExternalTor bool
|
||||||
|
CustomSocksPort int
|
||||||
|
CustomControlPort int
|
||||||
|
UseTorCache bool
|
||||||
|
TorCacheDir string
|
||||||
|
BlodeuweddPath string
|
||||||
|
FontScaling float64
|
||||||
|
DefaultSaveHistory bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultGlobalSettings = GlobalSettings{
|
||||||
|
Locale: "en",
|
||||||
|
Theme: "cwtch",
|
||||||
|
ThemeMode: "dark",
|
||||||
|
ThemeImages: false,
|
||||||
|
PreviousPid: -1,
|
||||||
|
ExperimentsEnabled: false,
|
||||||
|
Experiments: map[string]bool{constants.MessageFormattingExperiment: true},
|
||||||
|
StateRootPane: 0,
|
||||||
|
FirstTime: true,
|
||||||
|
BlockUnknownConnections: false,
|
||||||
|
StreamerMode: false,
|
||||||
|
UIColumnModePortrait: "DualpaneMode.Single",
|
||||||
|
UIColumnModeLandscape: "DualpaneMode.CopyPortrait",
|
||||||
|
NotificationPolicy: "NotificationPolicy.Mute",
|
||||||
|
NotificationContent: "NotificationContent.SimpleEvent",
|
||||||
|
DownloadPath: "",
|
||||||
|
AllowAdvancedTorConfig: false,
|
||||||
|
CustomTorrc: "",
|
||||||
|
UseCustomTorrc: false,
|
||||||
|
CustomSocksPort: -1,
|
||||||
|
CustomControlPort: -1,
|
||||||
|
UseTorCache: false,
|
||||||
|
TorCacheDir: "",
|
||||||
|
BlodeuweddPath: "",
|
||||||
|
FontScaling: 1.0, // use the system pixel scaling default
|
||||||
|
DefaultSaveHistory: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitGlobalSettingsFile(directory string, password string) (*GlobalSettingsFile, error) {
|
||||||
|
var key [32]byte
|
||||||
|
salt, err := os.ReadFile(path.Join(directory, saltFile))
|
||||||
|
if err != nil {
|
||||||
|
log.Infof("Could not find salt file: %v (creating a new settings file)", err)
|
||||||
|
var newSalt [128]byte
|
||||||
|
key, newSalt, err = v1.CreateKeySalt(password)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not initialize salt: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err := os.MkdirAll(directory, 0700)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = os.WriteFile(path.Join(directory, saltFile), newSalt[:], 0600)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not write salt file: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
key = v1.CreateKey(password, salt)
|
||||||
|
}
|
||||||
|
|
||||||
|
gsFile := v1.NewFileStore(directory, GlobalSettingsFilename, key)
|
||||||
|
log.Infof("initialized global settings file: %v", gsFile)
|
||||||
|
globalSettingsFile := GlobalSettingsFile{
|
||||||
|
gsFile,
|
||||||
|
}
|
||||||
|
return &globalSettingsFile, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (globalSettingsFile *GlobalSettingsFile) ReadGlobalSettings() GlobalSettings {
|
||||||
|
settings := DefaultGlobalSettings
|
||||||
|
|
||||||
|
if globalSettingsFile == nil {
|
||||||
|
log.Errorf("Global Settings File was not Initialized Properly")
|
||||||
|
return settings
|
||||||
|
}
|
||||||
|
|
||||||
|
settingsBytes, err := globalSettingsFile.Read()
|
||||||
|
if err != nil {
|
||||||
|
log.Infof("Could not read global ui settings: %v (assuming this is a first time app deployment...)", err)
|
||||||
|
return settings //firstTime = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// note: by giving json.Unmarshal settings we are providing it defacto defaults
|
||||||
|
// from DefaultGlobalSettings
|
||||||
|
err = json.Unmarshal(settingsBytes, &settings)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not parse global ui settings: %v\n", err)
|
||||||
|
// TODO if settings is corrupted, we probably want to alert the UI.
|
||||||
|
return settings //firstTime = true
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Settings: %#v", settings)
|
||||||
|
return settings
|
||||||
|
}
|
||||||
|
|
||||||
|
func (globalSettingsFile *GlobalSettingsFile) WriteGlobalSettings(globalSettings GlobalSettings) {
|
||||||
|
bytes, _ := json.Marshal(globalSettings)
|
||||||
|
// override first time setting
|
||||||
|
globalSettings.FirstTime = true
|
||||||
|
err := globalSettingsFile.Write(bytes)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not write global ui settings: %v\n", err)
|
||||||
|
}
|
||||||
|
}
|
|
@ -8,7 +8,7 @@ import (
|
||||||
"golang.org/x/crypto/pbkdf2"
|
"golang.org/x/crypto/pbkdf2"
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"os"
|
||||||
path "path/filepath"
|
path "path/filepath"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -35,7 +35,7 @@ func CreateKey(password string, salt []byte) [32]byte {
|
||||||
return dkr
|
return dkr
|
||||||
}
|
}
|
||||||
|
|
||||||
//EncryptFileData encrypts the data with the supplied key
|
// EncryptFileData encrypts the data with the supplied key
|
||||||
func EncryptFileData(data []byte, key [32]byte) ([]byte, error) {
|
func EncryptFileData(data []byte, key [32]byte) ([]byte, error) {
|
||||||
var nonce [24]byte
|
var nonce [24]byte
|
||||||
|
|
||||||
|
@ -48,7 +48,7 @@ func EncryptFileData(data []byte, key [32]byte) ([]byte, error) {
|
||||||
return encrypted, nil
|
return encrypted, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//DecryptFile decrypts the passed ciphertext with the supplied key.
|
// DecryptFile decrypts the passed ciphertext with the supplied key.
|
||||||
func DecryptFile(ciphertext []byte, key [32]byte) ([]byte, error) {
|
func DecryptFile(ciphertext []byte, key [32]byte) ([]byte, error) {
|
||||||
var decryptNonce [24]byte
|
var decryptNonce [24]byte
|
||||||
copy(decryptNonce[:], ciphertext[:24])
|
copy(decryptNonce[:], ciphertext[:24])
|
||||||
|
@ -61,7 +61,7 @@ func DecryptFile(ciphertext []byte, key [32]byte) ([]byte, error) {
|
||||||
|
|
||||||
// ReadEncryptedFile reads data from an encrypted file in directory with key
|
// ReadEncryptedFile reads data from an encrypted file in directory with key
|
||||||
func ReadEncryptedFile(directory, filename string, key [32]byte) ([]byte, error) {
|
func ReadEncryptedFile(directory, filename string, key [32]byte) ([]byte, error) {
|
||||||
encryptedbytes, err := ioutil.ReadFile(path.Join(directory, filename))
|
encryptedbytes, err := os.ReadFile(path.Join(directory, filename))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return DecryptFile(encryptedbytes, key)
|
return DecryptFile(encryptedbytes, key)
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,7 +2,6 @@ package v1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
)
|
)
|
||||||
|
@ -38,7 +37,7 @@ func (fps *fileStore) Write(data []byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = ioutil.WriteFile(path.Join(fps.directory, fps.filename), encryptedbytes, 0600)
|
err = os.WriteFile(path.Join(fps.directory, fps.filename), encryptedbytes, 0600)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,14 +5,14 @@ import (
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"io/ioutil"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
)
|
)
|
||||||
|
|
||||||
const profileFilename = "profile"
|
const profileFilename = "profile"
|
||||||
const saltFile = "SALT"
|
const saltFile = "SALT"
|
||||||
|
|
||||||
//ProfileStoreV1 storage for profiles and message streams that uses in memory key and fs stored salt instead of in memory password
|
// ProfileStoreV1 storage for profiles and message streams that uses in memory key and fs stored salt instead of in memory password
|
||||||
type ProfileStoreV1 struct {
|
type ProfileStoreV1 struct {
|
||||||
fs FileStore
|
fs FileStore
|
||||||
directory string
|
directory string
|
||||||
|
@ -24,7 +24,7 @@ type ProfileStoreV1 struct {
|
||||||
// LoadProfileWriterStore loads a profile store from filestore listening for events and saving them
|
// LoadProfileWriterStore loads a profile store from filestore listening for events and saving them
|
||||||
// directory should be $appDir/profiles/$rand
|
// directory should be $appDir/profiles/$rand
|
||||||
func LoadProfileWriterStore(directory, password string) (*ProfileStoreV1, error) {
|
func LoadProfileWriterStore(directory, password string) (*ProfileStoreV1, error) {
|
||||||
salt, err := ioutil.ReadFile(path.Join(directory, saltFile))
|
salt, err := os.ReadFile(path.Join(directory, saltFile))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -67,19 +67,23 @@ func (ps *ProfileStoreV1) load() error {
|
||||||
|
|
||||||
if contact.Attributes[event.SaveHistoryKey] == event.SaveHistoryConfirmed {
|
if contact.Attributes[event.SaveHistoryKey] == event.SaveHistoryConfirmed {
|
||||||
ss := NewStreamStore(ps.directory, contact.LocalID, ps.key)
|
ss := NewStreamStore(ps.directory, contact.LocalID, ps.key)
|
||||||
cp.Contacts[contact.Onion].Timeline.SetMessages(ss.Read())
|
if contact, exists := cp.Contacts[contact.Onion]; exists {
|
||||||
|
contact.Timeline.SetMessages(ss.Read())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for gid, group := range cp.Groups {
|
for gid, group := range cp.Groups {
|
||||||
if group.Version == 0 {
|
if group.Version == 0 {
|
||||||
log.Infof("group %v is of unsupported version 0. dropping group...\n", group.GroupID)
|
log.Debugf("group %v is of unsupported version 0. dropping group...\n", group.GroupID)
|
||||||
delete(cp.Groups, gid)
|
delete(cp.Groups, gid)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
ss := NewStreamStore(ps.directory, group.LocalID, ps.key)
|
ss := NewStreamStore(ps.directory, group.LocalID, ps.key)
|
||||||
cp.Groups[gid].Timeline.SetMessages(ss.Read())
|
if group, exists := cp.Groups[gid]; exists {
|
||||||
cp.Groups[gid].Timeline.Sort()
|
group.Timeline.SetMessages(ss.Read())
|
||||||
|
group.Timeline.Sort()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,7 +5,6 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"io/ioutil"
|
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
@ -93,7 +92,7 @@ func (ss *streamStore) updateFile() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ioutil.WriteFile(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, 0)), encryptedMsgs, 0600)
|
os.WriteFile(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, 0)), encryptedMsgs, 0600)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -153,7 +152,7 @@ func (ss *streamStore) WriteN(messages []model.Message) {
|
||||||
ss.lock.Lock()
|
ss.lock.Lock()
|
||||||
defer ss.lock.Unlock()
|
defer ss.lock.Unlock()
|
||||||
|
|
||||||
log.Infof("WriteN %v messages\n", len(messages))
|
log.Debugf("WriteN %v messages\n", len(messages))
|
||||||
i := 0
|
i := 0
|
||||||
for _, m := range messages {
|
for _, m := range messages {
|
||||||
ss.updateBuffer(m)
|
ss.updateBuffer(m)
|
||||||
|
|
Binary file not shown.
After Width: | Height: | Size: 51 KiB |
|
@ -0,0 +1,201 @@
|
||||||
|
package filesharing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
app2 "cwtch.im/cwtch/app"
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/functionality/filesharing"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
"cwtch.im/cwtch/model/constants"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"encoding/base64"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
// Import SQL Cipher
|
||||||
|
mrand "math/rand"
|
||||||
|
"os"
|
||||||
|
"os/user"
|
||||||
|
"path"
|
||||||
|
"runtime"
|
||||||
|
"runtime/pprof"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/mutecomm/go-sqlcipher/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
func waitForPeerPeerConnection(t *testing.T, peera peer.CwtchPeer, peerb peer.CwtchPeer) {
|
||||||
|
for {
|
||||||
|
state := peera.GetPeerState(peerb.GetOnion())
|
||||||
|
if state == connections.FAILED {
|
||||||
|
t.Fatalf("%v could not connect to %v", peera.GetOnion(), peerb.GetOnion())
|
||||||
|
}
|
||||||
|
if state != connections.AUTHENTICATED {
|
||||||
|
fmt.Printf("peer %v waiting connect to peer %v, currently: %v\n", peera.GetOnion(), peerb.GetOnion(), connections.ConnectionStateName[state])
|
||||||
|
time.Sleep(time.Second * 5)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
peerAName, _ := peera.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
||||||
|
peerBName, _ := peerb.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
||||||
|
fmt.Printf("%v CONNECTED and AUTHED to %v\n", peerAName, peerBName)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFileSharing(t *testing.T) {
|
||||||
|
numGoRoutinesStart := runtime.NumGoroutine()
|
||||||
|
os.RemoveAll("cwtch.out.png")
|
||||||
|
os.RemoveAll("cwtch.out.png.manifest")
|
||||||
|
os.RemoveAll("storage")
|
||||||
|
os.RemoveAll("tordir")
|
||||||
|
os.RemoveAll("./download_dir")
|
||||||
|
|
||||||
|
log.SetLevel(log.LevelInfo)
|
||||||
|
|
||||||
|
os.Mkdir("tordir", 0700)
|
||||||
|
dataDir := path.Join("tordir", "tor")
|
||||||
|
os.MkdirAll(dataDir, 0700)
|
||||||
|
|
||||||
|
// we don't need real randomness for the port, just to avoid a possible conflict...
|
||||||
|
socksPort := mrand.Intn(1000) + 9051
|
||||||
|
controlPort := mrand.Intn(1000) + 9052
|
||||||
|
|
||||||
|
// generate a random password
|
||||||
|
key := make([]byte, 64)
|
||||||
|
_, err := rand.Read(key)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
useCache := os.Getenv("TORCACHE") == "true"
|
||||||
|
|
||||||
|
torDataDir := ""
|
||||||
|
if useCache {
|
||||||
|
log.Infof("using tor cache")
|
||||||
|
torDataDir = filepath.Join(dataDir, "data-dir-torcache")
|
||||||
|
os.MkdirAll(torDataDir, 0700)
|
||||||
|
} else {
|
||||||
|
log.Infof("using clean tor data dir")
|
||||||
|
if torDataDir, err = os.MkdirTemp(dataDir, "data-dir-"); err != nil {
|
||||||
|
t.Fatalf("could not create data dir")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
|
||||||
|
acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "tor"), torDataDir, controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Could not start Tor: %v", err)
|
||||||
|
}
|
||||||
|
acn.WaitTillBootstrapped()
|
||||||
|
defer acn.Close()
|
||||||
|
|
||||||
|
app := app2.NewApp(acn, "./storage", app2.LoadAppSettings("./storage"))
|
||||||
|
|
||||||
|
usr, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("current user is undefined")
|
||||||
|
}
|
||||||
|
cwtchDir := path.Join(usr.HomeDir, ".cwtch")
|
||||||
|
os.Mkdir(cwtchDir, 0700)
|
||||||
|
os.RemoveAll(path.Join(cwtchDir, "testing"))
|
||||||
|
os.Mkdir(path.Join(cwtchDir, "testing"), 0700)
|
||||||
|
|
||||||
|
t.Logf("Creating Alice...")
|
||||||
|
app.CreateProfile("alice", "asdfasdf", true)
|
||||||
|
|
||||||
|
t.Logf("Creating Bob...")
|
||||||
|
app.CreateProfile("bob", "asdfasdf", true)
|
||||||
|
|
||||||
|
t.Logf("** Waiting for Alice, Bob...")
|
||||||
|
alice := app2.WaitGetPeer(app, "alice")
|
||||||
|
app.ActivatePeerEngine(alice.GetOnion())
|
||||||
|
app.ConfigureConnections(alice.GetOnion(), true, true, true)
|
||||||
|
bob := app2.WaitGetPeer(app, "bob")
|
||||||
|
app.ActivatePeerEngine(bob.GetOnion())
|
||||||
|
app.ConfigureConnections(bob.GetOnion(), true, true, true)
|
||||||
|
|
||||||
|
alice.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer})
|
||||||
|
bob.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer})
|
||||||
|
|
||||||
|
// Turn on File Sharing Experiment...
|
||||||
|
settings := app.ReadSettings()
|
||||||
|
settings.ExperimentsEnabled = true
|
||||||
|
settings.DownloadPath = "./download_dir"
|
||||||
|
os.RemoveAll(path.Join(settings.DownloadPath, "cwtch.png"))
|
||||||
|
os.RemoveAll(path.Join(settings.DownloadPath, "cwtch.png.manifest"))
|
||||||
|
os.MkdirAll(settings.DownloadPath, 0700)
|
||||||
|
settings.Experiments[constants.FileSharingExperiment] = true
|
||||||
|
// Turn Auto Downloading On... (Part of the Image Previews / Profile Images Experiment)
|
||||||
|
settings.Experiments[constants.ImagePreviewsExperiment] = true
|
||||||
|
app.UpdateSettings(settings)
|
||||||
|
|
||||||
|
t.Logf("** Launching Peers...")
|
||||||
|
waitTime := time.Duration(30) * time.Second
|
||||||
|
t.Logf("** Waiting for Alice, Bob to connect with onion network... (%v)\n", waitTime)
|
||||||
|
time.Sleep(waitTime)
|
||||||
|
|
||||||
|
bob.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true)
|
||||||
|
alice.NewContactConversation(bob.GetOnion(), model.DefaultP2PAccessControl(), true)
|
||||||
|
alice.PeerWithOnion(bob.GetOnion())
|
||||||
|
|
||||||
|
json, err := alice.EnhancedGetConversationAccessControlList(1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error!: %v", err)
|
||||||
|
}
|
||||||
|
t.Logf("alice<->bob ACL: %s", json)
|
||||||
|
|
||||||
|
t.Logf("Waiting for alice and Bob to peer...")
|
||||||
|
waitForPeerPeerConnection(t, alice, bob)
|
||||||
|
err = alice.AcceptConversation(1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error!: %v", err)
|
||||||
|
}
|
||||||
|
err = bob.AcceptConversation(1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error!: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("Alice and Bob are Connected!!")
|
||||||
|
|
||||||
|
filesharingFunctionality := filesharing.FunctionalityGate()
|
||||||
|
|
||||||
|
_, fileSharingMessage, err := filesharingFunctionality.ShareFile("cwtch.png", alice)
|
||||||
|
alice.SendMessage(1, fileSharingMessage)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error!: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// test that bob can download and verify the file
|
||||||
|
// The main difference here is that bob doesn't need to do anything...
|
||||||
|
// testBobDownloadFile(t, bob, filesharingFunctionality, queueOracle)
|
||||||
|
|
||||||
|
// Wait for say...
|
||||||
|
time.Sleep(10 * time.Second)
|
||||||
|
|
||||||
|
if _, err := os.Stat(path.Join(settings.DownloadPath, "cwtch.png")); errors.Is(err, os.ErrNotExist) {
|
||||||
|
// path/to/whatever does not exist
|
||||||
|
t.Fatalf("cwtch.png should have been automatically downloaded...")
|
||||||
|
}
|
||||||
|
|
||||||
|
app.Shutdown()
|
||||||
|
acn.Close()
|
||||||
|
time.Sleep(20 * time.Second)
|
||||||
|
numGoRoutinesPostACN := runtime.NumGoroutine()
|
||||||
|
|
||||||
|
// Printing out the current goroutines
|
||||||
|
// Very useful if we are leaking any.
|
||||||
|
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||||
|
|
||||||
|
if numGoRoutinesStart != numGoRoutinesPostACN {
|
||||||
|
t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostACN)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -1,10 +1,8 @@
|
||||||
package testing
|
package testing
|
||||||
|
|
||||||
import (
|
import (
|
||||||
// Import SQL Cipher
|
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
app2 "cwtch.im/cwtch/app"
|
app2 "cwtch.im/cwtch/app"
|
||||||
"cwtch.im/cwtch/app/utils"
|
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"cwtch.im/cwtch/model/attr"
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
@ -13,16 +11,19 @@ import (
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
_ "github.com/mutecomm/go-sqlcipher/v4"
|
_ "github.com/mutecomm/go-sqlcipher/v4"
|
||||||
"io/ioutil"
|
|
||||||
mrand "math/rand"
|
mrand "math/rand"
|
||||||
"os"
|
"os"
|
||||||
"os/user"
|
"os/user"
|
||||||
"path"
|
"path"
|
||||||
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/pprof"
|
"runtime/pprof"
|
||||||
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -38,7 +39,7 @@ func waitForConnection(t *testing.T, peer peer.CwtchPeer, addr string, target co
|
||||||
for {
|
for {
|
||||||
log.Infof("%v checking connection...\n", peerName)
|
log.Infof("%v checking connection...\n", peerName)
|
||||||
state := peer.GetPeerState(addr)
|
state := peer.GetPeerState(addr)
|
||||||
log.Infof("Waiting for Peer %v to %v - state: %v\n", peerName, addr, state)
|
log.Infof("Waiting for Peer %v to %v - state: %v\n", peerName, addr, connections.ConnectionStateName[state])
|
||||||
if state == connections.FAILED {
|
if state == connections.FAILED {
|
||||||
t.Fatalf("%v could not connect to %v", peer.GetOnion(), addr)
|
t.Fatalf("%v could not connect to %v", peer.GetOnion(), addr)
|
||||||
}
|
}
|
||||||
|
@ -53,6 +54,28 @@ func waitForConnection(t *testing.T, peer peer.CwtchPeer, addr string, target co
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func waitForRetVal(peer peer.CwtchPeer, convId int, szp attr.ScopedZonedPath) {
|
||||||
|
for {
|
||||||
|
_, err := peer.GetConversationAttribute(convId, szp)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second * 5)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkAndLoadTokens() []*privacypass.Token {
|
||||||
|
var tokens []*privacypass.Token
|
||||||
|
data, err := os.ReadFile("../tokens")
|
||||||
|
if err == nil {
|
||||||
|
err := json.Unmarshal(data, &tokens)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("could not load tokens from file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tokens
|
||||||
|
}
|
||||||
|
|
||||||
func TestCwtchPeerIntegration(t *testing.T) {
|
func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
|
|
||||||
// Goroutine Monitoring Start..
|
// Goroutine Monitoring Start..
|
||||||
|
@ -64,12 +87,18 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
log.ExcludeFromPattern("outbound/3dhauthchannel")
|
log.ExcludeFromPattern("outbound/3dhauthchannel")
|
||||||
log.ExcludeFromPattern("event/eventmanager")
|
log.ExcludeFromPattern("event/eventmanager")
|
||||||
log.ExcludeFromPattern("tapir")
|
log.ExcludeFromPattern("tapir")
|
||||||
|
|
||||||
|
// checking if we should use the token cache
|
||||||
|
cachedTokens := checkAndLoadTokens()
|
||||||
|
if len(cachedTokens) > 7 {
|
||||||
|
log.Infof("using cached tokens")
|
||||||
|
}
|
||||||
|
|
||||||
os.Mkdir("tordir", 0700)
|
os.Mkdir("tordir", 0700)
|
||||||
dataDir := path.Join("tordir", "tor")
|
dataDir := path.Join("tordir", "tor")
|
||||||
os.MkdirAll(dataDir, 0700)
|
os.MkdirAll(dataDir, 0700)
|
||||||
|
|
||||||
// we don't need real randomness for the port, just to avoid a possible conflict...
|
// we don't need real randomness for the port, just to avoid a possible conflict...
|
||||||
mrand.Seed(int64(time.Now().Nanosecond()))
|
|
||||||
socksPort := mrand.Intn(1000) + 9051
|
socksPort := mrand.Intn(1000) + 9051
|
||||||
controlPort := mrand.Intn(1000) + 9052
|
controlPort := mrand.Intn(1000) + 9052
|
||||||
|
|
||||||
|
@ -80,16 +109,26 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
useCache := os.Getenv("TORCACHE") == "true"
|
||||||
|
|
||||||
torDataDir := ""
|
torDataDir := ""
|
||||||
if torDataDir, err = ioutil.TempDir(dataDir, "data-dir-"); err != nil {
|
if useCache {
|
||||||
|
log.Infof("using tor cache")
|
||||||
|
torDataDir = filepath.Join(dataDir, "data-dir-torcache")
|
||||||
|
os.MkdirAll(torDataDir, 0700)
|
||||||
|
} else {
|
||||||
|
log.Infof("using clean tor data dir")
|
||||||
|
if torDataDir, err = os.MkdirTemp(dataDir, "data-dir-"); err != nil {
|
||||||
t.Fatalf("could not create data dir")
|
t.Fatalf("could not create data dir")
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
|
tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
|
||||||
acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "tor"), torDataDir, controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "tor"), torDataDir, controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Could not start Tor: %v", err)
|
t.Fatalf("Could not start Tor: %v", err)
|
||||||
}
|
}
|
||||||
|
log.Infof("Waiting for tor to bootstrap...")
|
||||||
acn.WaitTillBootstrapped()
|
acn.WaitTillBootstrapped()
|
||||||
defer acn.Close()
|
defer acn.Close()
|
||||||
|
|
||||||
|
@ -99,7 +138,7 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
const ServerAddr = "nfhxzvzxinripgdh4t2m4xcy3crf6p4cbhectgckuj3idsjsaotgowad"
|
const ServerAddr = "nfhxzvzxinripgdh4t2m4xcy3crf6p4cbhectgckuj3idsjsaotgowad"
|
||||||
serverKeyBundle, _ := base64.StdEncoding.DecodeString(ServerKeyBundleBase64)
|
serverKeyBundle, _ := base64.StdEncoding.DecodeString(ServerKeyBundleBase64)
|
||||||
|
|
||||||
app := app2.NewApp(acn, "./storage")
|
app := app2.NewApp(acn, "./storage", app2.LoadAppSettings("./storage"))
|
||||||
|
|
||||||
usr, _ := user.Current()
|
usr, _ := user.Current()
|
||||||
cwtchDir := path.Join(usr.HomeDir, ".cwtch")
|
cwtchDir := path.Join(usr.HomeDir, ".cwtch")
|
||||||
|
@ -110,53 +149,67 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
numGoRoutinesPostAppStart := runtime.NumGoroutine()
|
numGoRoutinesPostAppStart := runtime.NumGoroutine()
|
||||||
|
|
||||||
// ***** cwtchPeer setup *****
|
// ***** cwtchPeer setup *****
|
||||||
|
// Turn on Groups Experiment...
|
||||||
|
settings := app.ReadSettings()
|
||||||
|
settings.ExperimentsEnabled = true
|
||||||
|
settings.Experiments[constants.GroupsExperiment] = true
|
||||||
|
app.UpdateSettings(settings)
|
||||||
|
|
||||||
log.Infoln("Creating Alice...")
|
log.Infoln("Creating Alice...")
|
||||||
app.CreateTaggedPeer("Alice", "asdfasdf", "test")
|
app.CreateProfile("Alice", "asdfasdf", true)
|
||||||
|
|
||||||
log.Infoln("Creating Bob...")
|
log.Infoln("Creating Bob...")
|
||||||
app.CreateTaggedPeer("Bob", "asdfasdf", "test")
|
app.CreateProfile("Bob", "asdfasdf", true)
|
||||||
|
|
||||||
log.Infoln("Creating Carol...")
|
log.Infoln("Creating Carol...")
|
||||||
app.CreateTaggedPeer("Carol", "asdfasdf", "test")
|
app.CreateProfile("Carol", "asdfasdf", true)
|
||||||
|
|
||||||
alice := utils.WaitGetPeer(app, "Alice")
|
alice := app2.WaitGetPeer(app, "Alice")
|
||||||
|
aliceBus := app.GetEventBus(alice.GetOnion())
|
||||||
|
app.ActivatePeerEngine(alice.GetOnion())
|
||||||
|
app.ConfigureConnections(alice.GetOnion(), true, true, true)
|
||||||
log.Infoln("Alice created:", alice.GetOnion())
|
log.Infoln("Alice created:", alice.GetOnion())
|
||||||
alice.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Alice")
|
// alice.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Alice") <- This is now done automatically by ProfileValueExtension, keeping this here for clarity
|
||||||
alice.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer})
|
alice.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer})
|
||||||
|
|
||||||
bob := utils.WaitGetPeer(app, "Bob")
|
bob := app2.WaitGetPeer(app, "Bob")
|
||||||
|
bobBus := app.GetEventBus(bob.GetOnion())
|
||||||
|
app.ActivatePeerEngine(bob.GetOnion())
|
||||||
|
app.ConfigureConnections(bob.GetOnion(), true, true, true)
|
||||||
log.Infoln("Bob created:", bob.GetOnion())
|
log.Infoln("Bob created:", bob.GetOnion())
|
||||||
bob.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Bob")
|
// bob.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Bob") <- This is now done automatically by ProfileValueExtension, keeping this here for clarity
|
||||||
bob.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer})
|
bob.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer})
|
||||||
|
|
||||||
carol := utils.WaitGetPeer(app, "Carol")
|
carol := app2.WaitGetPeer(app, "Carol")
|
||||||
|
carolBus := app.GetEventBus(carol.GetOnion())
|
||||||
|
app.ActivatePeerEngine(carol.GetOnion())
|
||||||
|
app.ConfigureConnections(carol.GetOnion(), true, true, true)
|
||||||
log.Infoln("Carol created:", carol.GetOnion())
|
log.Infoln("Carol created:", carol.GetOnion())
|
||||||
carol.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Carol")
|
// carol.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Carol") <- This is now done automatically by ProfileValueExtension, keeping this here for clarity
|
||||||
carol.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer})
|
carol.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer})
|
||||||
|
|
||||||
app.LaunchPeers()
|
|
||||||
|
|
||||||
waitTime := time.Duration(60) * time.Second
|
waitTime := time.Duration(60) * time.Second
|
||||||
log.Infof("** Waiting for Alice, Bob, and Carol to connect with onion network... (%v)\n", waitTime)
|
log.Infof("** Waiting for Alice, Bob, and Carol to register their onion hidden service on the network... (%v)\n", waitTime)
|
||||||
time.Sleep(waitTime)
|
time.Sleep(waitTime)
|
||||||
numGoRoutinesPostPeerStart := runtime.NumGoroutine()
|
numGoRoutinesPostPeerStart := runtime.NumGoroutine()
|
||||||
log.Infof("** Wait Done!")
|
log.Infof("** Wait Done!")
|
||||||
|
|
||||||
// ***** Peering, server joining, group creation / invite *****
|
// ***** Peering, server joining, group creation / invite *****
|
||||||
|
|
||||||
log.Infoln("Alice peering with Bob...")
|
log.Infoln("Alice and Bob creating conversations...")
|
||||||
// Simulate Alice Adding Bob
|
// Simulate Alice Adding Bob
|
||||||
|
log.Infof(" alice.NewConvo(bob)...")
|
||||||
alice2bobConversationID, err := alice.NewContactConversation(bob.GetOnion(), model.DefaultP2PAccessControl(), true)
|
alice2bobConversationID, err := alice.NewContactConversation(bob.GetOnion(), model.DefaultP2PAccessControl(), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("error adding conversaiton %v", alice2bobConversationID)
|
t.Fatalf("error adding conversaiton %v", alice2bobConversationID)
|
||||||
}
|
}
|
||||||
|
log.Infof(" bob.NewConvo(alice)...")
|
||||||
bob2aliceConversationID, err := bob.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true)
|
bob2aliceConversationID, err := bob.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("error adding conversaiton %v", bob2aliceConversationID)
|
t.Fatalf("error adding conversaiton %v", bob2aliceConversationID)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("Alice peering with Carol...")
|
log.Infof("Alice and Carol creating conversations...")
|
||||||
// Simulate Alice Adding Carol
|
// Simulate Alice Adding Carol
|
||||||
alice2carolConversationID, err := alice.NewContactConversation(carol.GetOnion(), model.DefaultP2PAccessControl(), true)
|
alice2carolConversationID, err := alice.NewContactConversation(carol.GetOnion(), model.DefaultP2PAccessControl(), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -167,7 +220,9 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
t.Fatalf("error adding conversaiton %v", carol2aliceConversationID)
|
t.Fatalf("error adding conversaiton %v", carol2aliceConversationID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.Infof("Alice peering with Bob...")
|
||||||
alice.PeerWithOnion(bob.GetOnion())
|
alice.PeerWithOnion(bob.GetOnion())
|
||||||
|
log.Infof("Alice Peering with Carol...")
|
||||||
alice.PeerWithOnion(carol.GetOnion())
|
alice.PeerWithOnion(carol.GetOnion())
|
||||||
|
|
||||||
// Test that we can rekey alice without issues...
|
// Test that we can rekey alice without issues...
|
||||||
|
@ -198,23 +253,27 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
// Probably related to latency/throughput problems in the underlying tor network.
|
// Probably related to latency/throughput problems in the underlying tor network.
|
||||||
time.Sleep(30 * time.Second)
|
time.Sleep(30 * time.Second)
|
||||||
|
|
||||||
|
waitForRetVal(bob, bob2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
aliceName, err := bob.GetConversationAttribute(bob2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
aliceName, err := bob.GetConversationAttribute(bob2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
if err != nil || aliceName != "Alice" {
|
if err != nil || aliceName != "Alice" {
|
||||||
t.Fatalf("Bob: alice GetKeyVal error on alice peer.name %v: %v\n", aliceName, err)
|
t.Fatalf("Bob: alice GetKeyVal error on alice peer.name %v: %v\n", aliceName, err)
|
||||||
}
|
}
|
||||||
log.Infof("Bob has alice's name as '%v'\n", aliceName)
|
log.Infof("Bob has alice's name as '%v'\n", aliceName)
|
||||||
|
|
||||||
|
waitForRetVal(alice, alice2bobConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
bobName, err := alice.GetConversationAttribute(alice2bobConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
bobName, err := alice.GetConversationAttribute(alice2bobConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
if err != nil || bobName != "Bob" {
|
if err != nil || bobName != "Bob" {
|
||||||
t.Fatalf("Alice: bob GetKeyVal error on bob peer.name %v: %v \n", bobName, err)
|
t.Fatalf("Alice: bob GetKeyVal error on bob peer.name %v: %v \n", bobName, err)
|
||||||
}
|
}
|
||||||
log.Infof("Alice has bob's name as '%v'\n", bobName)
|
log.Infof("Alice has bob's name as '%v'\n", bobName)
|
||||||
|
|
||||||
|
waitForRetVal(carol, carol2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
aliceName, err = carol.GetConversationAttribute(carol2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
aliceName, err = carol.GetConversationAttribute(carol2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
if err != nil || aliceName != "Alice" {
|
if err != nil || aliceName != "Alice" {
|
||||||
t.Fatalf("carol GetKeyVal error for alice peer.name %v: %v\n", aliceName, err)
|
t.Fatalf("carol GetKeyVal error for alice peer.name %v: %v\n", aliceName, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
waitForRetVal(alice, alice2carolConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
carolName, err := alice.GetConversationAttribute(alice2carolConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
carolName, err := alice.GetConversationAttribute(alice2carolConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)))
|
||||||
if err != nil || carolName != "Carol" {
|
if err != nil || carolName != "Carol" {
|
||||||
t.Fatalf("alice GetKeyVal error, carol peer.name: %v: %v\n", carolName, err)
|
t.Fatalf("alice GetKeyVal error, carol peer.name: %v: %v\n", carolName, err)
|
||||||
|
@ -222,18 +281,16 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
log.Infof("Alice has carol's name as '%v'\n", carolName)
|
log.Infof("Alice has carol's name as '%v'\n", carolName)
|
||||||
|
|
||||||
// Group Testing
|
// Group Testing
|
||||||
|
usedTokens := len(aliceLines)
|
||||||
// Simulate Alice Creating a Group
|
// Simulate Alice Creating a Group
|
||||||
log.Infoln("Alice joining server...")
|
log.Infoln("Alice joining server...")
|
||||||
if _, err := alice.AddServer(string(serverKeyBundle)); err != nil {
|
if serverOnion, err := alice.AddServer(string(serverKeyBundle)); err != nil {
|
||||||
t.Fatalf("Failed to Add Server Bundle %v", err)
|
if len(cachedTokens) > len(aliceLines) {
|
||||||
|
alice.StoreCachedTokens(serverOnion, cachedTokens[0:len(aliceLines)])
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ading here will require resync
|
t.Fatalf("Failed to Add Server Bundle %v", err)
|
||||||
carol.AddServer(string(serverKeyBundle))
|
}
|
||||||
|
|
||||||
log.Infof("Waiting for alice to join server...")
|
|
||||||
waitForConnection(t, alice, ServerAddr, connections.SYNCED)
|
|
||||||
|
|
||||||
// Creating a Group
|
// Creating a Group
|
||||||
log.Infof("Creating group on %v...", ServerAddr)
|
log.Infof("Creating group on %v...", ServerAddr)
|
||||||
|
@ -246,7 +303,7 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
|
|
||||||
// Invites
|
// Invites
|
||||||
log.Infoln("Alice inviting Bob to group...")
|
log.Infoln("Alice inviting Bob to group...")
|
||||||
err = alice.SendInviteToConversation(alice2bobConversationID, aliceGroupConversationID)
|
_, err = alice.SendInviteToConversation(alice2bobConversationID, aliceGroupConversationID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Error for Alice inviting Bob to group: %v", err)
|
t.Fatalf("Error for Alice inviting Bob to group: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -260,38 +317,45 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
log.Infof("Parsed Overlay Message: %v", overlayMessage)
|
log.Infof("Parsed Overlay Message: %v", overlayMessage)
|
||||||
err = bob.ImportBundle(overlayMessage.Data)
|
err = bob.ImportBundle(overlayMessage.Data)
|
||||||
log.Infof("Result of Bob Importing the Bundle from Alice: %v", err)
|
log.Infof("Result of Bob Importing the Bundle from Alice: %v", err)
|
||||||
|
if len(cachedTokens) > (usedTokens + len(bobLines)) {
|
||||||
|
bob.StoreCachedTokens(ServerAddr, cachedTokens[usedTokens:usedTokens+len(bobLines)])
|
||||||
|
usedTokens += len(bobLines)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Waiting for alice to join server...")
|
||||||
|
waitForConnection(t, alice, ServerAddr, connections.SYNCED)
|
||||||
log.Infof("Waiting for Bob to join connect to group server...")
|
log.Infof("Waiting for Bob to join connect to group server...")
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
bobGroupConversationID := 3
|
|
||||||
waitForConnection(t, bob, ServerAddr, connections.SYNCED)
|
waitForConnection(t, bob, ServerAddr, connections.SYNCED)
|
||||||
|
|
||||||
|
// 1 = Alice
|
||||||
|
// 2 = Server
|
||||||
|
// 3 = Group...
|
||||||
|
bobGroupConversationID := 3
|
||||||
|
|
||||||
numGoRoutinesPostServerConnect := runtime.NumGoroutine()
|
numGoRoutinesPostServerConnect := runtime.NumGoroutine()
|
||||||
|
|
||||||
// ***** Conversation *****
|
// ***** Conversation *****
|
||||||
log.Infof("Starting conversation in group...")
|
log.Infof("Starting conversation in group...")
|
||||||
checkSendMessageToGroup(t, alice, aliceGroupConversationID, aliceLines[0])
|
checkSendMessageToGroup(t, alice, aliceBus, aliceGroupConversationID, aliceLines[0])
|
||||||
checkSendMessageToGroup(t, bob, bobGroupConversationID, bobLines[0])
|
checkSendMessageToGroup(t, bob, bobBus, bobGroupConversationID, bobLines[0])
|
||||||
checkSendMessageToGroup(t, alice, aliceGroupConversationID, aliceLines[1])
|
checkSendMessageToGroup(t, alice, aliceBus, aliceGroupConversationID, aliceLines[1])
|
||||||
checkSendMessageToGroup(t, bob, bobGroupConversationID, bobLines[1])
|
checkSendMessageToGroup(t, bob, bobBus, bobGroupConversationID, bobLines[1])
|
||||||
|
|
||||||
// Alice invites Bob to the Group...
|
// Pretend that Carol Acquires the Overlay Message through some other means...
|
||||||
message, _, err = carol.GetChannelMessage(carol2aliceConversationID, 0, 1)
|
|
||||||
log.Infof("Alice message to Carol %v %v", message, err)
|
|
||||||
json.Unmarshal([]byte(message), &overlayMessage)
|
json.Unmarshal([]byte(message), &overlayMessage)
|
||||||
log.Infof("Parsed Overlay Message: %v", overlayMessage)
|
log.Infof("Parsed Overlay Message: %v", overlayMessage)
|
||||||
err = carol.ImportBundle(overlayMessage.Data)
|
err = carol.ImportBundle(overlayMessage.Data)
|
||||||
log.Infof("Result of Carol Importing the Bundle from Alice: %v", err)
|
log.Infof("Result of Carol Importing the Bundle from Alice: %v", err)
|
||||||
|
|
||||||
log.Infof("Waiting for Carol to join connect to group server...")
|
log.Infof("Waiting for Carol to join connect to group server...")
|
||||||
carol.ResyncServer(ServerAddr)
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
carolGroupConversationID := 3
|
carolGroupConversationID := 3
|
||||||
|
if len(cachedTokens) > (usedTokens + len(carolLines)) {
|
||||||
|
carol.StoreCachedTokens(ServerAddr, cachedTokens[usedTokens:usedTokens+len(carolLines)])
|
||||||
|
}
|
||||||
waitForConnection(t, carol, ServerAddr, connections.SYNCED)
|
waitForConnection(t, carol, ServerAddr, connections.SYNCED)
|
||||||
|
|
||||||
numGoRoutinesPostCarolConnect := runtime.NumGoroutine()
|
numGoRoutinesPostCarolConnect := runtime.NumGoroutine()
|
||||||
|
|
||||||
// Check Alice Timeline
|
// Check Alice Timeline
|
||||||
|
log.Infof("Checking Alice's Timeline...")
|
||||||
checkMessage(t, alice, aliceGroupConversationID, 1, aliceLines[0])
|
checkMessage(t, alice, aliceGroupConversationID, 1, aliceLines[0])
|
||||||
checkMessage(t, alice, aliceGroupConversationID, 2, bobLines[0])
|
checkMessage(t, alice, aliceGroupConversationID, 2, bobLines[0])
|
||||||
checkMessage(t, alice, aliceGroupConversationID, 3, aliceLines[1])
|
checkMessage(t, alice, aliceGroupConversationID, 3, aliceLines[1])
|
||||||
|
@ -302,11 +366,14 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
time.Sleep(time.Second * 3)
|
time.Sleep(time.Second * 3)
|
||||||
numGoRoutinesPostAlice := runtime.NumGoroutine()
|
numGoRoutinesPostAlice := runtime.NumGoroutine()
|
||||||
|
|
||||||
checkSendMessageToGroup(t, carol, carolGroupConversationID, carolLines[0])
|
checkSendMessageToGroup(t, carol, carolBus, carolGroupConversationID, carolLines[0])
|
||||||
checkSendMessageToGroup(t, bob, bobGroupConversationID, bobLines[2])
|
checkSendMessageToGroup(t, bob, bobBus, bobGroupConversationID, bobLines[2])
|
||||||
time.Sleep(time.Second * 30)
|
|
||||||
|
// Time to Sync
|
||||||
|
time.Sleep(time.Second * 10)
|
||||||
|
|
||||||
// Check Bob Timeline
|
// Check Bob Timeline
|
||||||
|
log.Infof("Checking Bob's Timeline...")
|
||||||
checkMessage(t, bob, bobGroupConversationID, 1, aliceLines[0])
|
checkMessage(t, bob, bobGroupConversationID, 1, aliceLines[0])
|
||||||
checkMessage(t, bob, bobGroupConversationID, 2, bobLines[0])
|
checkMessage(t, bob, bobGroupConversationID, 2, bobLines[0])
|
||||||
checkMessage(t, bob, bobGroupConversationID, 3, aliceLines[1])
|
checkMessage(t, bob, bobGroupConversationID, 3, aliceLines[1])
|
||||||
|
@ -315,6 +382,7 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
checkMessage(t, bob, bobGroupConversationID, 6, bobLines[2])
|
checkMessage(t, bob, bobGroupConversationID, 6, bobLines[2])
|
||||||
|
|
||||||
// Check Carol Timeline
|
// Check Carol Timeline
|
||||||
|
log.Infof("Checking Carols's Timeline...")
|
||||||
checkMessage(t, carol, carolGroupConversationID, 1, aliceLines[0])
|
checkMessage(t, carol, carolGroupConversationID, 1, aliceLines[0])
|
||||||
checkMessage(t, carol, carolGroupConversationID, 2, bobLines[0])
|
checkMessage(t, carol, carolGroupConversationID, 2, bobLines[0])
|
||||||
checkMessage(t, carol, carolGroupConversationID, 3, aliceLines[1])
|
checkMessage(t, carol, carolGroupConversationID, 3, aliceLines[1])
|
||||||
|
@ -322,6 +390,10 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
checkMessage(t, carol, carolGroupConversationID, 5, carolLines[0])
|
checkMessage(t, carol, carolGroupConversationID, 5, carolLines[0])
|
||||||
checkMessage(t, carol, carolGroupConversationID, 6, bobLines[2])
|
checkMessage(t, carol, carolGroupConversationID, 6, bobLines[2])
|
||||||
|
|
||||||
|
// Have bob clean up some conversations...
|
||||||
|
log.Infof("Bob cleanup conversation")
|
||||||
|
bob.DeleteConversation(1)
|
||||||
|
|
||||||
log.Infof("Shutting down Bob...")
|
log.Infof("Shutting down Bob...")
|
||||||
app.ShutdownPeer(bob.GetOnion())
|
app.ShutdownPeer(bob.GetOnion())
|
||||||
time.Sleep(time.Second * 3)
|
time.Sleep(time.Second * 3)
|
||||||
|
@ -342,14 +414,14 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
|
|
||||||
log.Infof("Shutting down ACN...")
|
log.Infof("Shutting down ACN...")
|
||||||
acn.Close()
|
acn.Close()
|
||||||
time.Sleep(time.Second * 30) // the network status plugin might keep goroutines alive for a minute before killing them
|
time.Sleep(time.Second * 60) // the network status / heartbeat plugin might keep goroutines alive for a minute before killing them
|
||||||
|
|
||||||
numGoRoutinesPostAppShutdown := runtime.NumGoroutine()
|
numGoRoutinesPostAppShutdown := runtime.NumGoroutine()
|
||||||
|
|
||||||
// Printing out the current goroutines
|
// Printing out the current goroutines
|
||||||
// Very useful if we are leaking any.
|
// Very useful if we are leaking any.
|
||||||
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||||
|
fmt.Println("")
|
||||||
log.Infof("numGoRoutinesStart: %v\nnumGoRoutinesPostAppStart: %v\nnumGoRoutinesPostPeerStart: %v\nnumGoRoutinesPostPeerAndServerConnect: %v\n"+
|
log.Infof("numGoRoutinesStart: %v\nnumGoRoutinesPostAppStart: %v\nnumGoRoutinesPostPeerStart: %v\nnumGoRoutinesPostPeerAndServerConnect: %v\n"+
|
||||||
"numGoRoutinesPostAlice: %v\nnumGoRoutinesPostCarolConnect: %v\nnumGoRoutinesPostBob: %v\nnumGoRoutinesPostCarol: %v\nnumGoRoutinesPostAppShutdown: %v",
|
"numGoRoutinesPostAlice: %v\nnumGoRoutinesPostCarolConnect: %v\nnumGoRoutinesPostBob: %v\nnumGoRoutinesPostCarol: %v\nnumGoRoutinesPostAppShutdown: %v",
|
||||||
numGoRoutinesStart, numGoRoutinesPostAppStart, numGoRoutinesPostPeerStart, numGoRoutinesPostServerConnect,
|
numGoRoutinesStart, numGoRoutinesPostAppStart, numGoRoutinesPostPeerStart, numGoRoutinesPostServerConnect,
|
||||||
|
@ -361,23 +433,39 @@ func TestCwtchPeerIntegration(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Utility function for sending a message from a peer to a group
|
// Utility function for sending a message from a peer to a group
|
||||||
func checkSendMessageToGroup(t *testing.T, profile peer.CwtchPeer, id int, message string) {
|
func checkSendMessageToGroup(t *testing.T, profile peer.CwtchPeer, bus event.Manager, id int, message string) {
|
||||||
name, _ := profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
name, _ := profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
||||||
log.Infof("%v> %v\n", name, message)
|
log.Infof("%v> %v\n", name, message)
|
||||||
err := profile.SendMessage(id, message)
|
queue := event.NewQueue()
|
||||||
|
bus.Subscribe(event.IndexedAcknowledgement, queue)
|
||||||
|
mid, err := profile.SendMessage(id, message)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Alice failed to send a message to the group: %v", err)
|
log.Errorf("Alice failed to send a message to the group: %v", err)
|
||||||
|
t.Fatalf("Alice failed to send a message to the group: %v\n", err)
|
||||||
}
|
}
|
||||||
|
log.Infof("Sent message with mid: %v, waiting for ack...", mid)
|
||||||
|
ev := queue.Next()
|
||||||
|
switch ev.EventType {
|
||||||
|
case event.IndexedAcknowledgement:
|
||||||
|
if evid, err := strconv.Atoi(ev.Data[event.Index]); err == nil && evid == mid {
|
||||||
|
log.Infof("Message mid acked!")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
queue.Shutdown()
|
||||||
time.Sleep(time.Second * 10)
|
time.Sleep(time.Second * 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Utility function for testing that a message in a conversation is as expected
|
// Utility function for testing that a message in a conversation is as expected
|
||||||
func checkMessage(t *testing.T, profile peer.CwtchPeer, id int, messageID int, expected string) {
|
func checkMessage(t *testing.T, profile peer.CwtchPeer, id int, messageID int, expected string) {
|
||||||
message, _, err := profile.GetChannelMessage(id, 0, messageID)
|
message, _, err := profile.GetChannelMessage(id, 0, messageID)
|
||||||
|
log.Debugf(" checking if expected: %v is actual: %v", expected, message)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unexpected message %v expected: %v got error: %v", profile.GetOnion(), expected, err)
|
log.Errorf("unexpected message %v expected: %v got error: %v", profile.GetOnion(), expected, err)
|
||||||
|
t.Fatalf("unexpected message %v expected: %v got error: %v\n", profile.GetOnion(), expected, err)
|
||||||
}
|
}
|
||||||
if message != expected {
|
if message != expected {
|
||||||
t.Fatalf("unexpected message %v expected: %v got: [%v]", profile.GetOnion(), expected, message)
|
log.Errorf("unexpected message %v expected: %v got: [%v]", profile.GetOnion(), expected, message)
|
||||||
|
t.Fatalf("unexpected message %v expected: %v got: [%v]\n", profile.GetOnion(), expected, message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,7 +4,6 @@ import (
|
||||||
// Import SQL Cipher
|
// Import SQL Cipher
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
app2 "cwtch.im/cwtch/app"
|
app2 "cwtch.im/cwtch/app"
|
||||||
"cwtch.im/cwtch/app/utils"
|
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"cwtch.im/cwtch/model/constants"
|
"cwtch.im/cwtch/model/constants"
|
||||||
"cwtch.im/cwtch/peer"
|
"cwtch.im/cwtch/peer"
|
||||||
|
@ -13,7 +12,6 @@ import (
|
||||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
_ "github.com/mutecomm/go-sqlcipher/v4"
|
_ "github.com/mutecomm/go-sqlcipher/v4"
|
||||||
"io/ioutil"
|
|
||||||
mrand "math/rand"
|
mrand "math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
@ -31,7 +29,6 @@ func TestEncryptedStorage(t *testing.T) {
|
||||||
os.MkdirAll(dataDir, 0700)
|
os.MkdirAll(dataDir, 0700)
|
||||||
|
|
||||||
// we don't need real randomness for the port, just to avoid a possible conflict...
|
// we don't need real randomness for the port, just to avoid a possible conflict...
|
||||||
mrand.Seed(int64(time.Now().Nanosecond()))
|
|
||||||
socksPort := mrand.Intn(1000) + 9051
|
socksPort := mrand.Intn(1000) + 9051
|
||||||
controlPort := mrand.Intn(1000) + 9052
|
controlPort := mrand.Intn(1000) + 9052
|
||||||
|
|
||||||
|
@ -43,7 +40,7 @@ func TestEncryptedStorage(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
torDataDir := ""
|
torDataDir := ""
|
||||||
if torDataDir, err = ioutil.TempDir(path.Join("..", "testing"), "data-dir-"); err != nil {
|
if torDataDir, err = os.MkdirTemp(dataDir, "data-dir-"); err != nil {
|
||||||
t.Fatalf("could not create data dir")
|
t.Fatalf("could not create data dir")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -61,12 +58,12 @@ func TestEncryptedStorage(t *testing.T) {
|
||||||
|
|
||||||
defer acn.Close()
|
defer acn.Close()
|
||||||
acn.WaitTillBootstrapped()
|
acn.WaitTillBootstrapped()
|
||||||
app := app2.NewApp(acn, cwtchDir)
|
app := app2.NewApp(acn, cwtchDir, app2.LoadAppSettings(cwtchDir))
|
||||||
app.CreateTaggedPeer("alice", "password", constants.ProfileTypeV1Password)
|
app.CreateProfile("alice", "password", true)
|
||||||
app.CreateTaggedPeer("bob", "password", constants.ProfileTypeV1Password)
|
app.CreateProfile("bob", "password", true)
|
||||||
|
|
||||||
alice := utils.WaitGetPeer(app, "alice")
|
alice := app2.WaitGetPeer(app, "alice")
|
||||||
bob := utils.WaitGetPeer(app, "bob")
|
bob := app2.WaitGetPeer(app, "bob")
|
||||||
|
|
||||||
alice.Listen()
|
alice.Listen()
|
||||||
bob.Listen()
|
bob.Listen()
|
||||||
|
@ -79,6 +76,7 @@ func TestEncryptedStorage(t *testing.T) {
|
||||||
t.Fatalf("unexpected issue when fetching all of alices conversations. Expected 1 got : %v %v", conversations, err)
|
t.Fatalf("unexpected issue when fetching all of alices conversations. Expected 1 got : %v %v", conversations, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
aliceOnion := alice.GetOnion()
|
||||||
alice.PeerWithOnion(bob.GetOnion())
|
alice.PeerWithOnion(bob.GetOnion())
|
||||||
|
|
||||||
time.Sleep(time.Second * 40)
|
time.Sleep(time.Second * 40)
|
||||||
|
@ -94,7 +92,16 @@ func TestEncryptedStorage(t *testing.T) {
|
||||||
|
|
||||||
time.Sleep(time.Second * 30)
|
time.Sleep(time.Second * 30)
|
||||||
|
|
||||||
ci, _ := bob.FetchConversationInfo(alice.GetOnion())
|
ci, err := bob.FetchConversationInfo(alice.GetOnion())
|
||||||
|
for err != nil {
|
||||||
|
time.Sleep(time.Second * 5)
|
||||||
|
ci, err = bob.FetchConversationInfo(alice.GetOnion())
|
||||||
|
}
|
||||||
|
|
||||||
|
if ci == nil {
|
||||||
|
t.Fatalf("could not fetch bobs conversation")
|
||||||
|
}
|
||||||
|
|
||||||
body, _, err := bob.GetChannelMessage(ci.ID, 0, 1)
|
body, _, err := bob.GetChannelMessage(ci.ID, 0, 1)
|
||||||
if body != "Hello Bob" || err != nil {
|
if body != "Hello Bob" || err != nil {
|
||||||
t.Fatalf("unexpected message in conversation channel %v %v", body, err)
|
t.Fatalf("unexpected message in conversation channel %v %v", body, err)
|
||||||
|
@ -126,6 +133,26 @@ func TestEncryptedStorage(t *testing.T) {
|
||||||
t.Fatalf("expeced GetMostRecentMessages to return 1, instead returned: %v %v", len(messages), messages)
|
t.Fatalf("expeced GetMostRecentMessages to return 1, instead returned: %v %v", len(messages), messages)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
err = alice.ExportProfile("alice.tar.gz")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not export profile: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = app.ImportProfile("alice.tar.gz", "password")
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("profile is already imported...this should fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
app.DeleteProfile(alice.GetOnion(), "password")
|
||||||
|
alice, err = app.ImportProfile("alice.tar.gz", "password")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("profile should have successfully imported: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if alice.GetOnion() != aliceOnion {
|
||||||
|
t.Fatalf("profile is not Alice...%s != %s", aliceOnion, alice.GetOnion())
|
||||||
|
}
|
||||||
|
|
||||||
app.Shutdown()
|
app.Shutdown()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,10 +6,9 @@ import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"path/filepath"
|
||||||
|
|
||||||
app2 "cwtch.im/cwtch/app"
|
app2 "cwtch.im/cwtch/app"
|
||||||
"cwtch.im/cwtch/app/utils"
|
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/functionality/filesharing"
|
"cwtch.im/cwtch/functionality/filesharing"
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
|
@ -18,6 +17,7 @@ import (
|
||||||
"cwtch.im/cwtch/peer"
|
"cwtch.im/cwtch/peer"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
"cwtch.im/cwtch/protocol/files"
|
"cwtch.im/cwtch/protocol/files"
|
||||||
|
utils2 "cwtch.im/cwtch/utils"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
|
||||||
|
@ -54,18 +54,18 @@ func waitForPeerPeerConnection(t *testing.T, peera peer.CwtchPeer, peerb peer.Cw
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFileSharing(t *testing.T) {
|
func TestFileSharing(t *testing.T) {
|
||||||
|
numGoRoutinesStart := runtime.NumGoroutine()
|
||||||
os.RemoveAll("cwtch.out.png")
|
os.RemoveAll("cwtch.out.png")
|
||||||
os.RemoveAll("cwtch.out.png.manifest")
|
os.RemoveAll("cwtch.out.png.manifest")
|
||||||
|
|
||||||
log.SetLevel(log.LevelDebug)
|
log.SetLevel(log.LevelDebug)
|
||||||
|
log.ExcludeFromPattern("tapir")
|
||||||
|
|
||||||
os.Mkdir("tordir", 0700)
|
os.Mkdir("tordir", 0700)
|
||||||
dataDir := path.Join("tordir", "tor")
|
dataDir := path.Join("tordir", "tor")
|
||||||
os.MkdirAll(dataDir, 0700)
|
os.MkdirAll(dataDir, 0700)
|
||||||
|
|
||||||
// we don't need real randomness for the port, just to avoid a possible conflict...
|
// we don't need real randomness for the port, just to avoid a possible conflict...
|
||||||
mrand.Seed(int64(time.Now().Nanosecond()))
|
|
||||||
socksPort := mrand.Intn(1000) + 9051
|
socksPort := mrand.Intn(1000) + 9051
|
||||||
controlPort := mrand.Intn(1000) + 9052
|
controlPort := mrand.Intn(1000) + 9052
|
||||||
|
|
||||||
|
@ -76,70 +76,160 @@ func TestFileSharing(t *testing.T) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
useCache := os.Getenv("TORCACHE") == "true"
|
||||||
|
|
||||||
torDataDir := ""
|
torDataDir := ""
|
||||||
if torDataDir, err = ioutil.TempDir(dataDir, "data-dir-"); err != nil {
|
if useCache {
|
||||||
|
log.Infof("using tor cache")
|
||||||
|
torDataDir = filepath.Join(dataDir, "data-dir-torcache")
|
||||||
|
os.MkdirAll(torDataDir, 0700)
|
||||||
|
} else {
|
||||||
|
log.Infof("using clean tor data dir")
|
||||||
|
if torDataDir, err = os.MkdirTemp(dataDir, "data-dir-"); err != nil {
|
||||||
t.Fatalf("could not create data dir")
|
t.Fatalf("could not create data dir")
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
|
tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
|
||||||
acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "..", "tor"), torDataDir, controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "tor"), torDataDir, controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Could not start Tor: %v", err)
|
t.Fatalf("Could not start Tor: %v", err)
|
||||||
}
|
}
|
||||||
acn.WaitTillBootstrapped()
|
acn.WaitTillBootstrapped()
|
||||||
defer acn.Close()
|
defer acn.Close()
|
||||||
|
|
||||||
numGoRoutinesStart := runtime.NumGoroutine()
|
app := app2.NewApp(acn, "./storage", app2.LoadAppSettings("./storage"))
|
||||||
app := app2.NewApp(acn, "./storage")
|
|
||||||
|
|
||||||
usr, _ := user.Current()
|
usr, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("current user is undefined")
|
||||||
|
}
|
||||||
cwtchDir := path.Join(usr.HomeDir, ".cwtch")
|
cwtchDir := path.Join(usr.HomeDir, ".cwtch")
|
||||||
os.Mkdir(cwtchDir, 0700)
|
os.Mkdir(cwtchDir, 0700)
|
||||||
os.RemoveAll(path.Join(cwtchDir, "testing"))
|
os.RemoveAll(path.Join(cwtchDir, "testing"))
|
||||||
os.Mkdir(path.Join(cwtchDir, "testing"), 0700)
|
os.Mkdir(path.Join(cwtchDir, "testing"), 0700)
|
||||||
|
|
||||||
fmt.Println("Creating Alice...")
|
t.Logf("Creating Alice...")
|
||||||
app.CreateTaggedPeer("alice", "asdfasdf", "testing")
|
app.CreateProfile("alice", "asdfasdf", true)
|
||||||
|
|
||||||
fmt.Println("Creating Bob...")
|
t.Logf("Creating Bob...")
|
||||||
app.CreateTaggedPeer("bob", "asdfasdf", "testing")
|
app.CreateProfile("bob", "asdfasdf", true)
|
||||||
|
|
||||||
t.Logf("** Waiting for Alice, Bob...")
|
t.Logf("** Waiting for Alice, Bob...")
|
||||||
alice := utils.WaitGetPeer(app, "alice")
|
alice := app2.WaitGetPeer(app, "alice")
|
||||||
bob := utils.WaitGetPeer(app, "bob")
|
app.ActivatePeerEngine(alice.GetOnion())
|
||||||
|
app.ConfigureConnections(alice.GetOnion(), true, true, true)
|
||||||
|
bob := app2.WaitGetPeer(app, "bob")
|
||||||
|
app.ActivatePeerEngine(bob.GetOnion())
|
||||||
|
app.ConfigureConnections(bob.GetOnion(), true, true, true)
|
||||||
alice.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer})
|
alice.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer})
|
||||||
bob.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer, event.ManifestReceived})
|
bob.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer})
|
||||||
|
|
||||||
|
aliceQueueOracle := event.NewQueue()
|
||||||
|
aliceEb := app.GetEventBus(alice.GetOnion())
|
||||||
|
if aliceEb == nil {
|
||||||
|
t.Fatalf("alice's eventbus is undefined")
|
||||||
|
}
|
||||||
|
aliceEb.Subscribe(event.SearchResult, aliceQueueOracle)
|
||||||
queueOracle := event.NewQueue()
|
queueOracle := event.NewQueue()
|
||||||
app.GetEventBus(bob.GetOnion()).Subscribe(event.FileDownloaded, queueOracle)
|
bobEb := app.GetEventBus(bob.GetOnion())
|
||||||
|
if bobEb == nil {
|
||||||
|
t.Fatalf("bob's eventbus is undefined")
|
||||||
|
}
|
||||||
|
bobEb.Subscribe(event.FileDownloaded, queueOracle)
|
||||||
|
|
||||||
|
// Turn on File Sharing Experiment...
|
||||||
|
settings := app.ReadSettings()
|
||||||
|
settings.ExperimentsEnabled = true
|
||||||
|
settings.Experiments[constants.FileSharingExperiment] = true
|
||||||
|
app.UpdateSettings(settings)
|
||||||
|
|
||||||
t.Logf("** Launching Peers...")
|
t.Logf("** Launching Peers...")
|
||||||
app.LaunchPeers()
|
|
||||||
|
|
||||||
waitTime := time.Duration(30) * time.Second
|
waitTime := time.Duration(30) * time.Second
|
||||||
t.Logf("** Waiting for Alice, Bob to connect with onion network... (%v)\n", waitTime)
|
t.Logf("** Waiting for Alice, Bob to connect with onion network... (%v)\n", waitTime)
|
||||||
time.Sleep(waitTime)
|
time.Sleep(waitTime)
|
||||||
|
|
||||||
bob.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true)
|
bob.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true)
|
||||||
alice.NewContactConversation(bob.GetOnion(), model.DefaultP2PAccessControl(), true)
|
alice.NewContactConversation(bob.GetOnion(), model.DefaultP2PAccessControl(), true)
|
||||||
alice.PeerWithOnion(bob.GetOnion())
|
|
||||||
|
|
||||||
fmt.Println("Waiting for alice and Bob to peer...")
|
filesharingFunctionality := filesharing.FunctionalityGate()
|
||||||
waitForPeerPeerConnection(t, alice, bob)
|
|
||||||
|
|
||||||
fmt.Println("Alice and Bob are Connected!!")
|
_, fileSharingMessage, err := filesharingFunctionality.ShareFile("cwtch.png", alice)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error!: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
filesharingFunctionality, _ := filesharing.FunctionalityGate(map[string]bool{constants.FileSharingExperiment: true})
|
alice.SendMessage(1, fileSharingMessage)
|
||||||
|
|
||||||
err = filesharingFunctionality.ShareFile("cwtch.png", alice, 1)
|
// Ok this is fun...we just Sent a Message we may not have a connection yet...
|
||||||
|
// so this test will only pass if sending offline works...
|
||||||
|
waitForPeerPeerConnection(t, bob, alice)
|
||||||
|
|
||||||
|
bob.SendMessage(1, "this is a test message")
|
||||||
|
bob.SendMessage(1, "this is another test message")
|
||||||
|
|
||||||
|
// Wait for the messages to arrive...
|
||||||
|
time.Sleep(time.Second * 20)
|
||||||
|
alice.SearchConversations("test")
|
||||||
|
|
||||||
|
results := 0
|
||||||
|
for {
|
||||||
|
ev := aliceQueueOracle.Next()
|
||||||
|
if ev.EventType != event.SearchResult {
|
||||||
|
t.Fatalf("Expected a search result vent")
|
||||||
|
}
|
||||||
|
results += 1
|
||||||
|
t.Logf("found search result (%d)....%v", results, ev)
|
||||||
|
if results == 2 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// test that bob can download and verify the file
|
||||||
|
testBobDownloadFile(t, bob, filesharingFunctionality, queueOracle)
|
||||||
|
|
||||||
|
// Test stopping and restarting file shares
|
||||||
|
t.Logf("Stopping File Share")
|
||||||
|
filesharingFunctionality.StopAllFileShares(alice)
|
||||||
|
|
||||||
|
// Allow time for the stop request to filter through Engine
|
||||||
|
time.Sleep(time.Second * 5)
|
||||||
|
|
||||||
|
// Restart
|
||||||
|
t.Logf("Restarting File Share")
|
||||||
|
err = filesharingFunctionality.ReShareFiles(alice)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Error!: %v", err)
|
t.Fatalf("Error!: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the messages to arrive...
|
// run the same download test again...to check that we can actually download the file
|
||||||
time.Sleep(time.Second * 10)
|
testBobDownloadFile(t, bob, filesharingFunctionality, queueOracle)
|
||||||
|
|
||||||
|
// test that we can delete bob...
|
||||||
|
app.DeleteProfile(bob.GetOnion(), "asdfasdf")
|
||||||
|
|
||||||
|
aliceQueueOracle.Shutdown()
|
||||||
|
queueOracle.Shutdown()
|
||||||
|
app.Shutdown()
|
||||||
|
acn.Close()
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
numGoRoutinesPostACN := runtime.NumGoroutine()
|
||||||
|
|
||||||
|
// Printing out the current goroutines
|
||||||
|
// Very useful if we are leaking any.
|
||||||
|
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||||
|
|
||||||
|
if numGoRoutinesStart != numGoRoutinesPostACN {
|
||||||
|
t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostACN)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func testBobDownloadFile(t *testing.T, bob peer.CwtchPeer, filesharingFunctionality *filesharing.Functionality, queueOracle event.Queue) {
|
||||||
|
|
||||||
|
os.RemoveAll("cwtch.out.png")
|
||||||
|
os.RemoveAll("cwtch.out.png.manifest")
|
||||||
|
|
||||||
message, _, err := bob.GetChannelMessage(1, 0, 1)
|
message, _, err := bob.GetChannelMessage(1, 0, 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -154,11 +244,29 @@ func TestFileSharing(t *testing.T) {
|
||||||
err := json.Unmarshal([]byte(messageWrapper.Data), &fileMessageOverlay)
|
err := json.Unmarshal([]byte(messageWrapper.Data), &fileMessageOverlay)
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
filesharingFunctionality.DownloadFile(bob, 1, "cwtch.out.png", "cwtch.out.png.manifest", fmt.Sprintf("%s.%s", fileMessageOverlay.Hash, fileMessageOverlay.Nonce))
|
t.Logf("bob attempting to download file with invalid download")
|
||||||
|
// try downloading with invalid download dir
|
||||||
|
err = filesharingFunctionality.DownloadFile(bob, 1, "/do/not/download/this/file/cwtch.out.png", "./cwtch.out.png.manifest", fmt.Sprintf("%s.%s", fileMessageOverlay.Hash, fileMessageOverlay.Nonce), constants.ImagePreviewMaxSizeInBytes)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("should not download file with invalid download dir")
|
||||||
|
}
|
||||||
|
t.Logf("bob attempting to download file with invalid manifest")
|
||||||
|
// try downloading with invalid manifest dir
|
||||||
|
err = filesharingFunctionality.DownloadFile(bob, 1, "./cwtch.out.png", "/do/not/download/this/file/cwtch.out.png.manifest", fmt.Sprintf("%s.%s", fileMessageOverlay.Hash, fileMessageOverlay.Nonce), constants.ImagePreviewMaxSizeInBytes)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("should not download file with invalid manifest dir")
|
||||||
|
}
|
||||||
|
t.Logf("bob attempting to download file")
|
||||||
|
err = filesharingFunctionality.DownloadFile(bob, 1, "./cwtch.out.png", "./cwtch.out.png.manifest", fmt.Sprintf("%s.%s", fileMessageOverlay.Hash, fileMessageOverlay.Nonce), constants.ImagePreviewMaxSizeInBytes)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not download file: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the file downloaded event
|
// Wait for the file downloaded event
|
||||||
|
ClientTimeout := utils2.TimeoutPolicy(time.Second * 120)
|
||||||
|
err = ClientTimeout.ExecuteAction(func() error {
|
||||||
ev := queueOracle.Next()
|
ev := queueOracle.Next()
|
||||||
if ev.EventType != event.FileDownloaded {
|
if ev.EventType != event.FileDownloaded {
|
||||||
t.Fatalf("Expected file download event")
|
t.Fatalf("Expected file download event")
|
||||||
|
@ -168,18 +276,10 @@ func TestFileSharing(t *testing.T) {
|
||||||
if hex.EncodeToString(manifest.RootHash) != "8f0ed73bbb30db45b6a740b1251cae02945f48e4f991464d5f3607685c45dcd136a325dab2e5f6429ce2b715e602b20b5b16bf7438fb6235fefe912adcedb5fd" {
|
if hex.EncodeToString(manifest.RootHash) != "8f0ed73bbb30db45b6a740b1251cae02945f48e4f991464d5f3607685c45dcd136a325dab2e5f6429ce2b715e602b20b5b16bf7438fb6235fefe912adcedb5fd" {
|
||||||
t.Fatalf("file hash does not match expected %x: ", manifest.RootHash)
|
t.Fatalf("file hash does not match expected %x: ", manifest.RootHash)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
queueOracle.Shutdown()
|
if err != nil {
|
||||||
app.Shutdown()
|
t.Fatalf("timeout when attempting to download a file")
|
||||||
time.Sleep(3 * time.Second)
|
|
||||||
numGoRoutinesPostACN := runtime.NumGoroutine()
|
|
||||||
|
|
||||||
// Printing out the current goroutines
|
|
||||||
// Very useful if we are leaking any.
|
|
||||||
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
|
||||||
|
|
||||||
if numGoRoutinesStart != numGoRoutinesPostACN {
|
|
||||||
t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostACN)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,22 +3,26 @@
|
||||||
echo "Checking code quality (you want to see no output here)"
|
echo "Checking code quality (you want to see no output here)"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
echo "Vetting:"
|
|
||||||
go vet ./...
|
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "Linting:"
|
echo "Running staticcheck..."
|
||||||
|
|
||||||
staticcheck ./...
|
staticcheck ./...
|
||||||
|
|
||||||
|
# In the future we should remove include-pkgs. However, there are a few false positives in the overall go stdlib that make this
|
||||||
|
# too noisy right now, specifically assigning nil to initialize slices (safe), and using go internal context channels assigned
|
||||||
|
# nil (also safe).
|
||||||
|
# We also have one file infinite_channel.go written in a way that static analysis cannot reason about easily. So it is explictly
|
||||||
|
# ignored.
|
||||||
|
echo "Running nilaway..."
|
||||||
|
nilaway -include-pkgs="cwtch.im/cwtch,cwtch.im/tapir,git.openprivacy.ca/openprivacy/connectivity" -exclude-file-docstrings="nolint:nilaway" ./...
|
||||||
|
|
||||||
echo "Time to format"
|
echo "Time to format"
|
||||||
gofmt -l -s -w .
|
gofmt -l -s -w .
|
||||||
|
|
||||||
# ineffassign (https://github.com/gordonklaus/ineffassign)
|
# ineffassign (https://github.com/gordonklaus/ineffassign)
|
||||||
echo "Checking for ineffectual assignment of errors (unchecked errors...)"
|
# echo "Checking for ineffectual assignment of errors (unchecked errors...)"
|
||||||
ineffassign .
|
# ineffassign .
|
||||||
|
|
||||||
# misspell (https://github.com/client9/misspell/cmd/misspell)
|
# misspell (https://github.com/client9/misspell/cmd/misspell)
|
||||||
echo "Checking for misspelled words..."
|
# echo "Checking for misspelled words..."
|
||||||
misspell . | grep -v "testing/" | grep -v "vendor/" | grep -v "go.sum" | grep -v ".idea"
|
# misspell . | grep -v "testing/" | grep -v "vendor/" | grep -v "go.sum" | grep -v ".idea"
|
||||||
|
|
|
@ -3,6 +3,7 @@
|
||||||
set -e
|
set -e
|
||||||
pwd
|
pwd
|
||||||
GORACE="haltonerror=1"
|
GORACE="haltonerror=1"
|
||||||
|
go test -coverprofile=plugins.cover.out -v ./app/plugins
|
||||||
go test -race ${1} -coverprofile=model.cover.out -v ./model
|
go test -race ${1} -coverprofile=model.cover.out -v ./model
|
||||||
go test -race ${1} -coverprofile=event.cover.out -v ./event
|
go test -race ${1} -coverprofile=event.cover.out -v ./event
|
||||||
go test -race ${1} -coverprofile=storage.v1.cover.out -v ./storage/v1
|
go test -race ${1} -coverprofile=storage.v1.cover.out -v ./storage/v1
|
||||||
|
|
|
@ -0,0 +1,152 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/model/constants"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"github.com/gtank/ristretto255"
|
||||||
|
mrand "math/rand"
|
||||||
|
"os"
|
||||||
|
path "path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Command-line flags: -tool selects which sub-tool main() dispatches to
// ("bundleparse" or "gettokens"); -bundle supplies the serialized server
// bundle that both tools consume.
var tool = flag.String("tool", "", "the tool to use")
var bundle = flag.String("bundle", "", "a server bundle to parse")
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if *tool == "" {
|
||||||
|
fmt.Fprintf(os.Stderr, "usage: cwtch_tools -tool <tool>\n")
|
||||||
|
flag.PrintDefaults()
|
||||||
|
}
|
||||||
|
|
||||||
|
switch *tool {
|
||||||
|
case "bundleparse":
|
||||||
|
bundle, err := bundleParse(*bundle)
|
||||||
|
if err == nil {
|
||||||
|
fmt.Printf("bundle: %s\n", bundle.Serialize())
|
||||||
|
} else {
|
||||||
|
fmt.Printf("error parsing bundle: %v", err)
|
||||||
|
}
|
||||||
|
case "gettokens":
|
||||||
|
getTokens(*bundle)
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(os.Stderr, "unknown tool: %s \n", *tool)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func bundleParse(bundle string) (*model.KeyBundle, error) {
|
||||||
|
if strings.HasPrefix(bundle, constants.ServerPrefix) {
|
||||||
|
bundle, err := base64.StdEncoding.DecodeString(bundle[len(constants.ServerPrefix):])
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("invalid server bundle: %v\n", err)
|
||||||
|
}
|
||||||
|
keyBundle, err := model.DeserializeAndVerify([]byte(bundle))
|
||||||
|
return keyBundle, err
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unknown bundle prefix")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTokens(bundle string) {
|
||||||
|
log.SetLevel(log.LevelDebug)
|
||||||
|
keyBundle, err := bundleParse(bundle)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error parsing keybundle: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
privacyPassKey, err := keyBundle.GetKey(model.KeyTypePrivacyPass)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error parsing keybundle: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenServiceKey, err := keyBundle.GetKey(model.KeyTypeTokenOnion)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error parsing keybundle: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Mkdir("tordir", 0700)
|
||||||
|
dataDir := path.Join("tordir", "tor")
|
||||||
|
os.MkdirAll(dataDir, 0700)
|
||||||
|
|
||||||
|
// we don't need real randomness for the port, just to avoid a possible conflict...
|
||||||
|
socksPort := mrand.Intn(1000) + 9051
|
||||||
|
controlPort := mrand.Intn(1000) + 9052
|
||||||
|
|
||||||
|
// generate a random password
|
||||||
|
key := make([]byte, 64)
|
||||||
|
_, err = rand.Read(key)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("insufficient randomness: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
torDataDir := "./data-dir-cwtchtool"
|
||||||
|
if err = os.MkdirAll(torDataDir, 0700); err != nil {
|
||||||
|
log.Errorf("could not create data dir")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
|
||||||
|
acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "tor"), torDataDir, controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not start Tor: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
acn.WaitTillBootstrapped()
|
||||||
|
defer acn.Close()
|
||||||
|
|
||||||
|
Y := new(ristretto255.Element)
|
||||||
|
Y.UnmarshalText([]byte(privacyPassKey))
|
||||||
|
|
||||||
|
tokenServer := privacypass.NewTokenServer()
|
||||||
|
tokenServer.Y = Y
|
||||||
|
|
||||||
|
err = connections.MakePayment(string(tokenServiceKey), tokenServer, acn, Handler{})
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("timed out trying to get payments")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Handler struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h Handler) PostingFailed(server string, sig []byte) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h Handler) GroupMessageHandler(server string, gm *groups.EncryptedGroupMessage) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h Handler) ServerAuthedHandler(server string) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h Handler) ServerSyncedHandler(server string) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h Handler) ServerClosedHandler(server string) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h Handler) NewTokenHandler(tokenService string, tokens []*privacypass.Token) {
|
||||||
|
data, _ := json.Marshal(tokens)
|
||||||
|
os.WriteFile("tokens", data, 0600)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h Handler) FetchToken(tokenService string) (*privacypass.Token, int, error) {
|
||||||
|
return nil, 0, nil
|
||||||
|
}
|
|
@ -0,0 +1,42 @@
|
||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test timeout policy, checking for goroutine leaks in addition to successful timeouts
|
||||||
|
func TestTimeoutPolicy(t *testing.T) {
|
||||||
|
|
||||||
|
gonumStart := runtime.NumGoroutine()
|
||||||
|
|
||||||
|
tp := TimeoutPolicy(time.Second)
|
||||||
|
|
||||||
|
// test with timeout
|
||||||
|
err := tp.ExecuteAction(func() error {
|
||||||
|
time.Sleep(time.Second * 2)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("timeout should have occurred")
|
||||||
|
}
|
||||||
|
|
||||||
|
// test without timeout
|
||||||
|
err = tp.ExecuteAction(func() error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("timeout should not have occurred")
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for gorutine clean up
|
||||||
|
time.Sleep(time.Second * 4)
|
||||||
|
|
||||||
|
// final check
|
||||||
|
gonumEnd := runtime.NumGoroutine()
|
||||||
|
if gonumStart != gonumEnd {
|
||||||
|
t.Fatalf("goroutine leak in execute action")
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
// nolint:nilaway - the context timeout here is reported as an error, even though it is a by-the-doc example
|
||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TimeoutPolicy is an interface for enforcing common timeout patterns
|
||||||
|
type TimeoutPolicy time.Duration
|
||||||
|
|
||||||
|
// ExecuteAction runs a function and returns an error if it hasn't returned
|
||||||
|
// by the time specified by TimeoutPolicy
|
||||||
|
func (tp *TimeoutPolicy) ExecuteAction(action func() error) error {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*tp))
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// channel is buffered- this is important!
|
||||||
|
c := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
// this write is non-blocking as this goroutine has sole access to the channel
|
||||||
|
c <- action()
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case err := <-c:
|
||||||
|
return err
|
||||||
|
case <-ctx.Done():
|
||||||
|
return fmt.Errorf("ActionTimedOutError")
|
||||||
|
}
|
||||||
|
}
|
Loading…
Reference in New Issue