Compare commits


No commits in common. "master" and "master" have entirely different histories.

131 changed files with 5041 additions and 13247 deletions


@@ -1,89 +0,0 @@
---
kind: pipeline
type: docker
name: linux-test
steps:
- name: fetch
image: golang:1.21.5
volumes:
- name: deps
path: /go
commands:
- go install honnef.co/go/tools/cmd/staticcheck@latest
- go install go.uber.org/nilaway/cmd/nilaway@latest
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/branch/master/tor/tor-0.4.8.9-linux-x86_64.tar.gz -O tor.tar.gz
- tar -xzf tor.tar.gz
- chmod a+x Tor/tor
- export PATH=$PWD/Tor/:$PATH
- export LD_LIBRARY_PATH=$PWD/Tor/
- tor --version
- export GO111MODULE=on
- name: quality
image: golang:1.21.5
volumes:
- name: deps
path: /go
commands:
- ./testing/quality.sh
- name: units-tests
image: golang:1.21.5
volumes:
- name: deps
path: /go
commands:
- export PATH=`pwd`:$PATH
- sh testing/tests.sh
- name: integ-test
image: golang:1.21.5
volumes:
- name: deps
path: /go
commands:
- export PATH=$PWD/Tor/:$PATH
- export LD_LIBRARY_PATH=$PWD/Tor/
- tor --version
- go test -timeout=30m -race -v cwtch.im/cwtch/testing/
- name: filesharing-integ-test
image: golang:1.21.5
volumes:
- name: deps
path: /go
commands:
- export PATH=$PWD/Tor/:$PATH
- export LD_LIBRARY_PATH=$PWD/Tor/
- go test -timeout=20m -race -v cwtch.im/cwtch/testing/filesharing
- name: filesharing-autodownload-integ-test
image: golang:1.21.5
volumes:
- name: deps
path: /go
commands:
- export PATH=$PWD/Tor/:$PATH
- export LD_LIBRARY_PATH=$PWD/Tor/
- go test -timeout=20m -race -v cwtch.im/cwtch/testing/autodownload
- name: notify-gogs
image: openpriv/drone-gogs
pull: if-not-exists
when:
event: pull_request
status: [ success, changed, failure ]
environment:
GOGS_ACCOUNT_TOKEN:
from_secret: gogs_account_token
settings:
gogs_url: https://git.openprivacy.ca
volumes:
# gopath where bin and pkg lives to persist across steps
- name: deps
temp: {}
trigger:
repo: cwtch.im/cwtch
branch: master
event:
- push
- pull_request
- tag

.gitignore vendored

@@ -3,36 +3,6 @@
*private_key*
*.messages
*.test
*.json
*/messages/*
server/app/messages
.reviewboardrc
/vendor/
/testing/tor/
/storage/*/testing/
/storage/testing/
/testing/storage/
ebusgraph.txt
messages/
serverMonitorReport.txt
testing/cwtch.out.png
testing/filesharing/storage
testing/filesharing/tordir
testing/filesharing/cwtch.out.png
testing/filesharing/cwtch.out.png.manifest
testing/cwtch.out.png.manifest
testing/tordir/
tokens-bak.db
tokens.db
tokens1.db
arch/
testing/encryptedstorage/encrypted_storage_profiles
testing/encryptedstorage/tordir
*.tar.gz
data-dir-cwtchtool/
tokens
tordir/
testing/autodownload/download_dir
testing/autodownload/storage
*.swp
testing/managerstorage/*
*/*test_*
*/*_test*
.*


@@ -1,60 +0,0 @@
image: golang:latest
#before_script:
stages:
- test
- docker-push
- deploy-staging
test-server:
stage: test
script:
- mkdir /go/src/cwtch.im
- ln -s /builds/${CI_PROJECT_NAMESPACE}/cwtch /go/src/cwtch.im/cwtch
- cd /go/src/cwtch.im/cwtch/server/app/
- go get
- go tool vet -composites=false -shadow=true *.go
- go test
test-client:
stage: test
script:
- mkdir /go/src/cwtch.im
- ln -s /builds/${CI_PROJECT_NAMESPACE}/cwtch /go/src/cwtch.im/cwtch
- cd /go/src/cwtch.im/cwtch/app/cli/
- go get
- go tool vet -composites=false -shadow=true *.go
- go test
# We don't really care about the client here but it's useful to know what's
# happening on t'other side of the coin
allow_failure: true
gitlab-registry:
stage: docker-push
image: docker:latest
services:
- docker:dind
tags:
script:
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN ${CI_REGISTRY}
- docker build -t ${CI_REGISTRY_IMAGE}:latest -t ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA:0:8} .
- docker push ${CI_REGISTRY_IMAGE}:latest
- docker push ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA:0:8}
dependencies:
- test-server
docker-hub:
stage: docker-push
image: docker:latest
services:
- docker:dind
tags:
script:
- docker login -u ${DOCKER_HUB_ID} -p ${DOCKER_HUB_PASSWORD} registry.hub.docker.com
- docker build -t registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:latest -t registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:${CI_COMMIT_SHA:0:8} .
- docker push registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:latest
- docker push registry.hub.docker.com/${DOCKER_HUB_ID}/cwtch:${CI_COMMIT_SHA:0:8}
dependencies:
- test-server


@@ -1,62 +0,0 @@
# Contributing
## Getting Started
Sign up to the [Open Privacy Gogs instance](https://git.openprivacy.ca/)
Get the code
git clone gogs@git.openprivacy.ca:cwtch.im/cwtch.git
Make a development branch to do your work
git checkout -b work-branch-name
If you are using Goland as an IDE, now would be a good time to enable automatic gofmt on save of files with the File Watchers plugin ([StackOverflow](https://stackoverflow.com/questions/33774950/execute-gofmt-on-file-save-in-intellij)).
## Pull Requests
When you are done, rebase and squash your commits into one
git rebase -i master
Test the code and check it has no quality issues
./testing/tests.sh
./testing/quality.sh
Ideally run the integration tests (~5 minutes)
cd testing
go test
Push your branch (-f to *force* push in case you've rebased and squashed)
git push origin work-branch-name -f
Create a [pull request](https://git.openprivacy.ca/cwtch.im/cwtch/pulls)
If you have fixes, you can amend them to the current commit rather than creating a new one with
git commit --amend
git push -f
## Review Board
For very large and complicated pull requests we have created a Review Board instance to facilitate more in-depth review and discussion at https://review.openprivacy.ca
First acquire the client, RBTools, on Ubuntu:
apt install rbtools
Then hook up your git repo to Review Board with:
rbt setup-repo
Using the repo `cwtch`
Finally, you will be able to post review requests with
rbt post --parent master
It may need the arguments `--tracking-branch=cwtch/master --branch=YOUR-BRANCH -d` (-d for debug if you are having trouble).


@@ -1,9 +0,0 @@
MIT License
Copyright (c) 2018 Open Privacy Research Society
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,4 +1,4 @@
# Cwtch: Privacy Preserving Infrastructure for Asynchronous, Decentralized and Metadata Resistant Applications
# Cwtch: Anonymous, Decentralized, Group Messaging
Communications metadata is known to be exploited by various adversaries to undermine the security of systems, to track victims and to conduct large scale social network analysis to feed mass surveillance. Metadata resistant tools are in their infancy and research into the construction and user experience of such tools is lacking.
We present Cwtch, an extension of the metadata resistant protocol [Ricochet](https://ricochet.im) to support asynchronous, multi-peer group communications through the use of discardable, untrusted, anonymous infrastructure.
@@ -19,43 +19,10 @@ We seek to protect the following communication contexts:
Beyond individual conversations, we also seek to defend against context correlation attacks, whereby multiple conversations are analyzed to derive higher level information:
* **Relationships** - Discovering social relationships between parties by analyzing the frequency and length of their communications over a period of time. (Carol and Eve call each other every single day for multiple hours at a time.)
* **Cliques** - Discovering social relationships between multiple parties by deriving causal communication chains from their communication metadata (e.g. every time Alice talks to Bob she talks to Carol almost immediately after.)
* **Pattern of Life** - Discovering which communications are cyclical and predictable. (e.g. Alice calls Eve every Monday evening for around an hour.)
More Information: [https://cwtch.im](https://cwtch.im)
Development and Contributing information in [CONTRIBUTING.md](https://git.openprivacy.ca/cwtch.im/cwtch/src/master/CONTRIBUTING.md)
## Running Cwtch
### Server
#### Docker
### NOTE: The following section is out of date. The new Cwtch server is available from https://git.openprivacy.ca/cwtch.im/server, but there is no current docker container for it.
This repository contains a `Dockerfile` allowing you to build and run the server as a [docker](https://www.docker.com/) container.
To get started issue `docker build -t openpriv/cwtch-server:latest .`. This will create 2 temporary docker containers, one to build the Tor daemon and one to build Cwtch. The compiled binaries will then be bundled into a new image and tagged as `openpriv/cwtch-server:latest`.
To run Cwtch in the foreground execute `docker run openpriv/cwtch-server:latest`; you will see a small amount of output from Tor and then Cwtch will output your server address. When you press `Ctrl + C` the container will terminate. To run Cwtch in the background execute `docker run --name my-cwtch-server -d openpriv/cwtch-server:latest`. To get your Cwtch server address issue `docker logs my-cwtch-server`.
The image creates 3 volumes: /etc/cwtch, /etc/tor, and /var/lib/tor.
##### Upgrading
To upgrade with continuity:
```
# Stop current container/service
docker stop my-cwtch-server
docker pull openpriv/cwtch-server
# Create a new container and copy the volumes (cwtch/onion keys, message store)
docker create --name my-cwtch-server-2 --volumes-from my-cwtch-server openpriv/cwtch-server:latest
# Resume service with the new container
docker start my-cwtch-server-2
```


@@ -1,598 +1,47 @@
package app
import (
"cwtch.im/cwtch/app/plugins"
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/extensions"
"cwtch.im/cwtch/functionality/filesharing"
"cwtch.im/cwtch/functionality/servers"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/model/constants"
"cwtch.im/cwtch/peer"
"cwtch.im/cwtch/protocol/connections"
"cwtch.im/cwtch/settings"
"cwtch.im/cwtch/storage"
"git.openprivacy.ca/openprivacy/connectivity"
"git.openprivacy.ca/openprivacy/log"
"os"
path "path/filepath"
"strconv"
"sync"
"log"
)
type application struct {
eventBuses map[string]event.Manager
directory string
peers map[string]peer.CwtchPeer
acn connectivity.ACN
plugins sync.Map //map[string] []plugins.Plugin
engines map[string]connections.Engine
appBus event.Manager
eventQueue event.Queue
appmutex sync.Mutex
engineHooks connections.EngineHooks
settings *settings.GlobalSettingsFile
// Application is a facade over a CwtchPeer that provides some wrapping logic.
type Application struct {
Peer *peer.CwtchPeer
}
func (app *application) IsFeatureEnabled(experiment string) bool {
globalSettings := app.ReadSettings()
if globalSettings.ExperimentsEnabled {
if status, exists := globalSettings.Experiments[experiment]; exists {
return status
}
}
return false
}
// Application is a full cwtch peer application. It allows management, usage and storage of multiple peers
type Application interface {
LoadProfiles(password string)
CreateProfile(name string, password string, autostart bool)
InstallEngineHooks(engineHooks connections.EngineHooks)
ImportProfile(exportedCwtchFile string, password string) (peer.CwtchPeer, error)
EnhancedImportProfile(exportedCwtchFile string, password string) string
DeleteProfile(onion string, currentPassword string)
AddPeerPlugin(onion string, pluginID plugins.PluginID)
GetPrimaryBus() event.Manager
GetEventBus(onion string) event.Manager
QueryACNStatus()
QueryACNVersion()
ConfigureConnections(onion string, doListn, doPeers, doServers bool)
ActivatePeerEngine(onion string)
DeactivatePeerEngine(onion string)
ReadSettings() settings.GlobalSettings
UpdateSettings(settings settings.GlobalSettings)
IsFeatureEnabled(experiment string) bool
ShutdownPeer(string)
Shutdown()
GetPeer(onion string) peer.CwtchPeer
ListProfiles() []string
}
// LoadProfileFn is the function signature for a function in an app that loads a profile
type LoadProfileFn func(profile peer.CwtchPeer)
func LoadAppSettings(appDirectory string) *settings.GlobalSettingsFile {
log.Debugf("NewApp(%v)\n", appDirectory)
os.MkdirAll(path.Join(appDirectory, "profiles"), 0700)
// Note: we basically presume this doesn't fail. If the file doesn't exist we create it, and as such the
// only plausible error conditions are related to file create e.g. low disk space. If that is the case then
// many other parts of Cwtch are likely to fail also.
globalSettingsFile, err := settings.InitGlobalSettingsFile(appDirectory, DefactoPasswordForUnencryptedProfiles)
if err != nil {
log.Errorf("error initializing global globalSettingsFile file %s. Global globalSettingsFile might not be loaded or saved", err)
}
return globalSettingsFile
}
// NewApp creates a new app with some environment awareness and initializes a Tor Manager
func NewApp(acn connectivity.ACN, appDirectory string, settings *settings.GlobalSettingsFile) Application {
app := &application{engines: make(map[string]connections.Engine), eventBuses: make(map[string]event.Manager), directory: appDirectory, appBus: event.NewEventManager(), settings: settings, eventQueue: event.NewQueue()}
app.peers = make(map[string]peer.CwtchPeer)
app.engineHooks = connections.DefaultEngineHooks{}
app.acn = acn
statusHandler := app.getACNStatusHandler()
acn.SetStatusCallback(statusHandler)
acn.SetVersionCallback(app.getACNVersionHandler())
prog, status := acn.GetBootstrapStatus()
statusHandler(prog, status)
app.GetPrimaryBus().Subscribe(event.ACNStatus, app.eventQueue)
go app.eventHandler()
return app
}
func (app *application) InstallEngineHooks(engineHooks connections.EngineHooks) {
app.appmutex.Lock()
defer app.appmutex.Unlock()
app.engineHooks = engineHooks
}
func (app *application) ReadSettings() settings.GlobalSettings {
app.appmutex.Lock()
defer app.appmutex.Unlock()
return app.settings.ReadGlobalSettings()
}
func (app *application) UpdateSettings(settings settings.GlobalSettings) {
// don't allow any other application changes while settings update
app.appmutex.Lock()
defer app.appmutex.Unlock()
app.settings.WriteGlobalSettings(settings)
for _, profile := range app.peers {
profile.UpdateExperiments(settings.ExperimentsEnabled, settings.Experiments)
// Explicitly toggle blocking/unblocking of unknown connections for profiles
// that have been loaded.
if settings.BlockUnknownConnections {
profile.BlockUnknownConnections()
} else {
profile.AllowUnknownConnections()
}
profile.NotifySettingsUpdate(settings)
}
}
// ListProfiles returns a list of the onions of all loaded profiles
func (app *application) ListProfiles() []string {
var keys []string
app.appmutex.Lock()
defer app.appmutex.Unlock()
for handle := range app.peers {
keys = append(keys, handle)
}
return keys
}
// GetPeer returns a cwtchPeer for a given onion address
func (app *application) GetPeer(onion string) peer.CwtchPeer {
app.appmutex.Lock()
defer app.appmutex.Unlock()
if profile, ok := app.peers[onion]; ok {
return profile
}
return nil
}
func (app *application) AddPlugin(peerid string, id plugins.PluginID, bus event.Manager, acn connectivity.ACN) {
if _, exists := app.plugins.Load(peerid); !exists {
app.plugins.Store(peerid, []plugins.Plugin{})
}
pluginsinf, _ := app.plugins.Load(peerid)
peerPlugins := pluginsinf.([]plugins.Plugin)
for _, plugin := range peerPlugins {
if plugin.Id() == id {
log.Errorf("trying to add second instance of plugin %v to peer %v", id, peerid)
return
}
}
newp, err := plugins.Get(id, bus, acn, peerid)
// NewProfile creates a new CwtchPeer with a given name.
func (app *Application) NewProfile(name string, filename string) error {
profile := peer.NewCwtchPeer(name)
app.Peer = profile
err := profile.Save(filename)
if err == nil {
newp.Start()
peerPlugins = append(peerPlugins, newp)
log.Debugf("storing plugin for %v %v", peerid, peerPlugins)
app.plugins.Store(peerid, peerPlugins)
} else {
log.Errorf("error adding plugin: %v", err)
go func() {
err := app.Peer.Listen()
if err != nil {
log.Panic(err)
}
}()
}
return err
}
func (app *application) CreateProfile(name string, password string, autostart bool) {
autostartVal := constants.True
if !autostart {
autostartVal = constants.False
}
tagVal := constants.ProfileTypeV1Password
if password == DefactoPasswordForUnencryptedProfiles {
tagVal = constants.ProfileTypeV1DefaultPassword
}
app.CreatePeer(name, password, map[attr.ZonedPath]string{
attr.ProfileZone.ConstructZonedPath(constants.Tag): tagVal,
attr.ProfileZone.ConstructZonedPath(constants.PeerAutostart): autostartVal,
})
}
func (app *application) setupPeer(profile peer.CwtchPeer) {
eventBus := event.NewEventManager()
app.eventBuses[profile.GetOnion()] = eventBus
// Initialize the Peer with the Given Event Bus
app.peers[profile.GetOnion()] = profile
profile.Init(eventBus)
// Update the Peer with the Most Recent Experiment State...
globalSettings := app.settings.ReadGlobalSettings()
profile.UpdateExperiments(globalSettings.ExperimentsEnabled, globalSettings.Experiments)
app.registerHooks(profile)
// Register the Peer With Application Plugins..
app.AddPeerPlugin(profile.GetOnion(), plugins.CONNECTIONRETRY) // Now Mandatory
app.AddPeerPlugin(profile.GetOnion(), plugins.HEARTBEAT) // Now Mandatory
}
func (app *application) CreatePeer(name string, password string, attributes map[attr.ZonedPath]string) {
app.appmutex.Lock()
defer app.appmutex.Unlock()
profileDirectory := path.Join(app.directory, "profiles", model.GenerateRandomID())
profile, err := peer.CreateEncryptedStorePeer(profileDirectory, name, password)
if err != nil {
log.Errorf("Error Creating Peer: %v", err)
app.appBus.Publish(event.NewEventList(event.PeerError, event.Error, err.Error()))
return
}
app.setupPeer(profile)
for zp, val := range attributes {
zone, key := attr.ParseZone(zp.ToString())
profile.SetScopedZonedAttribute(attr.LocalScope, zone, key, val)
}
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.True}))
}
func (app *application) DeleteProfile(onion string, password string) {
log.Debugf("DeleteProfile called on %v\n", onion)
app.appmutex.Lock()
defer app.appmutex.Unlock()
// short circuit to prevent nil-pointer panic if this function is called twice (or incorrectly)
peer := app.peers[onion]
if peer == nil {
log.Errorf("shutdownPeer called with invalid onion %v", onion)
return
}
// allow a blank password to delete "unencrypted" accounts...
if password == "" {
password = DefactoPasswordForUnencryptedProfiles
}
if peer.CheckPassword(password) {
// soft-shutdown
peer.Shutdown()
// delete the underlying storage
peer.Delete()
// hard shutdown / remove from app
app.shutdownPeer(onion)
// Shutdown and Remove the Engine
log.Debugf("Delete peer for %v Done\n", onion)
app.appBus.Publish(event.NewEventList(event.PeerDeleted, event.Identity, onion))
return
}
app.appBus.Publish(event.NewEventList(event.AppError, event.Error, event.PasswordMatchError, event.Identity, onion))
}
func (app *application) AddPeerPlugin(onion string, pluginID plugins.PluginID) {
app.AddPlugin(onion, pluginID, app.eventBuses[onion], app.acn)
}
func (app *application) ImportProfile(exportedCwtchFile string, password string) (peer.CwtchPeer, error) {
profileDirectory := path.Join(app.directory, "profiles")
profile, err := peer.ImportProfile(exportedCwtchFile, profileDirectory, password)
if profile != nil || err == nil {
app.installProfile(profile)
}
return profile, err
}
func (app *application) EnhancedImportProfile(exportedCwtchFile string, password string) string {
_, err := app.ImportProfile(exportedCwtchFile, password)
// SetProfile loads an existing profile from the given filename.
func (app *Application) SetProfile(filename string) error {
profile, err := peer.LoadCwtchPeer(filename)
app.Peer = profile
if err == nil {
return ""
}
return err.Error()
}
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
func (app *application) LoadProfiles(password string) {
count := 0
migrating := false
files, err := os.ReadDir(path.Join(app.directory, "profiles"))
if err != nil {
log.Errorf("error: cannot read profiles directory: %v", err)
return
}
for _, file := range files {
// Attempt to load an encrypted database
profileDirectory := path.Join(app.directory, "profiles", file.Name())
profile, err := peer.FromEncryptedDatabase(profileDirectory, password)
loaded := false
if err == nil {
// return the load the profile...
log.Infof("loading profile from new-type storage database...")
loaded = app.installProfile(profile)
} else { // On failure attempt to load a legacy profile
profileStore, err := storage.LoadProfileWriterStore(profileDirectory, password)
go func() {
err := app.Peer.Listen()
if err != nil {
continue
log.Panic(err)
}
log.Infof("found legacy profile. importing to new database structure...")
legacyProfile := profileStore.GetProfileCopy(true)
if !migrating {
migrating = true
app.appBus.Publish(event.NewEventList(event.StartingStorageMiragtion))
}
cps, err := peer.CreateEncryptedStore(profileDirectory, password)
if err != nil {
log.Errorf("error creating encrypted store: %v", err)
continue
}
profile := peer.ImportLegacyProfile(legacyProfile, cps)
loaded = app.installProfile(profile)
}
if loaded {
count++
}
}
if count == 0 {
message := event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0)
app.appBus.Publish(message)
}
if migrating {
app.appBus.Publish(event.NewEventList(event.DoneStorageMigration))
}()
}
return err
}
func (app *application) registerHooks(profile peer.CwtchPeer) {
// Register Hooks
profile.RegisterHook(extensions.ProfileValueExtension{})
profile.RegisterHook(new(filesharing.Functionality))
profile.RegisterHook(new(filesharing.ImagePreviewsFunctionality))
profile.RegisterHook(new(servers.Functionality))
// Ensure that Profiles have the Most Up to Date Settings...
profile.NotifySettingsUpdate(app.settings.ReadGlobalSettings())
}
// installProfile takes a profile and if it isn't loaded in the app, installs it and returns true
func (app *application) installProfile(profile peer.CwtchPeer) bool {
app.appmutex.Lock()
defer app.appmutex.Unlock()
// Only attempt to finalize the profile if we don't have one loaded...
if app.peers[profile.GetOnion()] == nil {
app.setupPeer(profile)
// Finalize the Creation of Peer / Notify any Interfaces..
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.False}))
return true
}
// Otherwise shutdown the connections
profile.Shutdown()
return false
}
// ActivatePeerEngine creates a peer engine for use with an ACN, should be called once the underlying ACN is online
func (app *application) ActivatePeerEngine(onion string) {
profile := app.GetPeer(onion)
if profile != nil {
if _, exists := app.engines[onion]; !exists {
eventBus, exists := app.eventBuses[profile.GetOnion()]
if !exists {
// todo handle this case?
log.Errorf("cannot activate peer engine without an event bus")
return
}
engine, err := profile.GenerateProtocolEngine(app.acn, eventBus, app.engineHooks)
if err == nil {
log.Debugf("restartFlow: Creating a New Protocol Engine...")
app.engines[profile.GetOnion()] = engine
eventBus.Publish(event.NewEventList(event.ProtocolEngineCreated))
app.QueryACNStatus()
} else {
log.Errorf("corrupted profile detected for %v", onion)
}
}
}
}
// ConfigureConnections autostarts the given kinds of connections.
func (app *application) ConfigureConnections(onion string, listen bool, peers bool, servers bool) {
profile := app.GetPeer(onion)
if profile != nil {
profileBus, exists := app.eventBuses[profile.GetOnion()]
if exists {
// if we are making a decision to ignore
if !peers || !servers {
profileBus.Publish(event.NewEventList(event.PurgeRetries))
}
// enable the engine if it doesn't exist...
// note: this function is idempotent
app.ActivatePeerEngine(onion)
if listen {
profile.Listen()
}
profileBus.Publish(event.NewEventList(event.ResumeRetries))
// do this in the background, for large contact lists it can take a long time...
go profile.StartConnections(peers, servers)
}
} else {
log.Errorf("profile does not exist %v", onion)
}
}
// DeactivatePeerEngine shuts down and cleans up a peer engine, should be called when an underlying ACN goes offline
func (app *application) DeactivatePeerEngine(onion string) {
if engine, exists := app.engines[onion]; exists {
engine.Shutdown()
delete(app.engines, onion)
}
}
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
func (app *application) GetPrimaryBus() event.Manager {
return app.appBus
}
// GetEventBus returns a cwtchPeer's event bus
func (app *application) GetEventBus(onion string) event.Manager {
if manager, ok := app.eventBuses[onion]; ok {
return manager
}
return nil
}
func (app *application) getACNStatusHandler() func(int, string) {
return func(progress int, status string) {
progStr := strconv.Itoa(progress)
app.appmutex.Lock()
app.appBus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status))
for _, bus := range app.eventBuses {
bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status))
}
app.appmutex.Unlock()
}
}
func (app *application) getACNVersionHandler() func(string) {
return func(version string) {
app.appmutex.Lock()
defer app.appmutex.Unlock()
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
}
}
func (app *application) QueryACNStatus() {
prog, status := app.acn.GetBootstrapStatus()
app.getACNStatusHandler()(prog, status)
}
func (app *application) QueryACNVersion() {
version := app.acn.GetVersion()
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
}
func (app *application) eventHandler() {
acnStatus := -1
for {
e := app.eventQueue.Next()
switch e.EventType {
case event.ACNStatus:
newAcnStatus, err := strconv.Atoi(e.Data[event.Progress])
if err != nil {
break
}
if newAcnStatus == 100 {
if acnStatus != 100 {
for _, onion := range app.ListProfiles() {
profile := app.GetPeer(onion)
if profile != nil {
autostart, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.PeerAutostart)
appearOffline, appearOfflineExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.PeerAppearOffline)
if !exists || autostart == "true" {
if appearOfflineExists && appearOffline == "true" {
// don't configure any connections...
log.Infof("peer appearing offline, not launching listen threads or connecting jobs")
app.ConfigureConnections(onion, false, false, false)
} else {
app.ConfigureConnections(onion, true, true, true)
}
}
}
}
}
} else {
if acnStatus == 100 {
// just fell offline
for _, onion := range app.ListProfiles() {
app.DeactivatePeerEngine(onion)
}
}
}
acnStatus = newAcnStatus
default:
// invalid event, signifies shutdown
if e.EventType == "" {
return
}
}
}
}
// ShutdownPeer shuts down a peer and removes it from the app's management
func (app *application) ShutdownPeer(onion string) {
app.appmutex.Lock()
defer app.appmutex.Unlock()
app.shutdownPeer(onion)
}
// shutdownPeer is a mutex-unlocked helper that shuts down a peer
//
//nolint:nilaway
func (app *application) shutdownPeer(onion string) {
// short circuit to prevent nil-pointer panic if this function is called twice (or incorrectly)
onionEventBus := app.eventBuses[onion]
onionPeer := app.peers[onion]
if onionEventBus == nil || onionPeer == nil {
log.Errorf("shutdownPeer called with invalid onion %v", onion)
return
}
// we are an internal locked method, app.eventBuses[onion] cannot fail...
onionEventBus.Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion))
onionEventBus.Shutdown()
delete(app.eventBuses, onion)
onionPeer.Shutdown()
delete(app.peers, onion)
if onionEngine, ok := app.engines[onion]; ok {
onionEngine.Shutdown()
delete(app.engines, onion)
}
log.Debugf("shutting down plugins for %v", onion)
pluginsI, ok := app.plugins.Load(onion)
if ok {
appPlugins := pluginsI.([]plugins.Plugin)
for _, plugin := range appPlugins {
plugin.Shutdown()
}
}
app.plugins.Delete(onion)
}
// Shutdown shuts down all peers of an app
func (app *application) Shutdown() {
app.appmutex.Lock()
defer app.appmutex.Unlock()
for id := range app.peers {
log.Debugf("Shutting Down Peer %v", id)
app.shutdownPeer(id)
}
log.Debugf("Shutting Down App")
app.eventQueue.Shutdown()
app.appBus.Shutdown()
log.Debugf("Shut Down Complete")
// PeerRequest attempts to set up a peer relationship with the given onion address.
func (app *Application) PeerRequest(onion string) {
app.Peer.PeerWithOnion(onion)
}
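The new `Application` facade above exposes a much smaller surface than the interface it replaces: create or load a file-backed profile, then ask the peer to connect. Below is a minimal sketch of how a caller might drive it; the profile filename and the onion address are placeholders, and error handling is kept deliberately simple.
```
package main

import (
	"log"

	"cwtch.im/cwtch/app"
)

func main() {
	a := &app.Application{}

	// Create a profile and persist it to a file ("alice.json" is a placeholder name).
	if err := a.NewProfile("alice", "alice.json"); err != nil {
		log.Fatalf("could not create profile: %v", err)
	}

	// On a later run the same file could be reloaded instead:
	//   err := a.SetProfile("alice.json")

	// Ask the underlying CwtchPeer to connect to a contact
	// (placeholder onion address, not a real service).
	a.PeerRequest("exampleonionaddressexampleonionaddressexampleonionaddre")
}
```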


@@ -1,6 +0,0 @@
package app
// DefactoPasswordForUnencryptedProfiles is used to offer "un-passworded" profiles. Our storage encrypts everything with a password. We need an agreed-upon
// password to use in that case, which the app can use behind the scenes to lock and unlock profiles.
// https://docs.openprivacy.ca/cwtch-security-handbook/profile_encryption_and_storage.html
const DefactoPasswordForUnencryptedProfiles = "be gay do crime"
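For context, a short sketch of how this constant was meant to be used with the (now removed) Application interface above: storage is always encrypted, so an "un-passworded" profile is created by passing the de facto password behind the scenes. The helper name, profile name, and parameters here are illustrative assumptions, not part of the codebase.
```
package example

import (
	"cwtch.im/cwtch/app"
	"git.openprivacy.ca/openprivacy/connectivity"
)

// createDefaultPasswordProfile is a hypothetical helper illustrating the intent of
// DefactoPasswordForUnencryptedProfiles: the profile is still encrypted at rest,
// just with the agreed-upon default password.
func createDefaultPasswordProfile(acn connectivity.ACN, appDirectory string) {
	globalSettings := app.LoadAppSettings(appDirectory)
	application := app.NewApp(acn, appDirectory, globalSettings)
	application.CreateProfile("alice", app.DefactoPasswordForUnencryptedProfiles, true)
}
```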

app/cli/main.go Normal file

@@ -0,0 +1,395 @@
package main
import (
app2 "cwtch.im/cwtch/app"
"fmt"
"github.com/c-bata/go-prompt"
"strings"
"time"
)
var app app2.Application
var suggestions = []prompt.Suggest{
{Text: "new-profile", Description: "create a new profile"},
{Text: "load-profile", Description: "load a new profile"},
{Text: "quit", Description: "quit cwtch"},
{Text: "info", Description: "show user info"},
{Text: "servers", Description: "retrieve a list of servers and their connection status"},
{Text: "peers", Description: "retrieve a list of peers and their connection status"},
{Text: "contacts", Description: "retrieve a list of contacts"},
{Text: "groups", Description: "retrieve a list of groups"},
{Text: "send", Description: "send a message to a group"},
{Text: "timeline", Description: "read the timeline of a given group"},
{Text: "accept-invite", Description: "accept the invite of a group"},
{Text: "invite", Description: "invite a new contact"},
{Text: "invite-to-group", Description: "invite an existing contact to join an existing group"},
{Text: "new-group", Description: "create a new group"},
{Text: "help", Description: "print list of commands"},
{Text: "trust", Description: "trust a peer"},
{Text: "block", Description: "block a peer - you will no longer see messages or connect to this peer"},
}
var usages = map[string]string{
"new-profile": "new-profile [name] [filename]",
"load-profile": "load-profile [filename]",
"quit": "",
"servers": "",
"peers": "",
"contacts": "",
"groups": "",
"info": "",
"send": "send [groupid] [message]",
"timeline": "timeline [groupid]",
"accept-invite": "accept-invite [groupid]",
"invite": "invite [peerid]",
"invite-to-group": "invite-to-group [peerid] [groupid]",
"new-group": "new-group [server]",
"help": "",
"trust": "trust [peerid]",
"block": "block [peerid]",
}
func completer(d prompt.Document) []prompt.Suggest {
var s []prompt.Suggest
if d.FindStartOfPreviousWord() == 0 {
return prompt.FilterHasPrefix(suggestions, d.GetWordBeforeCursor(), true)
}
w := d.CurrentLine()
if strings.HasPrefix(w, "send") || strings.HasPrefix(w, "timeline") {
s = []prompt.Suggest{}
groups := app.Peer.Profile.Groups
for _, group := range groups {
s = append(s, prompt.Suggest{Text: group.GroupID, Description: "Group owned by " + group.Owner + " on " + group.GroupServer})
}
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
}
if strings.HasPrefix(w, "block") || strings.HasPrefix(w, "trust") {
s = []prompt.Suggest{}
contacts := app.Peer.Profile.Contacts
for _, contact := range contacts {
s = append(s, prompt.Suggest{Text: contact.Onion, Description: contact.Name})
}
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
}
if strings.HasPrefix(w, "invite-to-group") {
if d.FindStartOfPreviousWordWithSpace() == 0 {
s = []prompt.Suggest{}
contacts := app.Peer.Profile.Contacts
for _, contact := range contacts {
s = append(s, prompt.Suggest{Text: contact.Onion, Description: contact.Name})
}
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
}
s = []prompt.Suggest{}
groups := app.Peer.Profile.Groups
for _, group := range groups {
if group.Owner == "self" {
s = append(s, prompt.Suggest{Text: group.GroupID, Description: "Group owned by " + group.Owner + " on " + group.GroupServer})
}
}
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
}
if strings.HasPrefix(w, "accept-invite") {
s = []prompt.Suggest{}
groups := app.Peer.Profile.Groups
for _, group := range groups {
if group.Accepted == false {
s = append(s, prompt.Suggest{Text: group.GroupID, Description: "Group owned by " + group.Owner + " on " + group.GroupServer})
}
}
return prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)
}
return s
}
func main() {
cwtch :=
`
#, #'
@@@@@@:
@@@@@@.
@'@@+#' @@@@+
''''''@ #+@ :
@''''+;+' . '
@''@' :+' , ; ##, +'
,@@ ;' #'#@''. #''@''#
# ''''''#:,,#'''''@
: @''''@ :+'''@
' @;+'@ @'#
.:# '#..# '# @
@@@@@@
@@@@@@
'@@@@
@# . .
+++, #'@+'@
''', ''''''#
.#+# ''', @'''+,
@''# ''', .#@
:; '@''# .;. ''', ' : ;. ,
@+'''@ '+'+ @++ @+'@+''''+@ #+'''#: ''';#''+@ @@@@ @@@@@@@@@ :@@@@#
#''''''# +''. +'': +'''''''''+ @'''''''# '''+'''''@ @@@@ @@@@@@@@@@@@@@@@:
@'''@@'''@ @''# ,'''@ ''+ @@''+#+ :'''@@+''' ''''@@'''' @@@@ @@@@@@@@@@@@@@@@@
'''# @''# +''@ @'''# ;''@ +''+ @''@ ,+'', '''@ #'''. @@@@ @@@@ '@@@# @@@@
;''' @@; '''# #'@'' @''@ @''+ +''# .@@ ''', '''. @@@@ @@@ @@@ .@@@
@''# #'' ''#''#@''. #''# '''. '''. +'', @@@@ @@@ @@@ @@@
@''# @''@'' #'@+'+ #''# '''. ''', +'', +@@@.@@@ @@@@ @@@, @@@ ,@@@
;''+ @, +''@'# @'+''@ @''# +''; '+ ''', +'', @@@@@@@@# @@@@ @@@. .@@@ .@@@
'''# ++'+ ''''@ ,''''# #''' @''@ '@''+ ''', ''', @@@@@@@@: @@@@ @@@; .@@@' ;@@@
@'''@@'''@ #'''. +'''' ;'''#@ :'''#@+''+ ''', ''', @@@@@@# @@@@ @@@+ ,@@@. @@@@
#''''''# @''+ @''+ +'''' @'''''''# ''', ''', #@@@. @@@@ @@@+ @@@ @@@@
@+''+@ '++@ ;++@ '#''@ ##'''@: +++, +++, :@ @@@@ @@@' @@@ '@@@
:' ' '`
fmt.Printf("%v\n\n", cwtch)
quit := false
app = app2.Application{}
profilefile := ""
var history []string
for !quit {
profile := "unset"
if app.Peer != nil {
profile = app.Peer.Profile.Name
}
prmpt := fmt.Sprintf("cwtch [%v]> ", profile)
text := prompt.Input(prmpt, completer, prompt.OptionSuggestionBGColor(prompt.Purple),
prompt.OptionDescriptionBGColor(prompt.White),
prompt.OptionHistory(history))
commands := strings.Split(text[0:], " ")
history = append(history, text)
switch commands[0] {
case "quit":
app.Peer.Save(profilefile)
quit = true
case "new-profile":
if len(commands) == 3 {
err := app.NewProfile(commands[1], commands[2])
profilefile = commands[2]
if err == nil {
fmt.Printf("New profile created for %v\n", commands[1])
} else {
fmt.Printf("Error creating profile for %v: %v\n", commands[1], err)
}
} else {
fmt.Printf("Error creating NewProfile, usage: %s\n", usages["new-profile"])
}
case "load-profile":
if len(commands) == 2 {
err := app.SetProfile(commands[1])
profilefile = commands[1]
if err == nil {
fmt.Printf("Loaded profile for %v\n", commands[1])
} else {
fmt.Printf("Error loading profile for %v: %v\n", commands[1], err)
}
} else {
fmt.Printf("Error Loading profile, usage: %s\n", usages["load-profile"])
}
case "info":
if app.Peer != nil {
fmt.Printf("Address cwtch:%v\n", app.Peer.Profile.Onion)
} else {
fmt.Printf("Profile needs to be set\n")
}
case "invite":
if len(commands) == 2 {
fmt.Printf("Inviting cwtch:%v\n", commands[1])
app.PeerRequest(commands[1])
} else {
fmt.Printf("Error inviting peer, usage: %s\n", usages["invite"])
}
case "peers":
peers := app.Peer.GetPeers()
for p, s := range peers {
fmt.Printf("Name: %v Status: %v\n", p, s)
}
case "servers":
servers := app.Peer.GetServers()
for s, st := range servers {
fmt.Printf("Name: %v Status: %v\n", s, st)
}
case "contacts":
for _, c := range app.Peer.Profile.Contacts {
fmt.Printf("Name: %v, Onion: %v, Trusted: %v\n", c.Name, c.Onion, c.Trusted)
}
case "groups":
for gid, g := range app.Peer.Profile.Groups {
fmt.Printf("Group Id: %v, Owner: %v Accepted:%v \n", gid, g.Owner, g.Accepted)
}
case "trust":
if len(commands) == 2 {
app.Peer.TrustPeer(commands[1])
} else {
fmt.Printf("Error trusting peer, usage: %s\n", usages["trust"])
}
case "block":
if len(commands) == 2 {
app.Peer.BlockPeer(commands[1])
} else {
fmt.Printf("Error blocking peer, usage: %s\n", usages["trust"])
}
case "accept-invite":
if len(commands) == 2 {
groupID := commands[1]
err := app.Peer.AcceptInvite(groupID)
if err != nil {
fmt.Printf("Error: %v\n", err)
} else {
app.Peer.Save(profilefile)
group := app.Peer.Profile.GetGroupByGroupID(groupID)
if group == nil {
fmt.Printf("Error: group does not exist\n")
} else {
app.Peer.JoinServer(group.GroupServer)
}
}
} else {
fmt.Printf("Error accepting invite, usage: %s\n", usages["accept-invite"])
}
case "invite-to-group":
if len(commands) == 3 {
fmt.Printf("Inviting %v to %v\n", commands[1], commands[2])
err := app.Peer.InviteOnionToGroup(commands[1], commands[2])
if err != nil {
fmt.Printf("Error: %v\n", err)
}
} else {
fmt.Printf("Error inviting peer to group, usage: %s\n", usages["invite-to-group"])
}
case "new-group":
if len(commands) == 2 {
fmt.Printf("Setting up a new group on server:%v\n", commands[1])
id, _, err := app.Peer.Profile.StartGroup(commands[1])
if err == nil {
fmt.Printf("New Group [%v] created for server %v\n", id, commands[1])
app.Peer.Save(profilefile)
group := app.Peer.Profile.GetGroupByGroupID(id)
if group == nil {
fmt.Printf("Error: group does not exist\n")
} else {
app.Peer.JoinServer(group.GroupServer)
}
} else {
fmt.Printf("Error creating new group: %v", err)
}
} else {
fmt.Printf("Error creating a new group, usage: %s\n", usages["new-group"])
}
case "send":
if len(commands) > 2 {
message := strings.Join(commands[2:], " ")
err := app.Peer.SendMessageToGroup(commands[1], message)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
} else {
fmt.Printf("Error sending message to group, usage: %s\n", usages["send"])
}
case "timeline":
if len(commands) == 2 {
group := app.Peer.Profile.GetGroupByGroupID(commands[1])
if group == nil {
fmt.Printf("Error: group does not exist\n")
} else {
timeline := group.GetTimeline()
for _, m := range timeline {
verified := "not-verified"
if m.Verified {
verified = "verified"
}
p, ok := app.Peer.Profile.Contacts[m.PeerID]
name := "unknown"
if ok {
name = p.Name
} else if app.Peer.Profile.Onion == m.PeerID {
name = app.Peer.Profile.Name
}
fmt.Printf("%v %v (%v): %v [%s]\n", m.Timestamp, name, m.PeerID, m.Message, verified)
}
}
} else {
fmt.Printf("Error reading timeline from group, usage: %s\n", usages["timeline"])
}
case "export-group":
if len(commands) == 2 {
group := app.Peer.Profile.GetGroupByGroupID(commands[1])
if group == nil {
fmt.Printf("Error: group does not exist\n")
} else {
invite, _ := app.Peer.ExportGroup(commands[1])
fmt.Printf("Invite: %v\n", invite)
}
} else {
fmt.Printf("Error reading timeline from group, usage: %s\n", usages["timeline"])
}
case "save":
app.Peer.Save(profilefile)
case "help":
for _, command := range suggestions {
fmt.Printf("%-18s%-56s%s\n", command.Text, command.Description, usages[command.Text])
}
case "sendlots":
if len(commands) == 2 {
group := app.Peer.Profile.GetGroupByGroupID(commands[1])
if group == nil {
fmt.Printf("Error: group does not exist\n")
} else {
for i := 0; i < 100; i++ {
fmt.Printf("Sending message: %v\n", i)
err := app.Peer.SendMessageToGroup(commands[1], fmt.Sprintf("this is message %v", i))
if err != nil {
fmt.Printf("could not send message %v because %v\n", i, err)
}
}
fmt.Printf("Waiting 5 seconds for message to process...\n")
time.Sleep(time.Second * 5)
timeline := group.GetTimeline()
totalLatency := time.Duration(0)
maxLatency := time.Duration(0)
totalMessages := 0
for i := 0; i < 100; i++ {
found := false
for _, m := range timeline {
if m.Message == fmt.Sprintf("this is message %v", i) && m.PeerID == app.Peer.Profile.Onion {
found = true
latency := m.Received.Sub(m.Timestamp)
fmt.Printf("Latency for Message %v was %v\n", i, latency)
totalLatency = totalLatency + latency
if maxLatency < latency {
maxLatency = latency
}
totalMessages++
}
}
if !found {
fmt.Printf("message %v was never received\n", i)
}
}
fmt.Printf("Average Latency for %v messages was: %vms\n", totalMessages, time.Duration(int64(totalLatency)/int64(totalMessages)))
fmt.Printf("Max Latency for %v messages was: %vms\n", totalMessages, maxLatency)
}
}
}
if profilefile != "" {
app.Peer.Save(profilefile)
}
}
}


@@ -1,47 +0,0 @@
package plugins
import (
"cwtch.im/cwtch/event"
"git.openprivacy.ca/openprivacy/log"
"time"
)
const antispamTickTime = 30 * time.Second
type antispam struct {
bus event.Manager
queue event.Queue
breakChan chan bool
}
func (a *antispam) Start() {
go a.run()
}
func (a *antispam) Id() PluginID {
return ANTISPAM
}
func (a *antispam) Shutdown() {
a.breakChan <- true
}
func (a *antispam) run() {
log.Debugf("running antispam trigger plugin")
for {
select {
case <-time.After(antispamTickTime):
// no fuss, just trigger the check. Downstream will filter out superfluous actions
a.bus.Publish(event.NewEvent(event.TriggerAntispamCheck, map[event.Field]string{}))
continue
case <-a.breakChan:
return
}
}
}
// NewAntiSpam returns a Plugin that when started will trigger antispam payments on a regular interval
func NewAntiSpam(bus event.Manager) Plugin {
cr := &antispam{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
return cr
}
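Since this (removed) antispam plugin only publishes a TriggerAntispamCheck event every 30 seconds and leaves filtering to downstream handlers, wiring it up is just a matter of giving it an event manager and subscribing a queue to the events it emits. A minimal sketch of that old behaviour, using the event and plugins packages as shown in this diff:
```
package main

import (
	"fmt"

	"cwtch.im/cwtch/app/plugins"
	"cwtch.im/cwtch/event"
)

func main() {
	bus := event.NewEventManager()
	queue := event.NewQueue()

	// Listen for the periodic check the plugin publishes.
	bus.Subscribe(event.TriggerAntispamCheck, queue)

	antispam := plugins.NewAntiSpam(bus)
	antispam.Start()
	defer antispam.Shutdown()

	// Blocks until the first tick fires (roughly antispamTickTime, 30s).
	e := queue.Next()
	fmt.Printf("received %v; downstream would now trigger an antispam payment if needed\n", e.EventType)
}
```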


@@ -1,519 +0,0 @@
package plugins
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/protocol/connections"
"git.openprivacy.ca/openprivacy/connectivity/tor"
"git.openprivacy.ca/openprivacy/log"
"math"
"strconv"
"sync"
"time"
)
// TODO: Move to protocol/connections
// This plugin is now required, and it would make more sense to run it more tightly integrated with the engine
const tickTimeSec = 30
const tickTime = tickTimeSec * time.Second
const circuitTimeoutSecs int = 120
const MaxBaseTimeoutSec = 5 * 60 // a max base time out of 5 min
const maxFailedBackoff = 6 // 2^6 = 64 -> 64 * [2m to 5m] = 2h8m to 5h20m
const PriorityQueueTimeSinceQualifierHours float64 = 168
type connectionType int
const (
peerConn connectionType = iota
serverConn
)
type contact struct {
id string
state connections.ConnectionState
ctype connectionType
lastAttempt time.Time
failedCount int
lastSeen time.Time
queued bool
}
// compare a to b
// returns -1 if a < b
//
// 0 if a == b
// +1 if a > b
//
// algo: sort by failedCount first favouring fewer attempts, then sort by lastSeen time favouring more recent connections
func (a *contact) compare(b *contact) int {
if a.failedCount < b.failedCount {
return -1
} else if a.failedCount > b.failedCount {
return +1
}
if a.lastSeen.After(b.lastSeen) {
return -1
} else if a.lastSeen.Before(b.lastSeen) {
return +1
}
return 0
}
type connectionQueue struct {
queue []*contact
}
func newConnectionQueue() *connectionQueue {
return &connectionQueue{queue: []*contact{}}
}
func (cq *connectionQueue) insert(c *contact) {
// find loc
i := 0
var b *contact
for i, b = range cq.queue {
if c.compare(b) >= 0 {
break
}
}
// insert
if len(cq.queue) == i { // nil or empty slice or after last element
cq.queue = append(cq.queue, c)
} else {
cq.queue = append(cq.queue[:i+1], cq.queue[i:]...) // index < len(a)
cq.queue[i] = c
}
c.queued = true
}
func (cq *connectionQueue) dequeue() *contact {
if len(cq.queue) == 0 {
return nil
}
c := cq.queue[0]
cq.queue = cq.queue[1:]
c.queued = false
return c
}
func (cq *connectionQueue) len() int {
return len(cq.queue)
}
type contactRetry struct {
bus event.Manager
queue event.Queue
ACNUp bool
ACNUpTime time.Time
protocolEngine bool
running bool
breakChan chan bool
onion string
lastCheck time.Time
acnProgress int
connections sync.Map //[string]*contact
pendingQueue *connectionQueue
priorityQueue *connectionQueue
authorizedPeers sync.Map
stallRetries bool
}
// NewConnectionRetry returns a Plugin that when started will retry connecting to contacts with a failedCount timing
func NewConnectionRetry(bus event.Manager, onion string) Plugin {
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1), authorizedPeers: sync.Map{}, connections: sync.Map{}, stallRetries: true, ACNUp: false, ACNUpTime: time.Now(), protocolEngine: false, onion: onion, pendingQueue: newConnectionQueue(), priorityQueue: newConnectionQueue()}
return cr
}
// maxTorCircuitsPending is a function to throttle access to the tor network during start up
func (cr *contactRetry) maxTorCircuitsPending() int {
timeSinceStart := time.Since(cr.ACNUpTime)
if timeSinceStart < 30*time.Second {
return 4
} else if timeSinceStart < 4*time.Minute {
return 8
} else if timeSinceStart < 8*time.Minute {
return 16
}
return connections.TorMaxPendingConns
}
func (cr *contactRetry) connectingCount() int {
connecting := 0
cr.connections.Range(func(k, v interface{}) bool {
conn := v.(*contact)
if conn.state == connections.CONNECTING {
connecting++
}
return true
})
return connecting
}
func (cr *contactRetry) Start() {
if !cr.running {
go cr.run()
} else {
log.Errorf("Attempted to start Contact Retry plugin twice for %v", cr.onion)
}
}
func (cr *contactRetry) Id() PluginID {
return CONNECTIONRETRY
}
func (cr *contactRetry) run() {
cr.running = true
cr.bus.Subscribe(event.PeerStateChange, cr.queue)
cr.bus.Subscribe(event.ACNStatus, cr.queue)
cr.bus.Subscribe(event.ServerStateChange, cr.queue)
cr.bus.Subscribe(event.QueuePeerRequest, cr.queue)
cr.bus.Subscribe(event.QueueJoinServer, cr.queue)
cr.bus.Subscribe(event.DisconnectPeerRequest, cr.queue)
cr.bus.Subscribe(event.DisconnectServerRequest, cr.queue)
cr.bus.Subscribe(event.ProtocolEngineShutdown, cr.queue)
cr.bus.Subscribe(event.ProtocolEngineCreated, cr.queue)
cr.bus.Subscribe(event.DeleteContact, cr.queue)
cr.bus.Subscribe(event.UpdateConversationAuthorization, cr.queue)
cr.bus.Subscribe(event.PurgeRetries, cr.queue)
cr.bus.Subscribe(event.ResumeRetries, cr.queue)
for {
// Only attempt connection if both the ACN and the Protocol Engines are Online...
log.Debugf("restartFlow checking state")
if cr.ACNUp && cr.protocolEngine && !cr.stallRetries {
log.Debugf("restartFlow time to queue!!")
cr.requeueReady()
connectingCount := cr.connectingCount()
// do priority connections first...
for connectingCount < cr.maxTorCircuitsPending() && len(cr.priorityQueue.queue) > 0 {
contact := cr.priorityQueue.dequeue()
if contact == nil {
break
}
// could have received incoming connection while in queue, make sure still disconnected before trying
if contact.state == connections.DISCONNECTED {
cr.publishConnectionRequest(contact)
connectingCount++
}
}
for connectingCount < cr.maxTorCircuitsPending() && len(cr.pendingQueue.queue) > 0 {
contact := cr.pendingQueue.dequeue()
if contact == nil {
break
}
// could have received incoming connection while in queue, make sure still disconnected before trying
if contact.state == connections.DISCONNECTED {
cr.publishConnectionRequest(contact)
connectingCount++
}
}
cr.lastCheck = time.Now()
}
// regardless of whether we're up, run a manual forced disconnection of timed-out connections
cr.connections.Range(func(k, v interface{}) bool {
p := v.(*contact)
if p.state == connections.CONNECTING && time.Since(p.lastAttempt) > time.Duration(circuitTimeoutSecs)*time.Second*2 {
// we have been "connecting" for twice the circuttimeout so it's failed, we just didn't learn about it, manually disconnect
cr.handleEvent(p.id, connections.DISCONNECTED, p.ctype)
log.Errorf("had to manually set peer %v of profile %v to DISCONNECTED due to assumed circuit timeout (%v) seconds", p.id, cr.onion, circuitTimeoutSecs*2)
}
return true
})
select {
case e := <-cr.queue.OutChan():
switch e.EventType {
case event.PurgeRetries:
// Purge All Authorized Peers
cr.authorizedPeers.Range(func(key interface{}, value interface{}) bool {
cr.authorizedPeers.Delete(key)
return true
})
// Purge All Connection States
cr.connections.Range(func(key interface{}, value interface{}) bool {
cr.connections.Delete(key)
return true
})
case event.ResumeRetries:
log.Infof("resuming retries...")
cr.stallRetries = false
case event.DisconnectPeerRequest:
peer := e.Data[event.RemotePeer]
cr.authorizedPeers.Delete(peer)
case event.DisconnectServerRequest:
peer := e.Data[event.GroupServer]
cr.authorizedPeers.Delete(peer)
case event.DeleteContact:
// this case covers both servers and peers (servers are peers, and go through the
// same delete conversation flow)
peer := e.Data[event.RemotePeer]
cr.authorizedPeers.Delete(peer)
case event.UpdateConversationAuthorization:
// if we update the conversation authorization then we need to check if
// we need to remove blocked conversations from the regular flow.
peer := e.Data[event.RemotePeer]
blocked := e.Data[event.Blocked]
if blocked == "true" {
cr.authorizedPeers.Delete(peer)
}
case event.PeerStateChange:
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
peer := e.Data[event.RemotePeer]
// only handle state change events from pre-authorized peers;
if _, exists := cr.authorizedPeers.Load(peer); exists {
cr.handleEvent(peer, state, peerConn)
}
case event.ServerStateChange:
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
server := e.Data[event.GroupServer]
// only handle state change events from pre-authorized servers;
if _, exists := cr.authorizedPeers.Load(server); exists {
cr.handleEvent(server, state, serverConn)
}
case event.QueueJoinServer:
fallthrough
case event.QueuePeerRequest:
lastSeen, err := time.Parse(time.RFC3339Nano, e.Data[event.LastSeen])
if err != nil {
lastSeen = event.CwtchEpoch
}
id := ""
if peer, exists := e.Data[event.RemotePeer]; exists {
id = peer
cr.addConnection(peer, connections.DISCONNECTED, peerConn, lastSeen)
} else if server, exists := e.Data[event.GroupServer]; exists {
id = server
cr.addConnection(server, connections.DISCONNECTED, serverConn, lastSeen)
}
// this was an authorized event, and so we store this peer.
log.Debugf("authorizing id: %v", id)
cr.authorizedPeers.Store(id, true)
if c, ok := cr.connections.Load(id); ok {
contact := c.(*contact)
if contact.state == connections.DISCONNECTED {
// prioritize connections made in the last week
if time.Since(contact.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
cr.priorityQueue.insert(contact)
} else {
cr.pendingQueue.insert(contact)
}
}
}
case event.ProtocolEngineShutdown:
cr.ACNUp = false
cr.protocolEngine = false
cr.stallRetries = true
cr.connections.Range(func(k, v interface{}) bool {
p := v.(*contact)
if p.state == connections.AUTHENTICATED || p.state == connections.SYNCED {
p.lastSeen = time.Now()
}
p.state = connections.DISCONNECTED
p.failedCount = 0
return true
})
case event.ProtocolEngineCreated:
cr.protocolEngine = true
cr.processStatus()
case event.ACNStatus:
progData := e.Data[event.Progress]
if prog, err := strconv.Atoi(progData); err == nil {
cr.acnProgress = prog
cr.processStatus()
}
}
case <-time.After(tickTime):
continue
case <-cr.breakChan:
cr.running = false
return
}
}
}
func (cr *contactRetry) processStatus() {
if !cr.protocolEngine {
cr.ACNUp = false
return
}
if cr.acnProgress == 100 && !cr.ACNUp {
// ACN is up...at this point we need to completely reset our state
// as there is no guarantee that the tor daemon shares our state anymore...
cr.ACNUp = true
cr.ACNUpTime = time.Now()
// reset all of the queues...
cr.priorityQueue = newConnectionQueue()
cr.pendingQueue = newConnectionQueue()
// Loop through connections. Reset state, and requeue...
cr.connections.Range(func(k, v interface{}) bool {
p := v.(*contact)
// only reload connections if they are on the authorized peers list
if _, exists := cr.authorizedPeers.Load(p.id); exists {
p.queued = true
// prioritize connections made recently...
log.Debugf("adding %v to queue", p.id)
if time.Since(p.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
cr.priorityQueue.insert(p)
} else {
cr.pendingQueue.insert(p)
}
}
return true
})
} else if cr.acnProgress != 100 {
cr.ACNUp = false
cr.connections.Range(func(k, v interface{}) bool {
p := v.(*contact)
p.failedCount = 0
p.queued = false
p.state = connections.DISCONNECTED
return true
})
}
}
func (cr *contactRetry) requeueReady() {
if !cr.ACNUp {
return
}
var retryable []*contact
throughPutPerMin := int((float64(cr.maxTorCircuitsPending()) / float64(circuitTimeoutSecs)) * 60.0)
queueCount := cr.priorityQueue.len() + cr.pendingQueue.len()
// adjustedBaseTimeout = baseTimeout * (queuedItemsCount / throughPutPerMin)
// when fewer items are queued than the throughput, it will lower adjustedBaseTimeout, but that will be reset in the next block
// when more items are queued it will increase the timeout, up to a max of MaxBaseTimeoutSec (enforced in the next block)
adjustedBaseTimeout := circuitTimeoutSecs * (queueCount / throughPutPerMin)
// circuitTimeoutSecs (120s) < adjustedBaseTimeout < MaxBaseTimeoutSec (300s)
if adjustedBaseTimeout < circuitTimeoutSecs {
adjustedBaseTimeout = circuitTimeoutSecs
} else if adjustedBaseTimeout > MaxBaseTimeoutSec {
adjustedBaseTimeout = MaxBaseTimeoutSec
}
cr.connections.Range(func(k, v interface{}) bool {
p := v.(*contact)
// Don't retry anyone who isn't on the authorized peers list
if _, exists := cr.authorizedPeers.Load(p.id); exists {
if p.state == connections.DISCONNECTED && !p.queued {
timeout := time.Duration((math.Pow(2, float64(p.failedCount)))*float64(adjustedBaseTimeout /*baseTimeoutSec*/)) * time.Second
if time.Since(p.lastAttempt) > timeout {
retryable = append(retryable, p)
}
}
}
return true
})
for _, contact := range retryable {
if time.Since(contact.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
cr.priorityQueue.insert(contact)
} else {
cr.pendingQueue.insert(contact)
}
}
}
func (cr *contactRetry) publishConnectionRequest(contact *contact) {
log.Debugf("RestartFlow Publish Connection Request listener %v", contact)
if contact.ctype == peerConn {
cr.bus.Publish(event.NewEvent(event.PeerRequest, map[event.Field]string{event.RemotePeer: contact.id}))
}
if contact.ctype == serverConn {
cr.bus.Publish(event.NewEvent(event.RetryServerRequest, map[event.Field]string{event.GroupServer: contact.id}))
}
contact.state = connections.CONNECTING // Hacky but needed so we don't over flood waiting for PeerStateChange from engine
contact.lastAttempt = time.Now()
}
func (cr *contactRetry) addConnection(id string, state connections.ConnectionState, ctype connectionType, lastSeen time.Time) {
// don't handle contact retries for ourselves
if id == cr.onion {
return
}
if _, exists := cr.connections.Load(id); !exists {
p := &contact{id: id, state: state, failedCount: 0, lastAttempt: event.CwtchEpoch, ctype: ctype, lastSeen: lastSeen, queued: false}
cr.connections.Store(id, p)
return
} else {
// we have re-requested this connection, probably via an explicit ask; update its state
if c, ok := cr.connections.Load(id); ok {
contact := c.(*contact)
contact.state = state
}
}
}
func (cr *contactRetry) handleEvent(id string, state connections.ConnectionState, ctype connectionType) {
log.Debugf("cr.handleEvent state to %v on id %v", connections.ConnectionStateName[state], id)
// don't handle contact retries for ourselves
if id == cr.onion {
return
}
// reject events that contain invalid hostnames...we cannot connect to them
// and they could result in spurious connection attempts...
if !tor.IsValidHostname(id) {
return
}
if _, exists := cr.connections.Load(id); !exists {
// We have an event for something we don't know about...
// The only reason this should happen is if a *new* Peer/Server connection has changed.
// Let's set the timeout to Now() to indicate that this is a fresh connection, and so should likely be prioritized.
cr.addConnection(id, state, ctype, time.Now())
return
}
pinf, _ := cr.connections.Load(id)
p := pinf.(*contact)
log.Debugf(" managing state change for %v %v to %v by self %v", id, connections.ConnectionStateName[p.state], connections.ConnectionStateName[state], cr.onion)
if state == connections.DISCONNECTED || state == connections.FAILED || state == connections.KILLED {
if p.state == connections.SYNCED || p.state == connections.AUTHENTICATED {
p.lastSeen = time.Now()
} else {
p.failedCount += 1
}
p.state = connections.DISCONNECTED
p.lastAttempt = time.Now()
if p.failedCount > maxFailedBackoff {
p.failedCount = maxFailedBackoff
}
} else if state == connections.CONNECTING || state == connections.CONNECTED {
p.state = state
} else if state == connections.AUTHENTICATED || state == connections.SYNCED {
p.state = state
p.lastSeen = time.Now()
p.failedCount = 0
}
}
func (cr *contactRetry) Shutdown() {
cr.breakChan <- true
cr.queue.Shutdown()
}
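
The retry delay computed above is 2^failedCount times the queue-adjusted base timeout, where the base is clamped between circuitTimeoutSecs (120s) and MaxBaseTimeoutSec (300s). A minimal standalone sketch of that arithmetic follows; the 120/300 values come from the comment above, while throughPutPerMin here is an assumed figure purely for illustration.

package main
import (
	"fmt"
	"math"
	"time"
)
const (
	circuitTimeoutSecs = 120 // per the comment in the plugin
	maxBaseTimeoutSec  = 300 // per the comment in the plugin
	throughPutPerMin   = 10  // assumed value, purely for illustration
)
// retryDelay mirrors the adjustedBaseTimeout clamp and the exponential backoff above.
func retryDelay(queueLen, failedCount int) time.Duration {
	adjusted := circuitTimeoutSecs * (queueLen / throughPutPerMin)
	if adjusted < circuitTimeoutSecs {
		adjusted = circuitTimeoutSecs
	} else if adjusted > maxBaseTimeoutSec {
		adjusted = maxBaseTimeoutSec
	}
	return time.Duration(math.Pow(2, float64(failedCount))*float64(adjusted)) * time.Second
}
func main() {
	// With an empty queue the base stays at 120s, so delays double: 2m, 4m, 8m, 16m...
	for failed := 0; failed <= 3; failed++ {
		fmt.Println(failed, retryDelay(0, failed))
	}
}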

View File

@ -1,128 +0,0 @@
package plugins
import (
"testing"
"time"
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/protocol/connections"
"git.openprivacy.ca/openprivacy/log"
)
// TestContactRetryQueue simulates some basic connection queueing
// NOTE: This whole test is a race condition, and does flag Go's race detector
// We are invasively checking the internal state of the retry plugin and accessing pointers from another
// thread.
// We could build an entire thread-safe monitoring functionality, but that would dramatically expand the scope of this test.
func TestContactRetryQueue(t *testing.T) {
log.SetLevel(log.LevelDebug)
bus := event.NewEventManager()
cr := NewConnectionRetry(bus, "").(*contactRetry)
cr.ACNUp = true // fake an ACN connection...
cr.protocolEngine = true // fake protocol engine
cr.stallRetries = false // fake not being in offline mode...
go cr.run()
testOnion := "2wgvbza2mbuc72a4u6r6k4hc2blcvrmk4q26bfvlwbqxv2yq5k52fcqd"
t.Logf("contact plugin up and running..sending peer connection...")
// Publish a QueuePeerRequest so that a peer connection identified by testOnion is created and queued
bus.Publish(event.NewEvent(event.QueuePeerRequest, map[event.Field]string{event.RemotePeer: testOnion, event.LastSeen: "test"}))
// Wait until the test contact actually exists, and is queued
// This is the worst part of this test setup. Ideally we would sleep or otherwise yield, but
// go test scheduling doesn't like that, and even sleeping for long periods won't cause the event thread to make
// progress...
setup := false
for !setup {
if _, exists := cr.connections.Load(testOnion); exists {
if _, exists := cr.authorizedPeers.Load(testOnion); exists {
t.Logf("authorized")
setup = true
}
}
}
// We should very quickly become connecting...
time.Sleep(time.Second)
pinf, _ := cr.connections.Load(testOnion)
if pinf.(*contact).state != 1 {
t.Fatalf("test connection should be in connecting after update, actually: %v", pinf.(*contact).state)
}
// Assert that testOnion is authenticated
cr.handleEvent(testOnion, connections.AUTHENTICATED, peerConn)
// Assert that "test has a valid state"
pinf, _ = cr.connections.Load(testOnion)
if pinf.(*contact).state != 3 {
t.Fatalf("test connection should be in authenticated after update, actually: %v", pinf.(*contact).state)
}
// Publish an unrelated event to trigger the Plugin to go through a queuing cycle
// If we didn't do this we would have to wait 30 seconds for a check-in
bus.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{event.RemotePeer: "test2", event.ConnectionState: "Disconnected"}))
bus.Publish(event.NewEvent(event.QueuePeerRequest, map[event.Field]string{event.RemotePeer: testOnion, event.LastSeen: time.Now().Format(time.RFC3339Nano)}))
time.Sleep(time.Second)
pinf, _ = cr.connections.Load(testOnion)
if pinf.(*contact).state != 1 {
t.Fatalf("test connection should be in connecting after update, actually: %v", pinf.(*contact).state)
}
cr.Shutdown()
}
// Takes around 4 min unless you adjust the consts for tickTimeSec and circuitTimeoutSecs
/*
func TestRetryEmission(t *testing.T) {
log.SetLevel(log.LevelDebug)
log.Infof("*** Starting TestRetryEmission! ***")
bus := event.NewEventManager()
testQueue := event.NewQueue()
bus.Subscribe(event.PeerRequest, testQueue)
cr := NewConnectionRetry(bus, "").(*contactRetry)
cr.Start()
time.Sleep(100 * time.Millisecond)
bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, "100"))
bus.Publish(event.NewEventList(event.ProtocolEngineCreated))
pub, _, _ := ed25519.GenerateKey(rand.Reader)
peerAddr := tor.GetTorV3Hostname(pub)
bus.Publish(event.NewEventList(event.QueuePeerRequest, event.RemotePeer, peerAddr, event.LastSeen, time.Now().Format(time.RFC3339Nano)))
log.Infof("Fetching 1st event")
ev := testQueue.Next()
if ev.EventType != event.PeerRequest {
t.Errorf("1st event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
}
log.Infof("1st event: %v", ev)
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.DISCONNECTED]))
log.Infof("fetching 2nd event")
ev = testQueue.Next()
log.Infof("2nd event: %v", ev)
if ev.EventType != event.PeerRequest {
t.Errorf("2nd event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
}
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.CONNECTED]))
time.Sleep(100 * time.Millisecond)
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.DISCONNECTED]))
log.Infof("fetching 3rd event")
ev = testQueue.Next()
log.Infof("3nd event: %v", ev)
if ev.EventType != event.PeerRequest {
t.Errorf("3nd event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
}
cr.Shutdown()
}
*/

View File

@ -1,49 +0,0 @@
package plugins
import (
"cwtch.im/cwtch/event"
"git.openprivacy.ca/openprivacy/log"
"time"
)
const heartbeatTickTime = 60 * time.Second
type heartbeat struct {
bus event.Manager
queue event.Queue
breakChan chan bool
}
func (hb *heartbeat) Start() {
go hb.run()
}
func (hb *heartbeat) Id() PluginID {
return HEARTBEAT
}
func (hb *heartbeat) Shutdown() {
hb.breakChan <- true
hb.queue.Shutdown()
}
func (hb *heartbeat) run() {
log.Debugf("running heartbeat trigger plugin")
for {
select {
case <-time.After(heartbeatTickTime):
// no fuss, just trigger the beat.
hb.bus.Publish(event.NewEvent(event.Heartbeat, map[event.Field]string{}))
continue
case <-hb.breakChan:
log.Debugf("shutting down heartbeat plugin")
return
}
}
}
// NewHeartbeat returns a Plugin that when started will trigger heartbeat checks on a regular interval
func NewHeartbeat(bus event.Manager) Plugin {
cr := &heartbeat{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
return cr
}
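
A minimal usage sketch for the heartbeat plugin: wire it to an event manager and consume the periodic Heartbeat events. The import path cwtch.im/cwtch/app/plugins is assumed; everything else uses only the APIs shown in these files.

package main
import (
	"cwtch.im/cwtch/app/plugins" // assumed import path for this package
	"cwtch.im/cwtch/event"
	"git.openprivacy.ca/openprivacy/log"
)
func main() {
	bus := event.NewEventManager()
	queue := event.NewQueue()
	bus.Subscribe(event.Heartbeat, queue)
	hb := plugins.NewHeartbeat(bus)
	hb.Start()
	defer hb.Shutdown()
	// Blocks until the first tick (60s with the default heartbeatTickTime).
	ev := queue.Next()
	log.Infof("received %v", ev.EventType)
}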

View File

@ -1,156 +0,0 @@
package plugins
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/protocol/connections"
"cwtch.im/cwtch/utils"
"git.openprivacy.ca/openprivacy/connectivity"
"git.openprivacy.ca/openprivacy/log"
"sync"
"time"
)
// NetworkCheckError is a status for when the NetworkCheck Plugin has had an error making an outgoing connection, indicating it may be offline
const NetworkCheckError = "Error"
// NetworkCheckSuccess is a status for when the NetworkCheck Plugin has received a successful message from a peer, indicating it is online right now
const NetworkCheckSuccess = "Success"
const NetworkCheckPeriod = time.Minute
// networkCheck is a convenience plugin for testing high level availability of onion services
type networkCheck struct {
bus event.Manager
queue event.Queue
onion string
acn connectivity.ACN
breakChan chan bool
running bool
offline bool
offlineLock sync.Mutex
}
// NewNetworkCheck returns a Plugin that when started will attempt various network tests
func NewNetworkCheck(onion string, bus event.Manager, acn connectivity.ACN) Plugin {
nc := &networkCheck{onion: onion, bus: bus, acn: acn, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
return nc
}
func (nc *networkCheck) Start() {
go nc.run()
}
func (nc *networkCheck) Id() PluginID {
return NETWORKCHECK
}
func (nc *networkCheck) run() {
nc.running = true
nc.offline = true
nc.bus.Subscribe(event.ProtocolEngineStartListen, nc.queue)
nc.bus.Subscribe(event.NewMessageFromPeer, nc.queue)
nc.bus.Subscribe(event.PeerAcknowledgement, nc.queue)
nc.bus.Subscribe(event.EncryptedGroupMessage, nc.queue)
nc.bus.Subscribe(event.PeerStateChange, nc.queue)
nc.bus.Subscribe(event.ServerStateChange, nc.queue)
nc.bus.Subscribe(event.NewGetValMessageFromPeer, nc.queue)
nc.bus.Subscribe(event.NewRetValMessageFromPeer, nc.queue)
var lastMessageReceived = time.Now()
for {
select {
case <-nc.breakChan:
nc.running = false
return
case e := <-nc.queue.OutChan():
switch e.EventType {
// On receipt of a Listen request for an onion service we will add the onion to our list
// and then we will wait a minute and check the connection for the first time (the onion should be up)
// under normal operating circumstances
case event.ProtocolEngineStartListen:
if nc.onion == (e.Data[event.Onion]) {
log.Debugf("initiating connection check for %v", e.Data[event.Onion])
if time.Since(lastMessageReceived) > time.Minute {
nc.selfTest()
}
} else {
log.Errorf("network check plugin received an event for a different profile than it was started with. Internal wiring is probably wrong.")
}
case event.PeerStateChange:
fallthrough
case event.ServerStateChange:
// if we successfully connect to / authenticate with a remote server / peer then we clearly have internet connectivity
connectionState := e.Data[event.ConnectionState]
nc.offlineLock.Lock()
if connectionState == connections.ConnectionStateName[connections.AUTHENTICATED] || connectionState == connections.ConnectionStateName[connections.CONNECTED] {
lastMessageReceived = time.Now()
if nc.offline {
nc.bus.Publish(event.NewEvent(event.NetworkStatus, map[event.Field]string{event.Error: "", event.Status: NetworkCheckSuccess}))
nc.offline = false
}
}
nc.offlineLock.Unlock()
default:
// if we receive either an encrypted group message or a peer acknowledgement we can assume the network
// is up and running (our onion service might still not be available, but we aim to detect that
// through other actions), so we reset our timer
lastMessageReceived = time.Now()
nc.offlineLock.Lock()
if nc.offline {
nc.bus.Publish(event.NewEvent(event.NetworkStatus, map[event.Field]string{event.Error: "", event.Status: NetworkCheckSuccess}))
nc.offline = false
}
nc.offlineLock.Unlock()
}
case <-time.After(NetworkCheckPeriod):
// if we haven't received an action in the last minute...kick off a self-test
if time.Since(lastMessageReceived) > time.Minute {
nc.selfTest()
lastMessageReceived = time.Now()
}
}
}
}
func (nc *networkCheck) Shutdown() {
if nc.running {
nc.queue.Shutdown()
log.Debugf("shutting down network status plugin")
nc.breakChan <- true
}
}
func (nc *networkCheck) selfTest() {
go nc.checkConnection(nc.onion)
}
func (nc *networkCheck) checkConnection(onion string) {
progress, _ := nc.acn.GetBootstrapStatus()
if progress != 100 {
return
}
// we want to definitively time these actions out faster than tor will, because these onions should definitely be
// online
ClientTimeout := utils.TimeoutPolicy(time.Second * 60)
err := ClientTimeout.ExecuteAction(func() error {
conn, _, err := nc.acn.Open(onion)
if err == nil {
_ = conn.Close()
}
return err
})
nc.offlineLock.Lock()
defer nc.offlineLock.Unlock()
// regardless of the outcome we want to report a status to let anyone who might care know that we did do a check
if err != nil {
log.Debugf("publishing network error for %v -- %v\n", onion, err)
nc.bus.Publish(event.NewEvent(event.NetworkStatus, map[event.Field]string{event.Onion: onion, event.Error: err.Error(), event.Status: NetworkCheckError}))
nc.offline = true
} else {
log.Debugf("publishing network success for %v", onion)
nc.bus.Publish(event.NewEvent(event.NetworkStatus, map[event.Field]string{event.Onion: onion, event.Error: "", event.Status: NetworkCheckSuccess}))
nc.offline = false
}
}
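
The connection check above bounds the dial with utils.TimeoutPolicy and ExecuteAction. A hedged sketch of that pattern in isolation; only the two calls visible in checkConnection are assumed, and the probe body is a placeholder rather than a real onion dial.

package main
import (
	"errors"
	"time"
	"cwtch.im/cwtch/utils"
	"git.openprivacy.ca/openprivacy/log"
)
func main() {
	// Bound an arbitrary action to five seconds, mirroring the pattern in checkConnection.
	policy := utils.TimeoutPolicy(time.Second * 5)
	err := policy.ExecuteAction(func() error {
		// Placeholder probe: a real caller would open a connection here, as checkConnection does.
		time.Sleep(time.Second)
		return errors.New("probe failed")
	})
	if err != nil {
		log.Debugf("probe reported: %v", err)
	}
}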

View File

@ -1,41 +0,0 @@
package plugins
import (
"cwtch.im/cwtch/event"
"fmt"
"git.openprivacy.ca/openprivacy/connectivity"
)
// PluginID is used as an ID for signaling plugin activities
type PluginID int
// These are the plugin IDs for the supplied plugins
const (
CONNECTIONRETRY PluginID = iota
NETWORKCHECK
ANTISPAM
HEARTBEAT
)
// Plugin is the interface for a plugin
type Plugin interface {
Start()
Shutdown()
Id() PluginID
}
// Get is a plugin factory for the requested plugin
func Get(id PluginID, bus event.Manager, acn connectivity.ACN, onion string) (Plugin, error) {
switch id {
case CONNECTIONRETRY:
return NewConnectionRetry(bus, onion), nil
case NETWORKCHECK:
return NewNetworkCheck(onion, bus, acn), nil
case ANTISPAM:
return NewAntiSpam(bus), nil
case HEARTBEAT:
return NewHeartbeat(bus), nil
}
return nil, fmt.Errorf("plugin not defined %v", id)
}

View File

@ -1,28 +0,0 @@
package app
import (
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/model/constants"
"cwtch.im/cwtch/peer"
"time"
)
// WaitGetPeer is a helper function for utility apps not written using the event bus
// Proper use of an App is to call CreatePeer and then process the NewPeer event;
// however, for small utilities, this function, which polls the app until the peer is created,
// may fit that use case better
func WaitGetPeer(app Application, name string) peer.CwtchPeer {
for {
for _, handle := range app.ListProfiles() {
peer := app.GetPeer(handle)
if peer == nil {
continue
}
localName, _ := peer.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
if localName == name {
return peer
}
}
time.Sleep(100 * time.Millisecond)
}
}

View File

@ -1,364 +0,0 @@
package event
import "time"
var CwtchEpoch = time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)
// Type captures the definition of many common Cwtch application events
type Type string
// Defining Common Event Types
const (
StatusRequest = Type("StatusRequest")
ProtocolEngineStatus = Type("ProtocolEngineStatus")
// Attempt an outbound peer connection to a given remote peer
// attributes:
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"]
PeerRequest = Type("PeerRequest")
// QueuePeerRequest
// When a peer has too many peers to try and wants to ease off Tor throttling, use this to notify the ContactRetry plugin to schedule the peer for a later attempt
// LastSeen: last seen time of the contact
// And one of
// RemotePeer
// GroupServer
QueuePeerRequest = Type("QueuePeerRequest")
// Disconnect*Request
// Close active connections and prevent new connections
DisconnectPeerRequest = Type("DisconnectPeerRequest")
DisconnectServerRequest = Type("DisconnectServerRequest")
// Events to Manage Retry Contacts
PurgeRetries = Type("PurgeRetries")
ResumeRetries = Type("ResumeRetries")
// RetryServerRequest
// Asks CwtchPeer to retry a server connection...
// GroupServer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
RetryServerRequest = Type("RetryServerRequest")
// RemotePeer
// ConversationID
// Accepted
// Blocked
UpdateConversationAuthorization = Type("UpdateConversationAuthorization")
// Turn on/off blocking of unknown peers (if peers aren't in the contact list then they will be auto-blocked)
BlockUnknownPeers = Type("BlockUnknownPeers")
AllowUnknownPeers = Type("AllowUnknownPeers")
// GroupServer
QueueJoinServer = Type("QueueJoinServer")
JoinServer = Type("JoinServer")
// attributes GroupServer - the onion of the server to leave
LeaveServer = Type("LeaveServer")
ProtocolEngineCreated = Type("ProtocolEngineCreated")
ProtocolEngineShutdown = Type("ProtocolEngineShutdown")
ProtocolEngineStartListen = Type("ProtocolEngineStartListen")
ProtocolEngineStopped = Type("ProtocolEngineStopped")
InvitePeerToGroup = Type("InvitePeerToGroup")
// a group invite has been received from a remote peer
// attributes:
// TimestampReceived [eg time.Now().Format(time.RFC3339Nano)]
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"]
// GroupInvite: [eg "torv3....."]
// Imported
NewGroupInvite = Type("NewGroupInvite")
// Inform the UI about a new group
// GroupID: groupID (allows them to fetch from the peer)
NewGroup = Type("NewGroup")
SendMessageToGroup = Type("SendMessagetoGroup")
//Ciphertext, Signature:
EncryptedGroupMessage = Type("EncryptedGroupMessage")
//TimestampReceived, TimestampSent, Data(Message), GroupID, Signature, PreviousSignature, RemotePeer
NewMessageFromGroup = Type("NewMessageFromGroup")
// Sent if a Group Key is detected as being used outside of expected parameters (e.g. with tampered signatures)
// GroupID: The ID of the Group that is presumed compromised
GroupCompromised = Type("GroupCompromised")
// an error was encountered trying to send a particular Message to a group
// attributes:
// GroupServer: The server the Message was sent to
// Signature: The signature of the Message that failed to send
// Error: string describing the error
SendMessageToGroupError = Type("SendMessageToGroupError")
SendMessageToPeer = Type("SendMessageToPeer")
NewMessageFromPeer = Type("NewMessageFromPeer")
NewMessageFromPeerEngine = Type("NewMessageFromPeerEngine")
// RemotePeer, scope, path
NewGetValMessageFromPeer = Type("NewGetValMessageFromPeer")
// RemotePeer, val, exists
SendRetValMessageToPeer = Type("SendRetValMessageToPeer")
// RemotePeer, scope, val
SendGetValMessageToPeer = Type("SendGetValMessageToPeer")
// RemotePeer, scope, path, data, exists
NewRetValMessageFromPeer = Type("NewRetValMessageFromPeer")
// Peer acknowledges a previously sent message
// attributes
// EventID: The original event id that the peer is responding to.
// RemotePeer: The peer associated with the acknowledgement
PeerAcknowledgement = Type("PeerAcknowledgement")
// Like PeerAcknowledgement but with message index instead of event ID
// attributes
// Index: The original index of the message that the peer is responding to.
// RemotePeer: The peer associated with the acknowledgement
IndexedAcknowledgement = Type("IndexedAcknowledgement")
// Like IndexedAcknowledgement, but signals a failure to deliver the message at the given index
// attributes
// Index: The original index of the message that the peer is responding to.
// RemotePeer: The peer associated with the failure
IndexedFailure = Type("IndexedFailure")
// attributes:
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"]
// Error: string describing the error
SendMessageToPeerError = Type("SendMessageToPeerError")
// REQUESTS TO STORAGE ENGINE
// a peer contact has been added
// attributes:
// RemotePeer [eg ""]
ContactCreated = Type("ContactCreated")
// Password, NewPassword
ChangePassword = Type("ChangePassword")
// a group has been successfully added or newly created
// attributes:
// Data [serialized *model.Group]
GroupCreated = Type("GroupCreated")
// RemotePeer
DeleteContact = Type("DeleteContact")
// PeerStateChange serves as a new incoming connection message as well, and is consumed by frontends to alert of new p2p connections
// RemotePeer
// ConnectionState
PeerStateChange = Type("PeerStateChange")
// GroupServer
// ConnectionState
ServerStateChange = Type("ServerStateChange")
/***** Application client / service messages *****/
// app: Identity(onion), Created(bool)
// service -> client: Identity(localId), Password, [Status(new/default=blank || from reload='running')], Created(bool)
NewPeer = Type("NewPeer")
// Identity(onion)
DeletePeer = Type("DeletePeer")
// Identity(onion)
PeerDeleted = Type("PeerDeleted")
// Identity(onion)
ShutdownPeer = Type("ShutdownPeer")
Shutdown = Type("Shutdown")
// Error(err)
// Error creating peer
PeerError = Type("PeerError")
// Error(err)
AppError = Type("AppError")
// Progress, Status
ACNStatus = Type("ACNStatus")
// ID, Key, Data
ACNInfo = Type("ACNInfo")
// Data
ACNVersion = Type("ACNVersion")
// Network Status
// Status: Success || Error
// Error: Description of the Error
// Onion: the local onion we attempt to check
NetworkStatus = Type("NetworkError")
// For debugging. Allows test to emit a Syn and get a response Ack(eventID) when the subsystem is done processing a queue
Syn = Type("Syn")
Ack = Type("Ack")
// File Handling Events
StopFileShare = Type("StopFileShare")
StopAllFileShares = Type("StopAllFileShares")
ShareManifest = Type("ShareManifest")
ManifestSizeReceived = Type("ManifestSizeReceived")
ManifestError = Type("ManifestError")
ManifestReceived = Type("ManifestReceived")
ManifestSaved = Type("ManifestSaved")
FileDownloadProgressUpdate = Type("FileDownloadProgressUpdate")
FileDownloaded = Type("FileDownloaded")
FileVerificationFailed = Type("FileVerificationFailed")
// Profile Attribute Event
UpdatedProfileAttribute = Type("UpdatedProfileAttribute")
// Conversation Attribute Update...
UpdatedConversationAttribute = Type("UpdatedConversationAttribute")
StartingStorageMiragtion = Type("StartingStorageMigration")
DoneStorageMigration = Type("DoneStorageMigration")
TokenManagerInfo = Type("TokenManagerInfo")
TriggerAntispamCheck = Type("TriggerAntispamCheck")
MakeAntispamPayment = Type("MakeAntispamPayment")
// Heartbeat is used to trigger actions that need to happen every so often...
Heartbeat = Type("Heartbeat")
// Conversation Search
SearchResult = Type("SearchResult")
SearchCancelled = Type("SearchCancelled")
)
// Field defines common event attributes
type Field string
// Defining Common Field Types
const (
// A peer's local onion address
Onion = Field("Onion")
ProfileOnion = Field("ProfileOnion")
RemotePeer = Field("RemotePeer")
LastSeen = Field("LastSeen")
Ciphertext = Field("Ciphertext")
Signature = Field("Signature")
CachedTokens = Field("CachedTokens")
PreviousSignature = Field("PreviousSignature")
TimestampSent = Field("TimestampSent")
TimestampReceived = Field("TimestampReceived")
Identity = Field("Identity")
ConversationID = Field("ConversationID")
GroupID = Field("GroupID")
GroupServer = Field("GroupServer")
GroupName = Field("GroupName")
ServerTokenY = Field("ServerTokenY")
ServerTokenOnion = Field("ServerTokenOnion")
GroupInvite = Field("GroupInvite")
ServerTokenCount = Field("ServerTokenCount")
ProfileName = Field("ProfileName")
Password = Field("Password")
NewPassword = Field("NewPassword")
Created = Field("Created")
ConnectionState = Field("ConnectionState")
Key = Field("Key")
Data = Field("Data")
Scope = Field("Scope")
Path = Field("Path")
Exists = Field("Exists")
Salt = Field("Salt")
Error = Field("Error")
Progress = Field("Progress")
Status = Field("Status")
EventID = Field("EventID")
EventContext = Field("EventContext")
Index = Field("Index")
RowIndex = Field("RowIndex")
ContentHash = Field("ContentHash")
// Handle denotes a contact handle of any type.
Handle = Field("Handle")
// Flags denotes a set of message flags
Flags = Field("Flags")
Accepted = Field("Accepted")
Blocked = Field("Blocked")
KeyBundle = Field("KeyBundle")
// Indicate whether an event was triggered by a user import
Imported = Field("Imported")
Source = Field("Source")
FileKey = Field("FileKey")
FileSizeInChunks = Field("FileSizeInChunks")
ManifestSize = Field("ManifestSize")
SerializedManifest = Field("SerializedManifest")
TempFile = Field("TempFile")
FilePath = Field("FilePath")
FileDownloadFinished = Field("FileDownloadFinished")
NameSuggestion = Field("NameSuggestion")
SearchID = Field("SearchID")
)
// Defining Common errors
const (
AppErrLoaded0 = "Loaded 0 profiles"
PasswordMatchError = "Password did not match"
)
// Defining Protocol Contexts
const (
ContextAck = "im.cwtch.acknowledgement"
ContextInvite = "im.cwtch.invite"
ContextRaw = "im.cwtch.raw"
ContextGetVal = "im.cwtch.getVal"
ContextVersion = "im.cwtch.version"
ContextRetVal = "im.cwtch.retVal"
ContextRequestManifest = "im.cwtch.file.request.manifest"
ContextSendManifest = "im.cwtch.file.send.manifest"
ContextRequestFile = "im.cwtch.file.request.chunk"
ContextSendFile = "im.cwtch.file.send.chunk"
)
// Define Attribute Keys related to history preservation
const (
PreserveHistoryDefaultSettingKey = "SaveHistoryDefault" // profile level default
SaveHistoryKey = "SavePeerHistory" // peer level setting
)
// Define Default Attribute Values
const (
// Save History has 3 distinct states. By default we refer to the profile level
// attribute PreserveHistoryDefaultSettingKey (default: false, i.e. DefaultDeleteHistory).
// For each contact, if the profile owner confirms deletion we change to DeleteHistoryConfirmed;
// if the profile owner confirms they want to save history then this becomes SaveHistoryConfirmed.
// These settings are set at the UI level using Get/SetScopedZonedAttribute with scoped zone: local.profile.*
SaveHistoryConfirmed = "SaveHistory"
DeleteHistoryConfirmed = "DeleteHistoryConfirmed"
// NOTE: While this says "[DeleteHistory]Default", The actual behaviour will now depend on the
// global app/profile value of PreserveHistoryDefaultSettingKey
DeleteHistoryDefault = "DefaultDeleteHistory"
)
// Bool strings
const (
True = "true"
False = "false"
)
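
Most of these events are constructed with event.NewEvent or event.NewEventList (defined in the next file). A short sketch of building and publishing a QueuePeerRequest using only the fields documented above; the onion address is the example value from the comments.

package main
import (
	"time"
	"cwtch.im/cwtch/event"
)
func main() {
	bus := event.NewEventManager()
	defer bus.Shutdown()
	// Map form: QueuePeerRequest carries a RemotePeer and a LastSeen timestamp.
	bus.Publish(event.NewEvent(event.QueuePeerRequest, map[event.Field]string{
		event.RemotePeer: "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd",
		event.LastSeen:   time.Now().Format(time.RFC3339Nano),
	}))
	// List form: the same map composed from alternating Field/value arguments.
	bus.Publish(event.NewEventList(event.QueuePeerRequest,
		event.RemotePeer, "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd",
		event.LastSeen, time.Now().Format(time.RFC3339Nano)))
}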

View File

@ -1,65 +0,0 @@
package event
import (
"sync"
)
type queue struct {
infChan infiniteChannel
lock sync.Mutex
closed bool
}
// Queue is a wrapper around a channel for handling Events in a consistent way across subsystems.
// The expectation is that each subsystem in Cwtch will manage its own event.Queue fed from
// the event.Manager.
type Queue interface {
Publish(event Event)
Next() Event
Shutdown()
OutChan() <-chan Event
Len() int
}
// NewQueue initializes an event.Queue
func NewQueue() Queue {
queue := &queue{infChan: *newInfiniteChannel()}
return queue
}
func (iq *queue) inChan() chan<- Event {
return iq.infChan.In()
}
func (iq *queue) OutChan() <-chan Event {
return iq.infChan.Out()
}
// Next returns the next available event from the front of the queue
func (iq *queue) Next() Event {
event := <-iq.infChan.Out()
return event
}
func (iq *queue) Len() int {
return iq.infChan.Len()
}
// Shutdown closes our eventChannel
func (iq *queue) Shutdown() {
iq.lock.Lock()
if !iq.closed {
iq.closed = true
iq.infChan.Close()
}
iq.lock.Unlock()
}
func (iq *queue) Publish(event Event) {
iq.lock.Lock()
if !iq.closed {
iq.inChan() <- event
}
iq.lock.Unlock()
}
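
A short standalone sketch of using the Queue without a Manager; the event type name is arbitrary and exists only for this example.

package main
import (
	"fmt"
	"cwtch.im/cwtch/event"
)
func main() {
	q := event.NewQueue()
	defer q.Shutdown()
	// The infinite channel behind the queue means Publish never blocks the caller.
	q.Publish(event.NewEvent("ExampleEvent", map[event.Field]string{event.Data: "hello"}))
	fmt.Println(q.Len()) // 1
	ev := q.Next()
	fmt.Println(ev.EventType, ev.Data[event.Data])
}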

View File

@ -1,183 +0,0 @@
package event
import (
"crypto/rand"
"encoding/json"
"fmt"
"git.openprivacy.ca/openprivacy/log"
"math"
"math/big"
"os"
"runtime"
"strings"
"sync"
)
// Event is the core struct type passed around between various subsystems. Events consist of a type which can be
// filtered on, an event ID for tracing and a map of Fields to string values.
type Event struct {
EventType Type
EventID string
Data map[Field]string
}
// GetRandNumber is a helper function which returns a random integer, this is
// currently mostly used to generate message IDs
func GetRandNumber() *big.Int {
num, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
// If we can't generate random numbers then panicking is probably
// the best option.
if err != nil {
panic(err.Error())
}
return num
}
// NewEvent creates a new event object with a unique ID and the given type and data.
func NewEvent(eventType Type, data map[Field]string) Event {
return Event{EventType: eventType, EventID: GetRandNumber().String(), Data: data}
}
// NewEventList creates a new event object with a unique ID and the given type and data supplied in a list format and composed into a map of Type:string
func NewEventList(eventType Type, args ...interface{}) Event {
data := map[Field]string{}
for i := 0; i < len(args); i += 2 {
key, kok := args[i].(Field)
val, vok := args[i+1].(string)
if kok && vok {
data[key] = val
} else {
log.Errorf("attempted to send a field that could not be parsed to a string: %v %v", args[i], args[i+1])
}
}
return Event{EventType: eventType, EventID: GetRandNumber().String(), Data: data}
}
// Manager is an Event Bus which allows subsystems to subscribe to certain EventTypes and publish others.
type manager struct {
subscribers map[Type][]Queue
events chan []byte
mapMutex sync.Mutex
chanMutex sync.Mutex
internal chan bool
closed bool
trace bool
}
// Manager is an interface for an event bus
type Manager interface {
Subscribe(Type, Queue)
Publish(Event)
Shutdown()
}
// NewEventManager returns an initialized EventManager
func NewEventManager() Manager {
em := &manager{}
em.initialize()
return em
}
// Initialize sets up the Manager.
func (em *manager) initialize() {
em.subscribers = make(map[Type][]Queue)
em.events = make(chan []byte)
em.internal = make(chan bool)
em.closed = false
_, em.trace = os.LookupEnv("CWTCH_EVENT_SOURCE")
go em.eventBus()
}
// Subscribe takes an eventType and a Queue and associates them in the eventBus. All future events of that type
// will be sent to that Queue.
func (em *manager) Subscribe(eventType Type, queue Queue) {
em.mapMutex.Lock()
defer em.mapMutex.Unlock()
for _, sub := range em.subscribers[eventType] {
if sub == queue {
return // don't add the same queue for the same event twice...
}
}
em.subscribers[eventType] = append(em.subscribers[eventType], queue)
}
// Publish takes an Event and sends it to the internal eventBus where it is distributed to all Subscribers
func (em *manager) Publish(event Event) {
em.chanMutex.Lock()
defer em.chanMutex.Unlock()
if event.EventType != "" && !em.closed {
// Debug Events for Tracing, locked behind an environment variable
// for now.
if em.trace {
pc, _, _, _ := runtime.Caller(1)
funcName := runtime.FuncForPC(pc).Name()
lastSlash := strings.LastIndexByte(funcName, '/')
if lastSlash < 0 {
lastSlash = 0
}
lastDot := strings.LastIndexByte(funcName[lastSlash:], '.') + lastSlash
event.Data[Source] = fmt.Sprintf("%v.%v", funcName[:lastDot], funcName[lastDot+1:])
}
// Deep Copy the Event...
eventJSON, err := json.Marshal(event)
if err != nil {
log.Errorf("Error serializing event: %v", event)
}
em.events <- eventJSON
}
}
// eventBus is an internal function that is used to distribute events to all subscribers
func (em *manager) eventBus() {
for {
eventJSON := <-em.events
// In the case of an empty event, tear down the event bus
if len(eventJSON) == 0 {
log.Errorf("Received zero length event")
break
}
var event Event
err := json.Unmarshal(eventJSON, &event)
if err != nil {
log.Errorf("Error on Deep Copy: %v %v", eventJSON, err)
}
// maps aren't thread safe
em.mapMutex.Lock()
subscribers := em.subscribers[event.EventType]
em.mapMutex.Unlock()
// Send the event to any subscribers to that event type
for _, subscriber := range subscribers {
// Deep Copy for Each Subscriber
var eventCopy Event
err = json.Unmarshal(eventJSON, &eventCopy)
if err != nil {
log.Errorf("error unmarshalling event: %v ", err)
}
subscriber.Publish(eventCopy)
}
}
// We are about to exit the eventbus thread, fire off an event internally
em.internal <- true
}
// Shutdown triggers, and waits for, the internal eventBus goroutine to finish
func (em *manager) Shutdown() {
em.events <- []byte{}
em.chanMutex.Lock()
em.closed = true
em.chanMutex.Unlock()
// wait for eventBus to finish
<-em.internal
close(em.events)
close(em.internal)
}

View File

@ -1,91 +0,0 @@
package event
import (
"git.openprivacy.ca/openprivacy/log"
"testing"
"time"
)
// Most basic Manager Test, Initialize, Subscribe, Publish, Receive
func TestEventManager(t *testing.T) {
eventManager := NewEventManager()
// We need to make this buffer at least 1, otherwise we will log an error!
simpleQueue := NewQueue()
eventManager.Subscribe("TEST", simpleQueue)
eventManager.Publish(Event{EventType: "TEST", Data: map[Field]string{"Value": "Hello World"}})
event := simpleQueue.Next()
if event.EventType == "TEST" && event.Data["Value"] == "Hello World" {
} else {
t.Errorf("Received Invalid Event")
}
eventManager.Shutdown()
}
func TestEventManagerMultiple(t *testing.T) {
log.SetLevel(log.LevelDebug)
eventManager := NewEventManager()
groupEventQueue := NewQueue()
peerEventQueue := NewQueue()
allEventQueue := NewQueue()
eventManager.Subscribe("PeerEvent", peerEventQueue)
eventManager.Subscribe("GroupEvent", groupEventQueue)
eventManager.Subscribe("PeerEvent", allEventQueue)
eventManager.Subscribe("GroupEvent", allEventQueue)
eventManager.Subscribe("ErrorEvent", allEventQueue)
eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
eventManager.Publish(Event{EventType: "GroupEvent", Data: map[Field]string{"Value": "Hello World Group"}})
eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
eventManager.Publish(Event{EventType: "ErrorEvent", Data: map[Field]string{"Value": "Hello World Error"}})
eventManager.Publish(Event{EventType: "NobodyIsSubscribedToThisEvent", Data: map[Field]string{"Value": "No one should see this!"}})
assertLength := func(len int, expected int, label string) {
if len != expected {
t.Errorf("Expected %s to be %v was %v", label, expected, len)
}
}
time.Sleep(time.Second)
assertLength(groupEventQueue.Len(), 1, "Group Event Queue Length")
assertLength(peerEventQueue.Len(), 2, "Peer Event Queue Length")
assertLength(allEventQueue.Len(), 4, "All Event Queue Length")
checkEvent := func(eventType Type, expected Type, label string) {
if eventType != expected {
t.Errorf("Expected %s to be %v was %v", label, expected, eventType)
}
}
event := groupEventQueue.Next()
checkEvent(event.EventType, "GroupEvent", "First Group Event")
event = peerEventQueue.Next()
checkEvent(event.EventType, "PeerEvent", "First Peer Event")
event = peerEventQueue.Next()
checkEvent(event.EventType, "PeerEvent", "Second Peer Event")
event = allEventQueue.Next()
checkEvent(event.EventType, "PeerEvent", "ALL: First Peer Event")
event = allEventQueue.Next()
checkEvent(event.EventType, "GroupEvent", "ALL: First Group Event")
event = allEventQueue.Next()
checkEvent(event.EventType, "PeerEvent", "ALL: Second Peer Event")
event = allEventQueue.Next()
checkEvent(event.EventType, "ErrorEvent", "ALL: First Error Event")
eventManager.Shutdown()
groupEventQueue.Shutdown()
peerEventQueue.Shutdown()
allEventQueue.Shutdown()
// Reading from a closed queue should result in an instant return and an empty event
event = groupEventQueue.Next()
checkEvent(event.EventType, "", "Test Next() on Empty Queue")
}

View File

@ -1,73 +0,0 @@
// nolint:nilaway - the infiniteBuffer function causes issues with static analysis because it is very unidiomatic.
package event
/*
This package is taken from https://github.com/eapache/channels
as per their suggestion we are not importing the entire package and instead cherry picking and adapting what is needed
It is covered by the MIT License https://github.com/eapache/channels/blob/master/LICENSE
*/
// infiniteChannel implements the Channel interface with an infinite buffer between the input and the output.
type infiniteChannel struct {
input, output chan Event
length chan int
buffer *infiniteQueue
}
func newInfiniteChannel() *infiniteChannel {
ch := &infiniteChannel{
input: make(chan Event),
output: make(chan Event),
length: make(chan int),
buffer: newInfiniteQueue(),
}
go ch.infiniteBuffer()
return ch
}
func (ch *infiniteChannel) In() chan<- Event {
return ch.input
}
func (ch *infiniteChannel) Out() <-chan Event {
return ch.output
}
func (ch *infiniteChannel) Len() int {
return <-ch.length
}
func (ch *infiniteChannel) Close() {
close(ch.input)
}
func (ch *infiniteChannel) infiniteBuffer() {
var input, output chan Event
var next Event
input = ch.input
for input != nil || output != nil {
select {
case elem, open := <-input:
if open {
ch.buffer.Add(elem)
} else {
input = nil
}
case output <- next:
ch.buffer.Remove()
case ch.length <- ch.buffer.Length():
}
if ch.buffer.Length() > 0 {
output = ch.output
next = ch.buffer.Peek()
} else {
output = nil
//next = nil
}
}
close(ch.output)
close(ch.length)
}
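
The select loop above is the core of the unbounded buffer: the output case is disabled (nil channel) whenever the ring buffer is empty, and the input case is disabled once the input channel closes. A standalone, illustrative sketch of the same pattern for plain ints:

package main
import "fmt"
// infiniteInt mirrors the select loop above for a plain int channel: elements are
// parked in an unbounded slice until the output side drains them.
func infiniteInt(in <-chan int, out chan<- int) {
	var buffer []int
	var next int
	var output chan<- int // nil until the buffer has something to emit
	for in != nil || output != nil {
		select {
		case v, open := <-in:
			if open {
				buffer = append(buffer, v)
			} else {
				in = nil
			}
		case output <- next:
			buffer = buffer[1:]
		}
		if len(buffer) > 0 {
			output = out
			next = buffer[0]
		} else {
			output = nil
		}
	}
	close(out)
}
func main() {
	in := make(chan int)
	out := make(chan int)
	go infiniteInt(in, out)
	for i := 0; i < 5; i++ {
		in <- i
	}
	close(in)
	for v := range out {
		fmt.Println(v)
	}
}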

View File

@ -1,108 +0,0 @@
package event
/*
This package is taken from https://github.com/eapache/channels
as per their suggestion we are not importing the entire package and instead cherry picking and adapting what is needed
It is covered by the MIT License https://github.com/eapache/channels/blob/master/LICENSE
*/
/*
Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
substantial memory and time benefits, and fewer GC pauses.
The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
*/
// minQueueLen is smallest capacity that queue may have.
// Must be power of 2 for bitwise modulus: x % n == x & (n - 1).
const minQueueLen = 16
// Queue represents a single instance of the queue data structure.
type infiniteQueue struct {
buf []Event
head, tail, count int
}
// New constructs and returns a new Queue.
func newInfiniteQueue() *infiniteQueue {
return &infiniteQueue{
buf: make([]Event, minQueueLen),
}
}
// Length returns the number of elements currently stored in the queue.
func (q *infiniteQueue) Length() int {
return q.count
}
// resizes the queue to fit exactly twice its current contents
// this can result in shrinking if the queue is less than half-full
func (q *infiniteQueue) resize() {
newBuf := make([]Event, q.count<<1)
if q.tail > q.head {
copy(newBuf, q.buf[q.head:q.tail])
} else {
n := copy(newBuf, q.buf[q.head:])
copy(newBuf[n:], q.buf[:q.tail])
}
q.head = 0
q.tail = q.count
q.buf = newBuf
}
// Add puts an element on the end of the queue.
func (q *infiniteQueue) Add(elem Event) {
if q.count == len(q.buf) {
q.resize()
}
q.buf[q.tail] = elem
// bitwise modulus
q.tail = (q.tail + 1) & (len(q.buf) - 1)
q.count++
}
// Peek returns the element at the head of the queue. This call panics
// if the queue is empty.
func (q *infiniteQueue) Peek() Event {
if q.count <= 0 {
panic("queue: Peek() called on empty queue")
}
return q.buf[q.head]
}
// Get returns the element at index i in the queue. If the index is
// invalid, the call will panic. This method accepts both positive and
// negative index values. Index 0 refers to the first element, and
// index -1 refers to the last.
func (q *infiniteQueue) Get(i int) Event {
// If indexing backwards, convert to positive index.
if i < 0 {
i += q.count
}
if i < 0 || i >= q.count {
panic("queue: Get() called with index out of range")
}
// bitwise modulus
return q.buf[(q.head+i)&(len(q.buf)-1)]
}
// Remove removes and returns the element from the front of the queue. If the
// queue is empty, the call will panic.
func (q *infiniteQueue) Remove() Event {
if q.count <= 0 {
panic("queue: Remove() called on empty queue")
}
ret := q.buf[q.head]
//q.buf[q.head] = nil
// bitwise modulus
q.head = (q.head + 1) & (len(q.buf) - 1)
q.count--
// Resize down if buffer 1/4 full.
if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
q.resize()
}
return ret
}
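
The ring buffer depends on the identity x % n == x & (n - 1) for power-of-two n, noted above minQueueLen. A tiny standalone check of that identity (the queue type is unexported, so this only demonstrates the arithmetic):

package main
import "fmt"
func main() {
	const n = 16 // a power of two, like minQueueLen
	for _, x := range []int{0, 5, 15, 16, 17, 100} {
		fmt.Println(x, x%n, x&(n-1)) // the last two columns always match
	}
}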

View File

@ -1,119 +0,0 @@
package extensions
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/model/constants"
"cwtch.im/cwtch/peer"
"cwtch.im/cwtch/protocol/connections"
"cwtch.im/cwtch/settings"
"git.openprivacy.ca/openprivacy/log"
"strconv"
)
// ProfileValueExtension implements custom Profile Names over Cwtch
type ProfileValueExtension struct {
}
func (pne ProfileValueExtension) NotifySettingsUpdate(_ settings.GlobalSettings) {
}
func (pne ProfileValueExtension) EventsToRegister() []event.Type {
return []event.Type{event.PeerStateChange, event.Heartbeat}
}
func (pne ProfileValueExtension) ExperimentsToRegister() []string {
return nil
}
func (pne ProfileValueExtension) requestProfileInfo(profile peer.CwtchPeer, ci *model.Conversation) {
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.Name)
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileStatus)
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute1)
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute2)
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute3)
}
func (pne ProfileValueExtension) OnEvent(ev event.Event, profile peer.CwtchPeer) {
switch ev.EventType {
case event.Heartbeat:
// once every heartbeat, loop through conversations and, if they are online, request an update of their long-form profile info (name, status, attributes)..
conversations, err := profile.FetchConversations()
if err == nil {
for _, ci := range conversations {
if profile.GetPeerState(ci.Handle) == connections.AUTHENTICATED {
pne.requestProfileInfo(profile, ci)
}
}
}
case event.PeerStateChange:
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
if err == nil {
// if we have re-authenticated with this peer then request their profile information...
if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
// Request some profile information...
pne.requestProfileInfo(profile, ci)
}
}
}
}
// OnContactReceiveValue for ProfileValueExtension handles saving specific Public Profile Values like Profile Name
func (pne ProfileValueExtension) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, szp attr.ScopedZonedPath, value string, exists bool) {
// Allow public profile parameters to be added as contact-specific attributes...
scope, zone, _ := szp.GetScopeZonePath()
if exists && scope.IsPublic() && zone == attr.ProfileZone {
// Check the current value of the attribute
currentValue, err := profile.GetConversationAttribute(conversation.ID, szp)
if err == nil && currentValue == value {
// Value exists and the value is the same, short-circuit
return
}
// Save the new Attribute
err = profile.SetConversationAttribute(conversation.ID, szp, value)
if err != nil {
// Something else went wrong... short-circuit
log.Errorf("error setting conversation attribute %v", err)
return
}
// Finally publish an update for listeners to react to.
scope, zone, zpath := szp.GetScopeZonePath()
profile.PublishEvent(event.NewEvent(event.UpdatedConversationAttribute, map[event.Field]string{
event.Scope: string(scope),
event.Path: string(zone.ConstructZonedPath(zpath)),
event.Data: value,
event.RemotePeer: conversation.Handle,
event.ConversationID: strconv.Itoa(conversation.ID),
}))
}
}
// OnContactRequestValue for ProfileValueExtension handles returning Public Profile Values
func (pne ProfileValueExtension) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, szp attr.ScopedZonedPath) {
scope, zone, zpath := szp.GetScopeZonePath()
log.Debugf("Looking up public | conversation scope/zone %v", szp.ToString())
if scope.IsPublic() || scope.IsConversation() {
val, exists := profile.GetScopedZonedAttribute(scope, zone, zpath)
// NOTE: Temporary Override because UI currently wipes names if it can't find them...
if !exists && zone == attr.UnknownZone && zpath == constants.Name {
val, exists = profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
}
// Construct a Response
resp := event.NewEvent(event.SendRetValMessageToPeer, map[event.Field]string{event.ConversationID: strconv.Itoa(conversation.ID), event.RemotePeer: conversation.Handle, event.Exists: strconv.FormatBool(exists)})
resp.EventID = eventID
if exists {
resp.Data[event.Data] = val
} else {
resp.Data[event.Data] = ""
}
log.Debugf("Responding with SendRetValMessageToPeer exists:%v data: %v\n", exists, val)
profile.PublishEvent(resp)
}
}

View File

@ -1,591 +0,0 @@
package filesharing
import (
"crypto/rand"
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/settings"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
path "path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/model/constants"
"cwtch.im/cwtch/peer"
"cwtch.im/cwtch/protocol/files"
"git.openprivacy.ca/openprivacy/log"
)
// Functionality groups some common UI-triggered functions for file sharing...
type Functionality struct {
}
func (f *Functionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
}
func (f *Functionality) EventsToRegister() []event.Type {
return []event.Type{event.ProtocolEngineCreated, event.ManifestReceived, event.FileDownloaded}
}
func (f *Functionality) ExperimentsToRegister() []string {
return []string{constants.FileSharingExperiment}
}
// OnEvent handles File Sharing Hooks like Manifest Received and FileDownloaded
func (f *Functionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
if profile.IsFeatureEnabled(constants.FileSharingExperiment) {
switch ev.EventType {
case event.ProtocolEngineCreated:
f.ReShareFiles(profile)
case event.ManifestReceived:
log.Debugf("Manifest Received Event!: %v", ev)
handle := ev.Data[event.Handle]
fileKey := ev.Data[event.FileKey]
serializedManifest := ev.Data[event.SerializedManifest]
manifestFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.manifest", fileKey))
if exists {
downloadFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.path", fileKey))
if exists {
log.Debugf("downloading manifest to %v, file to %v", manifestFilePath, downloadFilePath)
var manifest files.Manifest
err := json.Unmarshal([]byte(serializedManifest), &manifest)
if err == nil {
// We only need to check the file size here, as the manifest is sent to the engine and the file created
// will be bound to the size advertised in the manifest.
fileSizeLimitValue, fileSizeLimitExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.limit", fileKey))
if fileSizeLimitExists {
fileSizeLimit, err := strconv.ParseUint(fileSizeLimitValue, 10, 64)
if err == nil {
if manifest.FileSizeInBytes >= fileSizeLimit {
log.Debugf("could not download file, size %v greater than limit %v", manifest.FileSizeInBytes, fileSizeLimitValue)
} else {
manifest.Title = manifest.FileName
manifest.FileName = downloadFilePath
log.Debugf("saving manifest")
err = manifest.Save(manifestFilePath)
if err != nil {
log.Errorf("could not save manifest: %v", err)
} else {
tempFile := ""
if runtime.GOOS == "android" {
tempFile = manifestFilePath[0 : len(manifestFilePath)-len(".manifest")]
log.Debugf("derived android temp path: %v", tempFile)
}
profile.PublishEvent(event.NewEvent(event.ManifestSaved, map[event.Field]string{
event.FileKey: fileKey,
event.Handle: handle,
event.SerializedManifest: string(manifest.Serialize()),
event.TempFile: tempFile,
event.NameSuggestion: manifest.Title,
}))
}
}
} else {
log.Errorf("error saving manifest: file size limit is incorrect: %v", err)
}
} else {
log.Errorf("error saving manifest: could not find file size limit info")
}
} else {
log.Errorf("error saving manifest: %v", err)
}
} else {
log.Errorf("found manifest path but not download path for %v", fileKey)
}
} else {
log.Errorf("no download path found for manifest: %v", fileKey)
}
case event.FileDownloaded:
fileKey := ev.Data[event.FileKey]
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey), "true")
}
} else {
log.Errorf("profile called filesharing experiment OnContactReceiveValue even though file sharing was not enabled. This is likely a programming error.")
}
}
func (f *Functionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
// nop
}
func (f *Functionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
// Profile should not call us if FileSharing is disabled
if profile.IsFeatureEnabled(constants.FileSharingExperiment) {
scope, zone, zpath := path.GetScopeZonePath()
log.Debugf("file sharing contact receive value")
if exists && scope.IsConversation() && zone == attr.FilesharingZone && strings.HasSuffix(zpath, ".manifest.size") {
fileKey := strings.Replace(zpath, ".manifest.size", "", 1)
size, err := strconv.Atoi(value)
// if size is valid and below the maximum size for a manifest
// this is to prevent malicious sharers from using large amounts of memory when distributing
// a manifest as we reconstruct this in-memory
if err == nil && size < files.MaxManifestSize {
profile.PublishEvent(event.NewEvent(event.ManifestSizeReceived, map[event.Field]string{event.FileKey: fileKey, event.ManifestSize: value, event.Handle: conversation.Handle}))
} else {
profile.PublishEvent(event.NewEvent(event.ManifestError, map[event.Field]string{event.FileKey: fileKey, event.Handle: conversation.Handle}))
}
}
} else {
log.Errorf("profile called filesharing experiment OnContactReceiveValue even though file sharing was not enabled. This is likely a programming error.")
}
}
// FunctionalityGate returns filesharing functionality - gates now happen on function calls.
func FunctionalityGate() *Functionality {
return new(Functionality)
}
// PreviewFunctionalityGate returns filesharing if image previews are enabled
func PreviewFunctionalityGate(experimentMap map[string]bool) (*Functionality, error) {
if experimentMap[constants.FileSharingExperiment] && experimentMap[constants.ImagePreviewsExperiment] {
return new(Functionality), nil
}
return nil, errors.New("image previews are not enabled")
}
// OverlayMessage presents the canonical format of the File Sharing functionality Overlay Message
// This is the format that the UI will parse to display the message
type OverlayMessage struct {
Name string `json:"f"`
Hash string `json:"h"`
Nonce string `json:"n"`
Size uint64 `json:"s"`
}
// FileKey is the unique reference to a file offer
func (om *OverlayMessage) FileKey() string {
return fmt.Sprintf("%s.%s", om.Hash, om.Nonce)
}
// ShouldAutoDL checks file size and file name. *DOES NOT* check user settings or contact state
func (om *OverlayMessage) ShouldAutoDL() bool {
if om.Size > constants.ImagePreviewMaxSizeInBytes {
return false
}
lname := strings.ToLower(om.Name)
for _, s := range constants.AutoDLFileExts {
if strings.HasSuffix(lname, s) {
return true
}
}
return false
}
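// The overlay message above travels as a small JSON payload using the struct tags f/h/n/s.
// exampleDecodeOverlay is a hedged, illustrative sketch (not part of the original API surface):
// it decodes a received payload and applies FileKey / ShouldAutoDL; the values are placeholders.
func exampleDecodeOverlay() {
	raw := `{"f":"cwtch.png","h":"deadbeef","n":"cafef00d","s":1024}`
	var om OverlayMessage
	if err := json.Unmarshal([]byte(raw), &om); err != nil {
		log.Errorf("could not decode overlay message: %v", err)
		return
	}
	log.Debugf("file key %v, auto-download candidate: %v", om.FileKey(), om.ShouldAutoDL())
}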
func (f *Functionality) VerifyOrResumeDownloadDefaultLimit(profile peer.CwtchPeer, conversation int, fileKey string) error {
return f.VerifyOrResumeDownload(profile, conversation, fileKey, files.MaxManifestSize*files.DefaultChunkSize)
}
func (f *Functionality) VerifyOrResumeDownload(profile peer.CwtchPeer, conversation int, fileKey string, size uint64) error {
if manifestFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", fileKey)); exists {
if downloadfilepath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", fileKey)); exists {
manifest, err := files.LoadManifest(manifestFilePath)
if err == nil {
// Assert the filename...this is technically not necessary, but is here for completeness
manifest.FileName = downloadfilepath
if manifest.VerifyFile() == nil {
// Send a FileDownloaded Event. Usually when VerifyOrResumeDownload is triggered it's because some UI is awaiting the results of a
// Download.
profile.PublishEvent(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: fileKey, event.FilePath: downloadfilepath, event.TempFile: downloadfilepath}))
// File is verified and there is nothing else to do...
return nil
} else {
// Kick off another Download...
return f.DownloadFile(profile, conversation, downloadfilepath, manifestFilePath, fileKey, size)
}
}
}
}
return errors.New("file download metadata does not exist, or is corrupted")
}
func (f *Functionality) CheckDownloadStatus(profile peer.CwtchPeer, fileKey string) error {
path, _ := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", fileKey))
if value, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey)); exists && value == event.True {
profile.PublishEvent(event.NewEvent(event.FileDownloaded, map[event.Field]string{
event.ProfileOnion: profile.GetOnion(),
event.FileKey: fileKey,
event.FilePath: path,
event.TempFile: "",
}))
} else {
log.Debugf("CheckDownloadStatus found .path but not .complete")
profile.PublishEvent(event.NewEvent(event.FileDownloadProgressUpdate, map[event.Field]string{
event.ProfileOnion: profile.GetOnion(),
event.FileKey: fileKey,
event.Progress: "-1",
event.FileSizeInChunks: "-1",
event.FilePath: path,
}))
}
return nil // cannot fail
}
func (f *Functionality) EnhancedShareFile(profile peer.CwtchPeer, conversationID int, sharefilepath string) string {
fileKey, overlay, err := f.ShareFile(sharefilepath, profile)
if err != nil {
log.Errorf("error sharing file: %v", err)
} else if conversationID == -1 {
// FIXME: At some point we might want to allow arbitrary public files, but for now this API will assume
// there is only one, and it is the custom profile image...
profile.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey, fileKey)
} else {
// Set a new attribute so we can associate this download with this conversation...
profile.SetConversationAttribute(conversationID, attr.ConversationScope.ConstructScopedZonedPath(attr.FilesharingZone.ConstructZonedPath(fileKey)), "")
id, err := profile.SendMessage(conversationID, overlay)
if err == nil {
return profile.EnhancedGetMessageById(conversationID, id)
}
}
return ""
}
// DownloadFileDefaultLimit given a profile, a conversation handle and a file sharing key, start off a download process
// to downloadFilePath with a default filesize limit
func (f *Functionality) DownloadFileDefaultLimit(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string) error {
return f.DownloadFile(profile, conversationID, downloadFilePath, manifestFilePath, key, files.MaxManifestSize*files.DefaultChunkSize)
}
// DownloadFile given a profile, a conversation handle and a file sharing key, start off a download process
// to downloadFilePath
func (f *Functionality) DownloadFile(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string, limit uint64) error {
// assert that we are allowed to download the file
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
return errors.New("filesharing functionality is not enabled")
}
// Don't download files if the download or manifest path is not set
if downloadFilePath == "" || manifestFilePath == "" {
return errors.New("download path or manifest path is empty")
}
// Don't download files if the download file directory does not exist
// Unless we are on Android where the kernel wishes to keep us ignorant of the
// actual path and/or existence of the file. We handle this case further down
// the line when the manifest is received and protocol engine and the Android layer
// negotiate a temporary local file -> final file copy. We don't want to worry
// about that here...
if runtime.GOOS != "android" {
if _, err := os.Stat(path.Dir(downloadFilePath)); os.IsNotExist(err) {
return errors.New("download directory does not exist")
}
// Don't download files if the manifest file directory does not exist
if _, err := os.Stat(path.Dir(manifestFilePath)); os.IsNotExist(err) {
return errors.New("manifest directory does not exist")
}
}
// Store local.filesharing.filekey.manifest as the location of the manifest
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), manifestFilePath)
// Store local.filesharing.filekey.path as the location of the download
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", key), downloadFilePath)
// Store local.filesharing.filekey.limit as the max file size of the download
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.limit", key), strconv.FormatUint(limit, 10))
// Get the value of conversation.filesharing.filekey.manifest.size from `handle`
profile.SendScopedZonedGetValToContact(conversationID, attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key))
return nil
}
// startFileShare is a private method used to finalize a file share and publish it to the protocol engine for processing.
// if force is set to true, this function will ignore timestamp checks...
func (f *Functionality) startFileShare(profile peer.CwtchPeer, filekey string, manifest string, force bool) error {
tsStr, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey))
if exists && !force {
ts, err := strconv.ParseInt(tsStr, 10, 64)
if err != nil || ts < time.Now().Unix()-2592000 {
log.Errorf("ignoring request to download a file offered more than 30 days ago")
return err
}
}
// set the filekey status to active
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", filekey), constants.True)
// reset the timestamp...
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey), strconv.FormatInt(time.Now().Unix(), 10))
// share the manifest
profile.PublishEvent(event.NewEvent(event.ShareManifest, map[event.Field]string{event.FileKey: filekey, event.SerializedManifest: manifest}))
return nil
}
// RestartFileShare takes in an existing filekey and, assuming the manifest exists, restarts sharing of the manifest
// by default this function always forces a file share, even if the file has timed out.
func (f *Functionality) RestartFileShare(profile peer.CwtchPeer, filekey string) error {
return f.restartFileShareAdvanced(profile, filekey, true)
}
// restartFileShareAdvanced takes in an existing filekey and, assuming the manifest exists, restarts sharing
// of the manifest; the force parameter controls whether the 30-day timestamp check is bypassed
func (f *Functionality) restartFileShareAdvanced(profile peer.CwtchPeer, filekey string, force bool) error {
// assert that we are allowed to restart filesharing
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
return errors.New("filesharing functionality is not enabled")
}
// check that a manifest exists
manifest, manifestExists := profile.GetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", filekey))
if manifestExists {
// everything is in order, so reshare this file with the engine
log.Debugf("restarting file share: %v", filekey)
return f.startFileShare(profile, filekey, manifest, force)
}
return fmt.Errorf("manifest does not exist for filekey: %v", filekey)
}
// ReShareFiles iterates through all existing fileshares for a given profile and re-shares them
// if the time limit has not expired
func (f *Functionality) ReShareFiles(profile peer.CwtchPeer) error {
// assert that we are allowed to restart filesharing
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
return errors.New("filesharing functionality is not enabled")
}
keys, err := profile.GetScopedZonedAttributeKeys(attr.LocalScope, attr.FilesharingZone)
if err != nil {
return err
}
for _, key := range keys {
// only look at timestamp keys; every share sets one, so it acts as the canonical
// key for enumerating shares
if strings.HasSuffix(key, ".ts") {
_, zonedpath := attr.ParseScope(key)
_, keypath := attr.ParseZone(zonedpath)
keyparts := strings.Split(keypath, ".")
// assert that the key is well-formed
if len(keyparts) == 3 && keyparts[2] == "ts" {
// fetch the timestamp key
filekey := strings.Join(keyparts[:2], ".")
sharedFile, err := f.GetFileShareInfo(profile, filekey)
// If we haven't explicitly stopped sharing the file then attempt a reshare
if err == nil && sharedFile.Active {
// this reshare can fail because we don't force sharing of files older than 30 days...
err := f.restartFileShareAdvanced(profile, filekey, false)
if err != nil {
log.Debugf("could not reshare file: %v", err)
}
} else {
log.Debugf("could not get fileshare info %v", err)
}
}
}
}
return nil
}
// GetFileShareInfo returns information related to a known fileshare.
// An error is returned if the data is incomplete
func (f *Functionality) GetFileShareInfo(profile peer.CwtchPeer, filekey string) (*SharedFile, error) {
timestampString, tsExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey))
pathString, pathExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", filekey))
activeString, activeExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", filekey))
if tsExists && pathExists && activeExists {
timestamp, err := strconv.Atoi(timestampString)
if err == nil {
dateShared := time.Unix(int64(timestamp), 0)
expired := time.Since(dateShared) >= time.Hour*24*30
return &SharedFile{
FileKey: filekey,
Path: pathString,
DateShared: dateShared,
Active: !expired && activeString == constants.True,
Expired: expired,
}, nil
}
}
return nil, fmt.Errorf("nonexistant or malformed fileshare %v", filekey)
}
// ShareFile given a profile, sets up a file sharing process to share the file at filepath;
// it returns the derived file key and a serialized overlay message suitable for sending to conversations
func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer) (string, string, error) {
// assert that we are allowed to share files
if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
return "", "", errors.New("filesharing functionality is not enabled")
}
manifest, err := files.CreateManifest(filepath)
if err != nil {
return "", "", err
}
var nonce [24]byte
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
log.Errorf("Cannot read from random: %v\n", err)
return "", "", err
}
message := OverlayMessage{
Name: path.Base(manifest.FileName),
Hash: hex.EncodeToString(manifest.RootHash),
Nonce: hex.EncodeToString(nonce[:]),
Size: manifest.FileSizeInBytes,
}
data, _ := json.Marshal(message)
wrapper := model.MessageWrapper{
Overlay: model.OverlayFileSharing,
Data: string(data),
}
wrapperJSON, _ := json.Marshal(wrapper)
key := fmt.Sprintf("%x.%x", manifest.RootHash, nonce)
serializedManifest, _ := json.Marshal(manifest)
// Store the size of the manifest (in chunks) as part of the public scope so contacts who we share the file with
// can fetch the manifest as if it were a file.
// manifest.FileName gets redacted in filesharing_subsystem (to remove the system-specific file hierarchy),
// but we need to *store* the full path because the sender also uses it to locate the file
lenDiff := len(filepath) - len(path.Base(filepath))
// the sender needs to know the location of the file so they can display it in a preview...
// This eventually becomes a message attribute, but we don't have access to the message identifier until
// the message gets sent.
// In the worst case, this can be obtained using CheckDownloadStatus (though in practice this lookup will be
// rare because the UI will almost always initiate the construction of a preview a file directly after sending it).
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", key), filepath)
// Store the timestamp, manifest and manifest size for later.
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", key), strconv.FormatInt(time.Now().Unix(), 10))
profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), string(serializedManifest))
profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key), strconv.Itoa(int(math.Ceil(float64(len(serializedManifest)-lenDiff)/float64(files.DefaultChunkSize)))))
err = f.startFileShare(profile, key, string(serializedManifest), false)
return key, string(wrapperJSON), err
}
// SharedFile encapsulates information about a shared file
// including the file key, file path, the original share date and the
// current sharing status
type SharedFile struct {
// The roothash.nonce identifier derived for this file share
FileKey string
// Path is the OS specific location of the file
Path string
// DateShared is the original datetime the file was shared
DateShared time.Time
// Active is true if the file is currently being shared, false otherwise
Active bool
// Expired is true if the file is not eligible to be shared (because e.g. it has been too long since the file was originally shared,
// or the file no longer exists).
Expired bool
}
// EnhancedGetSharedFiles returns a JSON-serialized list of the file shares associated with a given conversation
func (f *Functionality) EnhancedGetSharedFiles(profile peer.CwtchPeer, conversationID int) string {
data, err := json.Marshal(f.GetSharedFiles(profile, conversationID))
if err == nil {
return string(data)
}
return ""
}
// GetSharedFiles returns all file shares associated with a given conversation
func (f *Functionality) GetSharedFiles(profile peer.CwtchPeer, conversationID int) []SharedFile {
var sharedFiles []SharedFile
ci, err := profile.GetConversationInfo(conversationID)
if err == nil {
for k := range ci.Attributes {
// when we share a file with a conversation we set a single attribute conversation.filesharing.<filekey>
if strings.HasPrefix(k, "conversation.filesharing") {
parts := strings.SplitN(k, ".", 3)
if len(parts) == 3 {
key := parts[2]
sharedFile, err := f.GetFileShareInfo(profile, key)
if err == nil {
sharedFiles = append(sharedFiles, *sharedFile)
}
}
}
}
}
return sharedFiles
}
// GenerateDownloadPath creates a file path that doesn't currently exist on the filesystem
func GenerateDownloadPath(basePath, fileName string, overwrite bool) (filePath, manifestPath string) {
// strip characters that could cause problems on common filesystems
re := regexp.MustCompile(`[^A-Za-z0-9._-]`)
fileName = re.ReplaceAllString(fileName, "")
// avoid hidden files on linux
for strings.HasPrefix(fileName, ".") {
fileName = strings.TrimPrefix(fileName, ".")
}
// avoid empty file names
if strings.TrimSpace(fileName) == "" {
fileName = "untitled"
}
// if you like it, put a / on it
if !strings.HasSuffix(basePath, string(os.PathSeparator)) {
basePath = fmt.Sprintf("%s%s", basePath, string(os.PathSeparator))
}
filePath = fmt.Sprintf("%s%s", basePath, fileName)
manifestPath = fmt.Sprintf("%s.manifest", filePath)
// if file is named "file", iterate "file", "file (2)", "file (3)", ... until DNE
// if file is named "file.ext", iterate "file.ext", "file (2).ext", "file (3).ext", ... until DNE
parts := strings.Split(fileName, ".")
fileNameBase := parts[0]
fileNameExt := ""
if len(parts) > 1 {
fileNameBase = strings.Join(parts[0:len(parts)-1], ".")
fileNameExt = fmt.Sprintf(".%s", parts[len(parts)-1])
}
if !overwrite {
for i := 2; ; i++ {
if _, err := os.Stat(filePath); os.IsNotExist(err) {
if _, err := os.Stat(manifestPath); os.IsNotExist(err) {
return
}
}
filePath = fmt.Sprintf("%s%s (%d)%s", basePath, fileNameBase, i, fileNameExt)
manifestPath = fmt.Sprintf("%s.manifest", filePath)
}
}
return
}
// StopFileShare sends a message to the ProtocolEngine to cease sharing a particular file
func (f *Functionality) StopFileShare(profile peer.CwtchPeer, fileKey string) error {
// Note we do not do a permissions check here, as we are *always* permitted to stop sharing files.
// set the filekey status to inactive
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", fileKey), constants.False)
profile.PublishEvent(event.NewEvent(event.StopFileShare, map[event.Field]string{event.FileKey: fileKey}))
return nil // cannot fail
}
// StopAllFileShares sends a message to the ProtocolEngine to cease sharing all files
func (f *Functionality) StopAllFileShares(profile peer.CwtchPeer) {
// Note we do not do a permissions check here, as we are *always* permitted to stop sharing files.
profile.PublishEvent(event.NewEvent(event.StopAllFileShares, map[event.Field]string{}))
}
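The functionality above is driven by a frontend. The following is a minimal, hedged sketch (not part of this repository) of how an application might offer a file and then enumerate the shares recorded for a conversation. It assumes an already configured peer.CwtchPeer with the "filesharing" experiment enabled, and assumes the package lives at the cwtch.im/cwtch/functionality/filesharing import path.

package example

import (
	"fmt"

	"cwtch.im/cwtch/functionality/filesharing" // assumed import path
	"cwtch.im/cwtch/peer"
)

// offerAndList shares a file and then prints every share currently recorded for the conversation.
func offerAndList(profile peer.CwtchPeer, conversationID int, filepath string) error {
	f := filesharing.FunctionalityGate()

	// ShareFile returns the derived filekey and a serialized overlay message
	// that the application is expected to send to the conversation itself.
	key, overlay, err := f.ShareFile(filepath, profile)
	if err != nil {
		return err
	}
	fmt.Printf("offered %s with filekey %s\noverlay message: %s\n", filepath, key, overlay)

	// GetSharedFiles enumerates the shares recorded against this conversation.
	for _, shared := range f.GetSharedFiles(profile, conversationID) {
		fmt.Printf("share %s active=%v expired=%v\n", shared.FileKey, shared.Active, shared.Expired)
	}
	return nil
}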

View File

@ -1,173 +0,0 @@
package filesharing
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/model/constants"
"cwtch.im/cwtch/peer"
"cwtch.im/cwtch/protocol/connections"
"cwtch.im/cwtch/settings"
"encoding/json"
"fmt"
"git.openprivacy.ca/openprivacy/log"
"os"
"strconv"
"time"
)
// ImagePreviewsFunctionality handles the automatic download and preview of images
// and profile pictures when the relevant experiments are enabled
type ImagePreviewsFunctionality struct {
downloadFolder string
}
func (i *ImagePreviewsFunctionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
i.downloadFolder = settings.DownloadPath
}
func (i *ImagePreviewsFunctionality) EventsToRegister() []event.Type {
return []event.Type{event.ProtocolEngineCreated, event.NewMessageFromPeer, event.NewMessageFromGroup, event.PeerStateChange, event.Heartbeat}
}
func (i *ImagePreviewsFunctionality) ExperimentsToRegister() []string {
return []string{constants.FileSharingExperiment, constants.ImagePreviewsExperiment}
}
func (i *ImagePreviewsFunctionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
switch ev.EventType {
case event.NewMessageFromPeer:
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
if err == nil {
if ci.GetPeerAC().RenderImages {
i.handleImagePreviews(profile, &ev, ci.ID, ci.ID)
}
}
case event.NewMessageFromGroup:
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
if err == nil {
if ci.GetPeerAC().RenderImages {
i.handleImagePreviews(profile, &ev, ci.ID, ci.ID)
}
}
case event.PeerStateChange:
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
if err == nil {
// if we have re-authenticated with this peer then request their profile image...
if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
}
}
case event.Heartbeat:
conversations, err := profile.FetchConversations()
if err == nil {
for _, ci := range conversations {
if profile.GetPeerState(ci.Handle) == connections.AUTHENTICATED {
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
}
}
}
case event.ProtocolEngineCreated:
// Now that the Peer Engine is Activated, Reshare Profile Images
key, exists := profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
if exists {
serializedManifest, _ := profile.GetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key))
// reset the share timestamp, currently file shares are hardcoded to expire after 30 days...
// we reset the profile image here so that it is always available.
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", key), strconv.FormatInt(time.Now().Unix(), 10))
log.Debugf("Custom Profile Image: %v %s", key, serializedManifest)
f := Functionality{}
f.RestartFileShare(profile, key)
}
}
}
}
func (i *ImagePreviewsFunctionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
}
func (i *ImagePreviewsFunctionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
_, zone, path := path.GetScopeZonePath()
if exists && zone == attr.ProfileZone && path == constants.CustomProfileImageKey {
// We only download from accepted conversations
if conversation.GetPeerAC().RenderImages {
fileKey := value
basepath := i.downloadFolder
fsf := FunctionalityGate()
// We always overwrite profile image files...
fp, mp := GenerateDownloadPath(basepath, fileKey, true)
// If we have marked this file as complete...
if value, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey)); exists && value == event.True {
if _, err := os.Stat(fp); err == nil {
// file is marked as completed downloaded and exists...
// Note: this will also resend the FileDownloaded event if successful...
if fsf.VerifyOrResumeDownload(profile, conversation.ID, fileKey, constants.ImagePreviewMaxSizeInBytes) == nil {
return
}
// Otherwise we fall through...
}
// Something went wrong...the file is marked as complete but either doesn't exist, or is corrupted such that we can't continue...
// So mark complete as false...
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey), event.False)
}
// If we have reached this point then we need to download the file again...
log.Debugf("Downloading Profile Image %v %v %v", fp, mp, fileKey)
fsf.DownloadFile(profile, conversation.ID, fp, mp, fileKey, constants.ImagePreviewMaxSizeInBytes)
}
}
}
}
// handleImagePreviews checks settings and, if appropriate, auto-downloads any images
func (i *ImagePreviewsFunctionality) handleImagePreviews(profile peer.CwtchPeer, ev *event.Event, conversationID, senderID int) {
if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
ci, err := profile.GetConversationInfo(senderID)
if err != nil {
log.Errorf("attempted to call handleImagePreviews with unknown conversation: %v", senderID)
return
}
if !ci.GetPeerAC().ShareFiles || !ci.GetPeerAC().RenderImages {
log.Infof("refusing to autodownload files from sender: %v. conversation AC does not permit image rendering", senderID)
return
}
// Short-circuit failures
// Don't auto-download images if the download path is not set.
if i.downloadFolder == "" {
log.Errorf("download folder is not set")
return
}
// Don't auto-download images if the download path does not exist.
if _, err := os.Stat(i.downloadFolder); os.IsNotExist(err) {
log.Errorf("download folder %v does not exist", i.downloadFolder)
return
}
// Get a handle to the file sharing functionality so we can trigger downloads
fsf := FunctionalityGate()
// Now look at the image preview experiment
var cm model.MessageWrapper
err = json.Unmarshal([]byte(ev.Data[event.Data]), &cm)
if err == nil && cm.Overlay == model.OverlayFileSharing {
log.Debugf("Received File Sharing Message")
var fm OverlayMessage
err = json.Unmarshal([]byte(cm.Data), &fm)
if err == nil {
if fm.ShouldAutoDL() {
basepath := i.downloadFolder
fp, mp := GenerateDownloadPath(basepath, fm.Name, false)
log.Debugf("autodownloading file! %v %v %v", basepath, fp, i.downloadFolder)
ev.Data["Auto"] = constants.True
mID, _ := strconv.Atoi(ev.Data["Index"])
profile.UpdateMessageAttribute(conversationID, 0, mID, constants.AttrDownloaded, constants.True)
fsf.DownloadFile(profile, senderID, fp, mp, fm.FileKey(), constants.ImagePreviewMaxSizeInBytes)
}
}
}
}
}
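Both the profile-image path above and the auto-download path rely on GenerateDownloadPath to pick collision-free locations. Below is a small runnable sketch (not part of the repository; the import path is assumed) of that behaviour.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"cwtch.im/cwtch/functionality/filesharing" // assumed import path
)

func main() {
	dir, err := os.MkdirTemp("", "cwtch-downloads")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// No collision yet, so we simply get <dir>/cwtch.png and its manifest path.
	fp, mp := filesharing.GenerateDownloadPath(dir, "cwtch.png", false)
	fmt.Println(fp, mp)

	// Create the file; the next call must step to "cwtch (2).png".
	if err := os.WriteFile(fp, []byte("placeholder"), 0o600); err != nil {
		panic(err)
	}
	fp2, _ := filesharing.GenerateDownloadPath(dir, "cwtch.png", false)
	fmt.Println(filepath.Base(fp2)) // "cwtch (2).png"
}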

View File

@ -1,150 +0,0 @@
package servers
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/model/constants"
"cwtch.im/cwtch/peer"
"cwtch.im/cwtch/protocol/connections"
"cwtch.im/cwtch/settings"
"encoding/json"
"errors"
"git.openprivacy.ca/openprivacy/log"
)
const (
// ServerList is a json encoded list of servers
ServerList = event.Field("ServerList")
)
const (
// UpdateServerInfo is an event containing a ProfileOnion and a ServerList
UpdateServerInfo = event.Type("UpdateServerInfo")
)
// Functionality groups some common UI-triggered functions for managing group servers...
type Functionality struct {
}
func (f *Functionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
}
func (f *Functionality) EventsToRegister() []event.Type {
return []event.Type{event.QueueJoinServer}
}
func (f *Functionality) ExperimentsToRegister() []string {
return []string{constants.GroupsExperiment}
}
// OnEvent keeps the UI's server list in sync by republishing server info when servers are (re)joined
func (f *Functionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
if profile.IsFeatureEnabled(constants.GroupsExperiment) {
switch ev.EventType {
// keep the UI in sync with the current backend server updates...
// queue join server gets triggered on load and on new servers so it's a nice
// low-noise event to hook into...
case event.QueueJoinServer:
f.PublishServerUpdate(profile)
}
}
}
func (f *Functionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
// nop
}
func (f *Functionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
// nop
}
// FunctionalityGate returns the server management functionality - gates now happen on function calls.
func FunctionalityGate() *Functionality {
return new(Functionality)
}
// ServerKey packages up key information...
// TODO: Can this be merged with KeyBundle?
type ServerKey struct {
Type string `json:"type"`
Key string `json:"key"`
}
// SyncStatus packages up server sync information...
type SyncStatus struct {
StartTime string `json:"startTime"`
LastMessageTime string `json:"lastMessageTime"`
}
// Server encapsulates the information needed to represent a server...
type Server struct {
Onion string `json:"onion"`
Identifier int `json:"identifier"`
Status string `json:"status"`
Description string `json:"description"`
Keys []ServerKey `json:"keys"`
SyncProgress SyncStatus `json:"syncProgress"`
}
// PublishServerUpdate serializes the current list of group servers and publishes an event with this information
func (f *Functionality) PublishServerUpdate(profile peer.CwtchPeer) error {
serverListForOnion := f.GetServerInfoList(profile)
serversListBytes, err := json.Marshal(serverListForOnion)
profile.PublishEvent(event.NewEvent(UpdateServerInfo, map[event.Field]string{"ProfileOnion": profile.GetOnion(), ServerList: string(serversListBytes)}))
return err
}
// GetServerInfoList compiles all the information the UI might need regarding all servers..
func (f *Functionality) GetServerInfoList(profile peer.CwtchPeer) []Server {
var servers []Server
for _, server := range profile.GetServers() {
server, err := f.GetServerInfo(profile, server)
if err != nil {
log.Errorf("profile server list is corrupted: %v", err)
continue
}
servers = append(servers, server)
}
return servers
}
// DeleteServerInfo purges a server and all related keys from a profile
func (f *Functionality) DeleteServerInfo(profile peer.CwtchPeer, serverOnion string) error {
// Servers are stored as special conversations
ci, err := profile.FetchConversationInfo(serverOnion)
if err != nil {
return err
}
// Purge keys...
// NOTE: This will leave some groups in the state of being unable to connect to a particular
// server.
profile.DeleteConversation(ci.ID)
f.PublishServerUpdate(profile)
return nil
}
// GetServerInfo compiles all the information the UI might need regarding a particular server including any verified
// cryptographic keys
func (f *Functionality) GetServerInfo(profile peer.CwtchPeer, serverOnion string) (Server, error) {
serverInfo, err := profile.FetchConversationInfo(serverOnion)
if err != nil {
return Server{}, errors.New("server not found")
}
keyTypes := []model.KeyType{model.KeyTypeServerOnion, model.KeyTypeTokenOnion, model.KeyTypePrivacyPass}
var serverKeys []ServerKey
for _, keyType := range keyTypes {
if key, has := serverInfo.GetAttribute(attr.PublicScope, attr.ServerKeyZone, string(keyType)); has {
serverKeys = append(serverKeys, ServerKey{Type: string(keyType), Key: key})
}
}
description, _ := serverInfo.GetAttribute(attr.LocalScope, attr.ServerZone, constants.Description)
startTimeStr := serverInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncPreLastMessageTime)).ToString()]
recentTimeStr := serverInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncMostRecentMessageTime)).ToString()]
syncStatus := SyncStatus{startTimeStr, recentTimeStr}
return Server{Onion: serverOnion, Identifier: serverInfo.ID, Status: connections.ConnectionStateName[profile.GetPeerState(serverInfo.Handle)], Keys: serverKeys, Description: description, SyncProgress: syncStatus}, nil
}
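As a rough usage sketch (not part of the repository; the functionality/servers import path is assumed), a frontend might render the server list like this, given an already configured peer.CwtchPeer with the groups experiment enabled.

package example

import (
	"fmt"

	"cwtch.im/cwtch/functionality/servers" // assumed import path
	"cwtch.im/cwtch/peer"
)

// printServers dumps the status of every group server known to a profile.
func printServers(profile peer.CwtchPeer) {
	f := servers.FunctionalityGate()
	for _, server := range f.GetServerInfoList(profile) {
		fmt.Printf("%s [%s] keys=%d synced %s -> %s\n",
			server.Onion, server.Status, len(server.Keys),
			server.SyncProgress.StartTime, server.SyncProgress.LastMessageTime)
	}
}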

go.mod
View File

@ -1,30 +0,0 @@
module cwtch.im/cwtch
go 1.20
require (
git.openprivacy.ca/cwtch.im/tapir v0.6.0
git.openprivacy.ca/openprivacy/connectivity v1.11.0
git.openprivacy.ca/openprivacy/log v1.0.3
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c
github.com/mutecomm/go-sqlcipher/v4 v4.4.2
github.com/onsi/ginkgo/v2 v2.1.4
github.com/onsi/gomega v1.20.1
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d
)
require (
filippo.io/edwards25519 v1.0.0 // indirect
git.openprivacy.ca/openprivacy/bine v0.0.5 // indirect
github.com/client9/misspell v0.3.4 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/gtank/merlin v0.1.1 // indirect
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b // indirect
github.com/stretchr/testify v1.7.0 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 // indirect
golang.org/x/text v0.3.7 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum
View File

@ -1,72 +0,0 @@
filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
git.openprivacy.ca/cwtch.im/tapir v0.6.0 h1:TtnKjxitkIDMM7Qn0n/u+mOHRLJzuQUYjYRu5n0/QFY=
git.openprivacy.ca/cwtch.im/tapir v0.6.0/go.mod h1:iQIq4y7N+DuP3CxyG66WNEC/d6vzh+wXvvOmelB+KoY=
git.openprivacy.ca/openprivacy/bine v0.0.5 h1:DJs5gqw3SkvLSgRDvroqJxZ7F+YsbxbBRg5t0rU5gYE=
git.openprivacy.ca/openprivacy/bine v0.0.5/go.mod h1:fwdeq6RO08WDkV0k7HfArsjRvurVULoUQmT//iaABZM=
git.openprivacy.ca/openprivacy/connectivity v1.11.0 h1:roASjaFtQLu+HdH5fa2wx6F00NL3YsUTlmXBJh8aLZk=
git.openprivacy.ca/openprivacy/connectivity v1.11.0/go.mod h1:OQO1+7OIz/jLxDrorEMzvZA6SEbpbDyLGpjoFqT3z1Y=
git.openprivacy.ca/openprivacy/log v1.0.3 h1:E/PMm4LY+Q9s3aDpfySfEDq/vYQontlvNj/scrPaga0=
git.openprivacy.ca/openprivacy/log v1.0.3/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c h1:gkfmnY4Rlt3VINCo4uKdpvngiibQyoENVj5Q88sxXhE=
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c/go.mod h1:tDPFhGdt3hJWqtKwx57i9baiB1Cj0yAg22VOPUqm5vY=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b h1:QrHweqAtyJ9EwCaGHBu1fghwxIPiopAHV06JlXrMHjk=
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b/go.mod h1:xxLb2ip6sSUts3g1irPVHyk/DGslwQsNOo9I7smJfNU=
github.com/mutecomm/go-sqlcipher/v4 v4.4.2 h1:eM10bFtI4UvibIsKr10/QT7Yfz+NADfjZYh0GKrXUNc=
github.com/mutecomm/go-sqlcipher/v4 v4.4.2/go.mod h1:mF2UmIpBnzFeBdu/ypTDb/LdbS0nk0dfSN1WUsWTjMA=
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d h1:3qF+Z8Hkrw9sOhrFHti9TlB1Hkac1x+DNRkv0XQiFjo=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 h1:UiNENfZ8gDvpiWw7IpOMQ27spWmThO1RwwdQVbJahJM=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,104 +0,0 @@
package attr
import (
"git.openprivacy.ca/openprivacy/log"
"strings"
)
/*
Scope model for peer attributes and requests
A local peer "Alice" has a PublicScope that is queryable by getVal requests.
By default, for now, all other scopes (such as the LocalScope and PeerScope defined below) are private.
Alice's peer structs for remote peers such as "Bob" keep the queried
PublicScope values in the PeerScope, which can be overridden by the same-named
values stored in the LocalScope.
*/
// Scope strongly types Scope strings
type Scope string
// ScopedZonedPath typed path with a scope and a zone
type ScopedZonedPath string
// GetScopeZonePath unpacks a ScopedZonedPath into its Scope, Zone and the remaining path
func (szp ScopedZonedPath) GetScopeZonePath() (Scope, Zone, string) {
scope, path := ParseScope(string(szp))
zone, zpath := ParseZone(path)
return scope, zone, zpath
}
// scopes for attributes
const (
// on a peer, local and peer supplied data
LocalScope = Scope("local")
PeerScope = Scope("peer")
ConversationScope = Scope("conversation")
// on a local profile, public data and private settings
PublicScope = Scope("public")
UnknownScope = Scope("unknown")
)
// Separator for scope and the rest of path
const Separator = "."
// IntoScope converts a string to a Scope
func IntoScope(scope string) Scope {
switch scope {
case "local":
return LocalScope
case "peer":
return PeerScope
case "conversation":
return ConversationScope
case "public":
return PublicScope
}
return UnknownScope
}
// ConstructScopedZonedPath enforces a scope over a zoned path
func (scope Scope) ConstructScopedZonedPath(zonedPath ZonedPath) ScopedZonedPath {
return ScopedZonedPath(string(scope) + Separator + string(zonedPath))
}
// ToString converts a ScopedZonedPath to a string
func (szp ScopedZonedPath) ToString() string {
return string(szp)
}
// IsLocal returns true if the scope is a local scope
func (scope Scope) IsLocal() bool {
return scope == LocalScope
}
// IsPeer returns true if the scope is a peer scope
func (scope Scope) IsPeer() bool {
return scope == PeerScope
}
// IsPublic returns true if the scope is a public scope
func (scope Scope) IsPublic() bool {
return scope == PublicScope
}
// IsConversation returns true if the scope is a conversation scope
func (scope Scope) IsConversation() bool {
return scope == ConversationScope
}
// ParseScope takes in an untyped string and returns an explicit Scope along with the rest of the untyped path
func ParseScope(path string) (Scope, string) {
parts := strings.SplitN(path, Separator, 3)
log.Debugf("parsed scope: %v %v", parts, path)
if len(parts) != 3 {
return UnknownScope, ""
}
return IntoScope(parts[0]), parts[1] + Separator + parts[2]
}

View File

@ -1,71 +0,0 @@
package attr
import (
"git.openprivacy.ca/openprivacy/log"
"strings"
)
// Zone forces attributes to belong to a given subsystem e.g profile or filesharing
// Note: Zone is different from Scope which deals with public visibility of a given attribute
type Zone string
// ZonedPath explicitly types paths that contain a zone for strongly typed APIs
type ZonedPath string
const (
// ProfileZone for attributes related to profile details like name and profile image
ProfileZone = Zone("profile")
// LegacyGroupZone for attributes related to legacy group experiment
LegacyGroupZone = Zone("legacygroup")
// FilesharingZone for attributes related to file sharing
FilesharingZone = Zone("filesharing")
// ServerKeyZone for attributes related to Server Keys
ServerKeyZone = Zone("serverkey")
// ServerZone is for attributes related to the server
ServerZone = Zone("server")
// UnknownZone is a catch all useful for error handling
UnknownZone = Zone("unknown")
)
// ConstructZonedPath takes a path and attaches a zone to it.
// Note that this returns a ZonedPath which isn't directly usable, it must be given to ConstructScopedZonedPath
// in order to be realized into an actual attribute path.
func (zone Zone) ConstructZonedPath(path string) ZonedPath {
return ZonedPath(string(zone) + Separator + path)
}
// ToString converts a ZonedPath to a string
func (zp ZonedPath) ToString() string {
return string(zp)
}
// ParseZone takes in an untyped string and returns an explicit Zone along with the rest of the untyped path
func ParseZone(path string) (Zone, string) {
parts := strings.SplitN(path, Separator, 2)
log.Debugf("parsed zone: %v %v", parts, path)
if len(parts) != 2 {
return UnknownZone, ""
}
switch Zone(parts[0]) {
case ProfileZone:
return ProfileZone, parts[1]
case LegacyGroupZone:
return LegacyGroupZone, parts[1]
case FilesharingZone:
return FilesharingZone, parts[1]
case ServerKeyZone:
return ServerKeyZone, parts[1]
case ServerZone:
return ServerZone, parts[1]
default:
return UnknownZone, parts[1]
}
}
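A runnable sketch (not part of the repository) showing how the Scope and Zone helpers above compose into, and parse back out of, the flat dotted attribute paths used by the storage layer.

package main

import (
	"fmt"

	"cwtch.im/cwtch/model/attr"
)

func main() {
	// Build "public.profile.name" from typed parts.
	path := attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath("name"))
	fmt.Println(path.ToString()) // public.profile.name

	// Recover the typed parts from the flat string.
	scope, rest := attr.ParseScope(path.ToString())
	zone, key := attr.ParseZone(rest)
	fmt.Println(scope, zone, key) // public profile name
}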

View File

@ -1,74 +0,0 @@
package constants
// Name refers to a Profile Name
const Name = "name"
// Onion refers the Onion address of the profile
const Onion = "onion"
// Tag describes the type of a profile e.g. default password / encrypted etc.
const Tag = "tag"
// ProfileTypeV1DefaultPassword is a tag describing a profile protected with the default password.
const ProfileTypeV1DefaultPassword = "v1-defaultPassword"
// ProfileTypeV1Password is a tag describing a profile encrypted derived from a user-provided password.
const ProfileTypeV1Password = "v1-userPassword"
// GroupID is the ID of a group
const GroupID = "groupid"
// GroupServer identifies the Server the legacy group is hosted on
const GroupServer = "groupserver"
// GroupKey is the name of the group key attribute...
const GroupKey = "groupkey"
// True - true
const True = "true"
// False - false
const False = "false"
// AttrAuthor - conversation attribute for author of the message - referenced by pub key rather than conversation id because of groups.
const AttrAuthor = "author"
// AttrAck - conversation attribute for acknowledgement status
const AttrAck = "ack"
// AttrErr - conversation attribute for errored status
const AttrErr = "error"
// AttrSentTimestamp - conversation attribute for the time the message was (nominally) sent
const AttrSentTimestamp = "sent"
// Legacy MessageFlags
// AttrRejected - conversation attribute for storing rejected prompts (for invites)
const AttrRejected = "rejected-invite"
// AttrDownloaded - conversation attribute for storing downloaded prompts (for file downloads)
const AttrDownloaded = "file-downloaded"
const CustomProfileImageKey = "custom-profile-image"
const SyncPreLastMessageTime = "SyncPreLastMessageTime"
const SyncMostRecentMessageTime = "SyncMostRecentMessageTime"
const AttrLastConnectionTime = "last-connection-time"
const PeerAutostart = "autostart"
const PeerAppearOffline = "appear-offline"
const Archived = "archived"
const ProfileStatus = "profile-status"
const ProfileAttribute1 = "profile-attribute-1"
const ProfileAttribute2 = "profile-attribute-2"
const ProfileAttribute3 = "profile-attribute-3"
// Description is used on server contacts,
const Description = "description"
// Used to store the status of acl migrations
const ACLVersion = "acl-version"
const ACLVersionOne = "acl-v1"
const ACLVersionTwo = "acl-v2"

View File

@ -1,13 +0,0 @@
package constants
// ServerPrefix precedes a server import statement
const ServerPrefix = "server:"
// TofuBundlePrefix precedes a server and a group import statement
const TofuBundlePrefix = "tofubundle:"
// GroupPrefix precedes a group import statement
const GroupPrefix = "torv3"
// ImportBundlePrefix is an error api constant for import bundle error messages
const ImportBundlePrefix = "importBundle"

View File

@ -1,7 +0,0 @@
package constants
// InvalidPasswordError is returned when an incorrect password is provided to a function that requires the current active password
const InvalidPasswordError = "invalid_password_error"
// PasswordsDoNotMatchError is returned when two passwords do not match
const PasswordsDoNotMatchError = "passwords_do_not_match"

View File

@ -1,21 +0,0 @@
package constants
const GroupsExperiment = "tapir-groups-experiment"
// FileSharingExperiment Allows file sharing
const FileSharingExperiment = "filesharing"
// ImagePreviewsExperiment Causes images (up to ImagePreviewMaxSizeInBytes, from accepted contacts) to auto-dl and preview
// requires FileSharingExperiment to be enabled
const ImagePreviewsExperiment = "filesharing-images"
// ImagePreviewMaxSizeInBytes Files up to this size will be autodownloaded using ImagePreviewsExperiment
const ImagePreviewMaxSizeInBytes = 20971520
const MessageFormattingExperiment = "message-formatting"
// AutoDLFileExts Files with these extensions will be autodownloaded using ImagePreviewsExperiment
var AutoDLFileExts = [...]string{".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp"}
// BlodeuweddExperiment enables the Blodeuwedd Assistant
const BlodeuweddExperiment = "blodeuwedd"

View File

@ -1,149 +0,0 @@
package model
import (
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/model/constants"
"encoding/json"
"git.openprivacy.ca/openprivacy/log"
"time"
)
// AccessControl is a type determining client assigned authorization to a peer
// for a given conversation
type AccessControl struct {
Blocked bool // Any attempts from this handle to connect are blocked; overrides all other settings
// Basic Conversation Rights
Read bool // Allows a handle to access the conversation
Append bool // Allows a handle to append new messages to the conversation
AutoConnect bool // Profile should automatically try to connect with peer
ExchangeAttributes bool // Profile should automatically exchange attributes like Name, Profile Image, etc.
// Extension Related Permissions
ShareFiles bool // Allows a handle to share files to a conversation
RenderImages bool // Indicates that certain filetypes should be autodownloaded and rendered when shared by this contact
}
// DefaultP2PAccessControl defaults to a semi-trusted peer with no access to special extensions.
func DefaultP2PAccessControl() AccessControl {
return AccessControl{Read: true, Append: true, ExchangeAttributes: true, Blocked: false,
AutoConnect: true, ShareFiles: false, RenderImages: false}
}
// AccessControlList represents an access control list for a conversation, mapping handles to their
// permissions within that conversation
type AccessControlList map[string]AccessControl
// Serialize transforms the ACL into json.
func (acl *AccessControlList) Serialize() []byte {
data, _ := json.Marshal(acl)
return data
}
// DeserializeAccessControlList takes in JSON and returns an AccessControlList
func DeserializeAccessControlList(data []byte) (AccessControlList, error) {
var acl AccessControlList
err := json.Unmarshal(data, &acl)
return acl, err
}
// Attributes a type-driven encapsulation of an Attribute map.
type Attributes map[string]string
// Serialize transforms an Attributes map into a JSON struct
func (a *Attributes) Serialize() []byte {
data, _ := json.Marshal(a)
return data
}
// DeserializeAttributes converts a JSON struct into an Attributes map
func DeserializeAttributes(data []byte) Attributes {
attributes := make(Attributes)
err := json.Unmarshal(data, &attributes)
if err != nil {
log.Error("error deserializing attributes (this is likely a programming error): %v", err)
return make(Attributes)
}
return attributes
}
// Conversation encapsulates high-level information about a conversation, including the
// handle, any set attributes, the access control list associated with the message tree and the
// accepted status of the conversation (whether the user has consented into the conversation).
type Conversation struct {
ID int
Handle string
Attributes Attributes
ACL AccessControlList
// Deprecated, please use ACL for permissions related functions
Accepted bool
}
// GetAttribute is a helper function that fetches a conversation attribute by scope, zone and key
func (ci *Conversation) GetAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool) {
if value, exists := ci.Attributes[scope.ConstructScopedZonedPath(zone.ConstructZonedPath(key)).ToString()]; exists {
return value, true
}
return "", false
}
// GetPeerAC returns a suitable Access Control object for the given peer conversation
// If this is called for a group conversation, this method will error and return a safe default AC.
func (ci *Conversation) GetPeerAC() AccessControl {
if acl, exists := ci.ACL[ci.Handle]; exists {
return acl
}
log.Errorf("attempted to access a Peer Access Control object from %v but peer ACL is undefined. This is likely a programming error", ci.Handle)
return DefaultP2PAccessControl()
}
// IsGroup is a helper attribute that identifies whether a conversation is a legacy group
func (ci *Conversation) IsGroup() bool {
if _, exists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)).ToString()]; exists {
return true
}
return false
}
// IsServer is a helper attribute that identifies whether a conversation is with a server
func (ci *Conversation) IsServer() bool {
if _, exists := ci.Attributes[attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(string(BundleType))).ToString()]; exists {
return true
}
return false
}
// ServerSyncProgress is only valid while a server is in the AUTHENTICATED state and therefore in the syncing process;
// it returns a double (0-1) representing the estimated progress of the syncing
func (ci *Conversation) ServerSyncProgress() float64 {
startTimeStr, startExists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncPreLastMessageTime)).ToString()]
recentTimeStr, recentExists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncMostRecentMessageTime)).ToString()]
if !startExists || !recentExists {
return 0.0
}
startTime, err := time.Parse(time.RFC3339Nano, startTimeStr)
if err != nil {
return 0.0
}
recentTime, err := time.Parse(time.RFC3339Nano, recentTimeStr)
if err != nil {
return 0.0
}
syncRange := time.Since(startTime)
pointFromStart := recentTime.Sub(startTime)
return pointFromStart.Seconds() / syncRange.Seconds()
}
// ConversationMessage bundles an instance of a conversation message row
type ConversationMessage struct {
ID int
Body string
Attr Attributes
Signature string
ContentHash string
}
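A runnable sketch (not part of the repository) of the AccessControlList round trip defined above; the handle string is a placeholder, not a real onion address.

package main

import (
	"fmt"

	"cwtch.im/cwtch/model"
)

func main() {
	// Map a (placeholder) handle to the default semi-trusted permissions.
	acl := model.AccessControlList{
		"exampleonionaddress": model.DefaultP2PAccessControl(),
	}

	data := acl.Serialize()
	restored, err := model.DeserializeAccessControlList(data)
	if err != nil {
		panic(err)
	}
	ac := restored["exampleonionaddress"]
	fmt.Printf("read=%v append=%v sharefiles=%v blocked=%v\n", ac.Read, ac.Append, ac.ShareFiles, ac.Blocked)
}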

View File

@ -1,15 +0,0 @@
package model
// Error models some common errors that need to be handled by applications that use Cwtch
type Error string
// Error is the error interface
func (e Error) Error() string {
return string(e)
}
// Error definitions
const (
InvalidEd25519PublicKey = Error("InvalidEd25519PublicKey")
InconsistentKeyBundleError = Error("InconsistentKeyBundleError")
)
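A minimal sketch (not part of the repository) of how an application can compare against these typed error constants; checkKeyLength is a hypothetical helper used only for illustration.

package main

import (
	"fmt"

	"cwtch.im/cwtch/model"
)

// checkKeyLength is a hypothetical helper used only for illustration.
func checkKeyLength(key []byte) error {
	if len(key) != 32 {
		return model.InvalidEd25519PublicKey
	}
	return nil
}

func main() {
	err := checkKeyLength([]byte("too short"))
	fmt.Println(err == model.InvalidEd25519PublicKey) // true
	fmt.Println(err.Error())                          // InvalidEd25519PublicKey
}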

View File

@ -1,41 +0,0 @@
package model
import "sync"
// Experiments are optional functionality that can be enabled/disabled by an application either completely or individually.
// Examples of experiments include File Sharing, Profile Images and Groups.
type Experiments struct {
enabled bool
experiments sync.Map
}
// InitExperiments encapsulates a set of experiments separate from their storage in GlobalSettings.
func InitExperiments(enabled bool, experiments map[string]bool) Experiments {
var syncExperiments sync.Map
for experiment, set := range experiments {
syncExperiments.Store(experiment, set)
}
return Experiments{
enabled: enabled,
experiments: syncExperiments,
}
}
// IsEnabled is a convenience function that takes in an experiment and returns true if it is enabled. Experiments
// are only enabled if both global experiments are turned on and if the specific experiment is also turned on.
// The one exception to this is experiments that have been promoted to default functionality which may be turned on
// even if experiments are turned off globally. These experiments are defined by DefaultEnabledFunctionality.
func (e *Experiments) IsEnabled(experiment string) bool {
if !e.enabled {
// todo handle default-enabled functionality
return false
}
enabled, exists := e.experiments.Load(experiment)
if !exists {
return false
}
return enabled.(bool)
}
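A runnable sketch (not part of the repository) of the gating behaviour described above; the experiment names match the constants defined earlier in this change.

package main

import (
	"fmt"

	"cwtch.im/cwtch/model"
)

func main() {
	// Experiments are globally enabled, but only filesharing is switched on.
	experiments := model.InitExperiments(true, map[string]bool{
		"filesharing":        true,
		"filesharing-images": false,
	})
	fmt.Println(experiments.IsEnabled("filesharing"))        // true
	fmt.Println(experiments.IsEnabled("filesharing-images")) // false
	fmt.Println(experiments.IsEnabled("blodeuwedd"))         // false (never registered)

	// With the global switch off, everything reports false.
	disabled := model.InitExperiments(false, map[string]bool{"filesharing": true})
	fmt.Println(disabled.IsEnabled("filesharing")) // false
}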

View File

@ -1,280 +1,134 @@
package model
import (
"crypto/ed25519"
"crypto/rand"
"crypto/sha512"
"cwtch.im/cwtch/protocol/groups"
"encoding/base32"
"encoding/base64"
"encoding/hex"
"encoding/json"
"cwtch.im/cwtch/protocol"
"errors"
"fmt"
"git.openprivacy.ca/cwtch.im/tapir/primitives"
"git.openprivacy.ca/openprivacy/connectivity/tor"
"git.openprivacy.ca/openprivacy/log"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/utils"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/pbkdf2"
"io"
"strings"
"sync"
"time"
)
// CurrentGroupVersion is used to set the version of newly created groups and make sure group structs stored are correct and up to date
const CurrentGroupVersion = 4
// GroupInvitePrefix identifies a particular string as being a serialized group invite.
const GroupInvitePrefix = "torv3"
// Group defines and encapsulates Cwtch's conception of group chat. Which are sessions
// tied to a server under a given group key. Each group has a set of Messages.
//Group defines and encapsulates Cwtch's conception of group chat. Which are sessions
// tied to a server under a given group key. Each group has a set of messages.
type Group struct {
// GroupID is now derived from the GroupKey and the GroupServer
GroupID string
GroupName string
GroupKey [32]byte
GroupServer string
Attributes map[string]string //legacy to not use
Version int
Timeline Timeline `json:"-"`
LocalID string
GroupID string
SignedGroupID []byte
GroupKey [32]byte
GroupServer string
Timeline Timeline
Accepted bool
Owner string
IsCompromised bool
lock sync.Mutex
}
// NewGroup initializes a new group associated with a given CwtchServer
func NewGroup(server string) (*Group, error) {
func NewGroup(server string) *Group {
group := new(Group)
if !tor.IsValidHostname(server) {
return nil, errors.New("server is not a valid v3 onion")
}
group.GroupServer = server
var groupID [16]byte
if _, err := io.ReadFull(rand.Reader, groupID[:]); err != nil {
panic(err)
}
group.GroupID = fmt.Sprintf("%x", groupID)
var groupKey [32]byte
if _, err := io.ReadFull(rand.Reader, groupKey[:]); err != nil {
log.Errorf("Error: Cannot read from random: %v\n", err)
return nil, err
panic(err)
}
copy(group.GroupKey[:], groupKey[:])
// Derive Group ID from the group key and the server public key. This binds the group to a particular server
// and key.
var err error
group.GroupID, err = deriveGroupID(groupKey[:], server)
return group, err
group.Owner = "self"
return group
}
// CheckGroup returns true only if the ID of the group is cryptographically valid.
func (g *Group) CheckGroup() bool {
id, _ := deriveGroupID(g.GroupKey[:], g.GroupServer)
return g.GroupID == id
// SignGroup adds a signature to the group.
func (g *Group) SignGroup(signature []byte) {
g.SignedGroupID = signature
copy(g.Timeline.SignedGroupId[:], g.SignedGroupID)
}
// deriveGroupID hashes together the key and the hostname to create a bound identifier that can later
// be referenced and checked by profiles when they receive invites and messages.
func deriveGroupID(groupKey []byte, serverHostname string) (string, error) {
data, err := base32.StdEncoding.DecodeString(strings.ToUpper(serverHostname))
if err != nil {
return "", err
}
pubkey := data[0:ed25519.PublicKeySize]
return hex.EncodeToString(pbkdf2.Key(groupKey, pubkey, 4096, 16, sha512.New)), nil
// Compromised should be called if we detect a groupkey leak.
func (g *Group) Compromised() {
g.IsCompromised = true
}
// Invite generates an invitation that can be sent to a cwtch peer
func (g *Group) Invite() (string, error) {
func (g *Group) Invite() ([]byte, error) {
gci := &groups.GroupInvite{
GroupID: g.GroupID,
GroupName: g.GroupName,
SharedKey: g.GroupKey[:],
ServerHost: g.GroupServer,
if g.SignedGroupID == nil {
return nil, errors.New("group isn't signed")
}
invite, err := json.Marshal(gci)
serializedInvite := fmt.Sprintf("%v%v", GroupInvitePrefix, base64.StdEncoding.EncodeToString(invite))
return serializedInvite, err
gci := &protocol.GroupChatInvite{
GroupName: g.GroupID,
GroupSharedKey: g.GroupKey[:],
ServerHost: g.GroupServer,
SignedGroupId: g.SignedGroupID[:],
}
cp := &protocol.CwtchPeerPacket{
GroupChatInvite: gci,
}
invite, err := proto.Marshal(cp)
return invite, err
}
// EncryptMessage takes a message and encrypts the message under the group key.
func (g *Group) EncryptMessage(message *groups.DecryptedGroupMessage) ([]byte, error) {
// AddMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline
func (g *Group) AddMessage(message *protocol.DecryptedGroupMessage, verified bool) *Message {
g.lock.Lock()
timelineMessage := &Message{
Message: message.GetText(),
Timestamp: time.Unix(int64(message.GetTimestamp()), 0),
Received: time.Now(),
Signature: message.GetSignature(),
Verified: verified,
PeerID: message.GetOnion(),
PreviousMessageSig: message.GetPreviousMessageSig(),
}
g.Timeline.Insert(timelineMessage)
g.lock.Unlock()
return timelineMessage
}
// GetTimeline provides a safe copy of the timeline
func (g *Group) GetTimeline() (t []Message) {
g.lock.Lock()
t = g.Timeline.GetMessages()
g.lock.Unlock()
return
}
//EncryptMessage takes a message and encrypts the message under the group key.
func (g *Group) EncryptMessage(message *protocol.DecryptedGroupMessage) []byte {
var nonce [24]byte
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
log.Errorf("Cannot read from random: %v\n", err)
return nil, err
}
wire, err := json.Marshal(message)
if err != nil {
return nil, err
panic(err)
}
wire, err := proto.Marshal(message)
utils.CheckError(err)
encrypted := secretbox.Seal(nonce[:], []byte(wire), &nonce, &g.GroupKey)
return encrypted, nil
return encrypted
}
// DecryptMessage takes a ciphertext and returns true and the decrypted message if the
// cipher text can be successfully decrypted, else false.
func (g *Group) DecryptMessage(ciphertext []byte) (bool, *groups.DecryptedGroupMessage) {
if len(ciphertext) > 24 {
var decryptNonce [24]byte
copy(decryptNonce[:], ciphertext[:24])
decrypted, ok := secretbox.Open(nil, ciphertext[24:], &decryptNonce, &g.GroupKey)
if ok {
dm := &groups.DecryptedGroupMessage{}
err := json.Unmarshal(decrypted, dm)
if err == nil {
return true, dm
}
}
}
return false, nil
}
// ValidateInvite takes in a serialized invite and returns the invite structure if it is cryptographically valid
// and an error if it is not
func ValidateInvite(invite string) (*groups.GroupInvite, error) {
// We prefix invites for groups with torv3
if strings.HasPrefix(invite, GroupInvitePrefix) {
data, err := base64.StdEncoding.DecodeString(invite[len(GroupInvitePrefix):])
func (g *Group) DecryptMessage(ciphertext []byte) (bool, *protocol.DecryptedGroupMessage) {
var decryptNonce [24]byte
copy(decryptNonce[:], ciphertext[:24])
decrypted, ok := secretbox.Open(nil, ciphertext[24:], &decryptNonce, &g.GroupKey)
if ok {
dm := &protocol.DecryptedGroupMessage{}
err := proto.Unmarshal(decrypted, dm)
if err == nil {
// First attempt to unmarshal the json...
var gci groups.GroupInvite
err := json.Unmarshal(data, &gci)
if err == nil {
// Validate the Invite by first checking that the server is a valid v3 onion
if !tor.IsValidHostname(gci.ServerHost) {
return nil, errors.New("server is not a valid v3 onion")
}
// Validate the length of the shared key...
if len(gci.SharedKey) != 32 {
return nil, errors.New("key length is not 32 bytes")
}
// Derive the servers public key (we can ignore the error checking here because it's already been
// done by IsValidHostname, and check that we derive the same groupID...
derivedGroupID, _ := deriveGroupID(gci.SharedKey, gci.ServerHost)
if derivedGroupID != gci.GroupID {
return nil, errors.New("group id is invalid")
}
// Replace the original with the derived, this should be a no-op at this point but defense in depth...
gci.GroupID = derivedGroupID
return &gci, nil
}
return true, dm
}
}
return nil, errors.New("invite has invalid structure")
}
// AttemptDecryption takes a ciphertext and signature and attempts to decrypt it under known groups.
// If successful, adds the message to the group's timeline
func (g *Group) AttemptDecryption(ciphertext []byte, signature []byte) (bool, *groups.DecryptedGroupMessage) {
success, dgm := g.DecryptMessage(ciphertext)
// the second check here is not needed, but DecryptMessage violates the usual
// go calling convention and we want static analysis tools to pick it up
if success && dgm != nil {
// Attempt to serialize this message
serialized, err := json.Marshal(dgm)
// Someone sent a message that isn't a valid Decrypted Group Message. Since we require this struct in order
// to verify the message, we simply ignore it.
if err != nil {
return false, nil
}
// This now requires knowledge of the Sender, the Onion and the Specific Decrypted Group Message (which should only
// be derivable from the cryptographic key) which contains many unique elements such as the time and random padding
verified := g.VerifyGroupMessage(dgm.Onion, g.GroupID, base64.StdEncoding.EncodeToString(serialized), signature)
if !verified {
// An earlier version of this protocol mistakenly signed the ciphertext of the message
// instead of the serialized decrypted group message.
// This has 2 issues:
// 1. A server with knowledge of group members public keys AND the Group ID would be able to detect valid messages
// 2. It made the metadata-security of a group dependent on keeping the cryptographically derived Group ID secret.
// While not awful, it also isn't good. For Version 3 groups only we permit Cwtch to check this older signature
// structure in a backwards compatible way for the duration of the Groups Experiment.
// TODO: Delete this check when Groups are no longer Experimental
if g.Version == 3 {
verified = g.VerifyGroupMessage(dgm.Onion, g.GroupID, string(ciphertext), signature)
}
}
// So we have a message that has a valid group key, but the signature can't be verified.
// The most obvious explanation for this is that the group key has been compromised (or we are in an open group and the server is being malicious)
// Either way, someone who has the private key is being detectably bad so we are just going to throw this message away and mark the group as Compromised.
if !verified {
return false, nil
}
return true, dgm
}
// If we couldn't find a group to decrypt the message with we just return false. This is an expected case
return false, nil
}
// VerifyGroupMessage confirms the authenticity of a message given a sender onion, message and signature.
// The goal of this function is 2-fold:
// 1. We confirm that the sender referenced in the group text is the actual sender of the message (or at least
// knows the senders private key)
// 2. Secondly, we confirm that the sender sent the message to a particular group id on a specific server (it doesn't
// matter if we actually received this message from the server or from a hybrid protocol, all that matters is
// that the sender and receivers agree that this message was intended for the group
//
// The 2nd point is important as it prevents an attack documented in the original Cwtch paper (and later at
// https://docs.openprivacy.ca/cwtch-security-handbook/groups.html) in which a malicious profile sets up 2 groups
// on two different servers with the same key and then forwards messages between them to convince the parties in
// each group that they are actually in one big group (with the intent to later censor and/or selectively send messages
// to each group).
func (g *Group) VerifyGroupMessage(onion string, groupID string, message string, signature []byte) bool {
// We use our group id, a known reference server and the message body.
m := groupID + g.GroupServer + message
// We derive the public key from the sender's onion address and check the signature against it.
decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion))
if err == nil && len(decodedPub) >= 32 {
return ed25519.Verify(decodedPub[:32], []byte(m), signature)
}
return false
}
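// exampleOnionToVerificationKey is a minimal illustrative sketch (a hypothetical helper, not an
// existing API) of the key derivation used above: a v3 onion address is
// base32(ed25519 public key || checksum || version), so decoding the upper-cased address yields
// 35 bytes, the first 32 of which are the verification key.
func exampleOnionToVerificationKey(onion string) (ed25519.PublicKey, bool) {
	decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion))
	if err != nil || len(decodedPub) < 32 {
		return nil, false
	}
	return ed25519.PublicKey(decodedPub[:32]), true
}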
// EncryptMessageToGroup, when given a message and a group, encrypts and signs the message under the group and
// profile
func EncryptMessageToGroup(message string, author primitives.Identity, group *Group, prevSig string) ([]byte, []byte, *groups.DecryptedGroupMessage, error) {
if len(message) > MaxGroupMessageLength {
return nil, nil, nil, errors.New("group message is too long")
}
timestamp := time.Now().Unix()
lenPadding := MaxGroupMessageLength - len(message)
padding := make([]byte, lenPadding)
getRandomness(&padding)
hexGroupID, err := hex.DecodeString(group.GroupID)
if err != nil {
return nil, nil, nil, err
}
prevSigBytes, err := base64.StdEncoding.DecodeString(prevSig)
if err != nil {
return nil, nil, nil, err
}
dm := &groups.DecryptedGroupMessage{
Onion: author.Hostname(),
Text: message,
SignedGroupID: hexGroupID,
Timestamp: uint64(timestamp),
PreviousMessageSig: prevSigBytes,
Padding: padding[:],
}
ciphertext, err := group.EncryptMessage(dm)
if err != nil {
return nil, nil, nil, err
}
serialized, _ := json.Marshal(dm)
signature := author.Sign([]byte(group.GroupID + group.GroupServer + base64.StdEncoding.EncodeToString(serialized)))
return ciphertext, signature, dm, nil
}
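// exampleGroupRoundTrip is a minimal usage sketch (a hypothetical helper) of EncryptMessageToGroup
// followed by AttemptDecryption; it assumes "encoding/base64" and the tapir "primitives" package
// are available and that exampleServer holds a valid v3 onion address.
func exampleGroupRoundTrip(exampleServer string) bool {
	alice, _ := primitives.InitializeEphemeralIdentity()
	group, err := NewGroup(exampleServer)
	if err != nil {
		return false
	}
	// for the first message in a timeline any base64 string can stand in for the previous signature
	prevSig := base64.StdEncoding.EncodeToString([]byte("first message"))
	ciphertext, sig, _, err := EncryptMessageToGroup("hello world", alice, group, prevSig)
	if err != nil {
		return false
	}
	ok, dgm := group.AttemptDecryption(ciphertext, sig)
	return ok && dgm != nil && dgm.Text == "hello world"
}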


@ -1,113 +1,28 @@
package model
import (
"crypto/sha256"
"cwtch.im/cwtch/protocol/groups"
"strings"
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"testing"
"time"
)
func TestGroup(t *testing.T) {
g, err := NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
if err != nil {
t.Fatalf("Group with real group server should not fail")
}
dgm := &groups.DecryptedGroupMessage{
Onion: "onion",
Text: "Hello World!",
Timestamp: uint64(time.Now().Unix()),
SignedGroupID: []byte{},
g := NewGroup("server.onion")
dgm := &protocol.DecryptedGroupMessage{
Onion: proto.String("onion"),
Text: proto.String("Hello World!"),
Timestamp: proto.Int32(int32(time.Now().Unix())),
SignedGroupId: []byte{},
Signature: []byte{},
PreviousMessageSig: []byte{},
Padding: []byte{},
}
invite, err := g.Invite()
if err != nil {
t.Fatalf("error creating group invite: %v", err)
}
validatedInvite, err := ValidateInvite(invite)
if err != nil {
t.Fatalf("error validating group invite: %v", err)
}
if validatedInvite.GroupID != g.GroupID {
t.Fatalf("after validate group invite id should be identical to original: %v", err)
}
encMessage, _ := g.EncryptMessage(dgm)
encMessage := g.EncryptMessage(dgm)
ok, message := g.DecryptMessage(encMessage)
if (!ok || message == nil) || message.Text != "Hello World!" {
if !ok || message.GetText() != "Hello World!" {
t.Errorf("group encryption was invalid, or returned wrong message decrypted:%v message:%v", ok, message)
return
}
t.Logf("Got message %v", message)
}
func TestGroupErr(t *testing.T) {
_, err := NewGroup("not a real group name")
if err == nil {
t.Errorf("Group Setup Should Have Failed")
}
}
// Test various group invite validation failures...
func TestGroupValidation(t *testing.T) {
group := &Group{
GroupID: "",
GroupKey: [32]byte{},
GroupServer: "",
Timeline: Timeline{},
LocalID: "",
Version: 0,
}
invite, _ := group.Invite()
_, err := ValidateInvite(invite)
if err == nil {
t.Fatalf("Group with empty group id should have been an error")
}
t.Logf("Error: %v", err)
// Generate a valid group but replace the group server...
group, err = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
if err != nil {
t.Fatalf("Group with real group server should not fail")
}
group.GroupServer = "tcnkoch4nyr3cldkemejtkpqok342rbql6iclnjjs3ndgnjgufzyxvqd"
invite, _ = group.Invite()
_, err = ValidateInvite(invite)
if err == nil {
t.Fatalf("Group with empty group id should have been an error")
}
t.Logf("Error: %v", err)
// Generate a valid group but replace the group key...
group, err = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
if err != nil {
t.Fatalf("Group with real group server should not fail")
}
group.GroupKey = sha256.Sum256([]byte{})
invite, _ = group.Invite()
_, err = ValidateInvite(invite)
if err == nil {
t.Fatalf("Group with different group key should have errored")
}
t.Logf("Error: %v", err)
// mangle the invite
_, err = ValidateInvite(strings.ReplaceAll(invite, GroupInvitePrefix, ""))
if err == nil {
t.Fatalf("Group with different group key should have errored")
}
t.Logf("Error: %v", err)
}


@ -1,110 +0,0 @@
package model_test
import (
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/protocol/groups"
"encoding/base64"
"git.openprivacy.ca/cwtch.im/tapir/primitives"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("group models", func() {
var (
newgroup *model.Group
anothergroup *model.Group
dgm groups.DecryptedGroupMessage
alice primitives.Identity
)
BeforeEach(func() {
newgroup, _ = model.NewGroup("iikv7tizbyxc42rsagnjxss65h3nfiwrkkoiikh7ui27r5xkav7gzuid")
anothergroup, _ = model.NewGroup("iikv7tizbyxc42rsagnjxss65h3nfiwrkkoiikh7ui27r5xkav7gzuid")
alice, _ = primitives.InitializeEphemeralIdentity()
dgm = groups.DecryptedGroupMessage{
Text: "hello world",
Onion: "some random onion",
Timestamp: 0,
SignedGroupID: nil,
PreviousMessageSig: nil,
Padding: nil,
}
})
Context("on creation of a group", func() {
It("should pass the cryptographic check", func() {
Expect(newgroup.CheckGroup()).To(Equal(true))
})
})
Context("after generating an invite", func() {
It("should validate", func() {
invite, err := newgroup.Invite()
Expect(err).NotTo(HaveOccurred())
anotherGroup, err := model.ValidateInvite(invite)
Expect(err).NotTo(HaveOccurred())
Expect(anotherGroup.GroupID).To(Equal(newgroup.GroupID))
Expect(anotherGroup.GroupName).To(Equal(newgroup.GroupName))
Expect(anotherGroup.SharedKey).To(Equal(newgroup.GroupKey[:]))
})
})
Context("when encrypting a message", func() {
Context("decrypting with the same group", func() {
It("should succeed", func() {
ciphertext, err := newgroup.EncryptMessage(&dgm)
Expect(err).NotTo(HaveOccurred())
success, decryptedMessage := newgroup.DecryptMessage(ciphertext)
Expect(success).To(Equal(true))
Expect(decryptedMessage.Text).To(Equal(dgm.Text))
Expect(decryptedMessage.Onion).To(Equal(dgm.Onion))
})
})
Context("decrypting with a different group", func() {
It("should fail", func() {
ciphertext, err := newgroup.EncryptMessage(&dgm)
Expect(err).NotTo(HaveOccurred())
success, decryptedMessage := anothergroup.DecryptMessage(ciphertext)
Expect(success).To(Equal(false))
Expect(decryptedMessage).To(BeNil())
})
})
})
Context("when alice encrypts a message to new group", func() {
It("should succeed and bob should succeed in decrypting it", func() {
ciphertext, sign, _, err := model.EncryptMessageToGroup("hello world", alice, newgroup, base64.StdEncoding.EncodeToString([]byte("hello world")))
Expect(err).NotTo(HaveOccurred())
success, dgm := newgroup.AttemptDecryption(ciphertext, sign)
Expect(success).To(BeTrue())
Expect(dgm.Text).To(Equal("hello world"))
})
})
Context("when alice encrypts a message to new group", func() {
It("should succeed and eve should fail in decrypting it", func() {
ciphertext, sign, _, err := model.EncryptMessageToGroup("hello world", alice, newgroup, base64.StdEncoding.EncodeToString([]byte("hello world")))
Expect(err).NotTo(HaveOccurred())
success, dgm := anothergroup.AttemptDecryption(ciphertext, sign)
Expect(success).To(BeFalse())
Expect(dgm).To(BeNil())
})
})
Context("when alice encrypts a message to new group", func() {
Context("and the server messes with the signature", func() {
It("bob should be unable to verify the message with the wrong signature", func() {
ciphertext, _, _, err := model.EncryptMessageToGroup("hello world", alice, newgroup, base64.StdEncoding.EncodeToString([]byte("hello world")))
Expect(err).NotTo(HaveOccurred())
success, dgm := newgroup.AttemptDecryption(ciphertext, []byte("bad signature"))
Expect(success).To(BeFalse())
Expect(dgm).To(BeNil())
})
})
})
})


@ -1,103 +0,0 @@
package model
import (
"crypto/ed25519"
"encoding/base32"
"encoding/json"
"errors"
"git.openprivacy.ca/cwtch.im/tapir/primitives"
"strings"
)
// KeyType provides a wrapper for a generic public key type identifier (could be an onion address, a zcash address etc.)
type KeyType string
const (
// BundleType - the attribute under which the signed server bundle is stored...
BundleType = KeyType("server_key_bundle")
// KeyTypeServerOnion - a cwtch address
KeyTypeServerOnion = KeyType("bulletin_board_onion") // bulletin board
// KeyTypeTokenOnion - a cwtch peer with a PoW based token protocol
KeyTypeTokenOnion = KeyType("token_service_onion")
//KeyTypePrivacyPass - a privacy pass based token server
KeyTypePrivacyPass = KeyType("privacy_pass_public_key")
)
// Key provides a wrapper for a generic public key identifier (could be an onion address, a zcash address etc.)
type Key string
// KeyBundle manages a collection of related keys for various different services.
type KeyBundle struct {
Keys map[KeyType]Key
Signature []byte
}
// NewKeyBundle creates a new KeyBundle initialized with no keys.
func NewKeyBundle() *KeyBundle {
keyBundle := new(KeyBundle)
keyBundle.Keys = make(map[KeyType]Key)
return keyBundle
}
// HasKeyType returns true if the bundle has a public key of a given type.
func (kb *KeyBundle) HasKeyType(keytype KeyType) bool {
_, exists := kb.Keys[keytype]
return exists
}
// GetKey retrieves a key with a given type from the bundle
func (kb *KeyBundle) GetKey(keytype KeyType) (Key, error) {
key, exists := kb.Keys[keytype]
if exists {
return key, nil
}
return "", errors.New("no such key")
}
// Serialize produces a json encoded byte array.
func (kb KeyBundle) Serialize() []byte {
// json.Marshal sorts map keys
bundle, _ := json.Marshal(kb)
return bundle
}
// Sign allows a server to authenticate a key bundle by signing it (this uses the tapir identity interface)
func (kb *KeyBundle) Sign(identity primitives.Identity) {
kb.Signature = identity.Sign(kb.Serialize())
}
// DeserializeAndVerify takes in a json formatted bundle and only returns a valid key bundle
// if it has been signed by the server.
func DeserializeAndVerify(bundle []byte) (*KeyBundle, error) {
keyBundle := new(KeyBundle)
err := json.Unmarshal(bundle, &keyBundle)
if err == nil {
signature := keyBundle.Signature
keyBundle.Signature = nil
serverKey, _ := keyBundle.GetKey(KeyTypeServerOnion)
// We have to convert the encoded key to a format that can be used to verify the signature
var decodedPub []byte
decodedPub, err = base32.StdEncoding.DecodeString(strings.ToUpper(string(serverKey)))
if err == nil && len(decodedPub) == 35 {
if ed25519.Verify(decodedPub[:32], keyBundle.Serialize(), signature) { // == true
return keyBundle, nil
}
}
err = InvalidEd25519PublicKey
}
return nil, err
}
// AttributeBundle returns a map that can be used as part of a peer attribute bundle
func (kb *KeyBundle) AttributeBundle() map[string]string {
ab := make(map[string]string)
for k, v := range kb.Keys {
ab[string(k)] = string(v)
}
return ab
}
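// exampleKeyBundleRoundTrip is a minimal sketch (a hypothetical helper) of how a server-signed
// bundle is produced and later checked with DeserializeAndVerify; it assumes the tapir
// "primitives" package is available.
func exampleKeyBundleRoundTrip() bool {
	server, _ := primitives.InitializeEphemeralIdentity()
	kb := NewKeyBundle()
	kb.Keys[KeyTypeServerOnion] = Key(server.Hostname())
	kb.Sign(server) // signs the canonical (sorted-key) JSON serialization
	verified, err := DeserializeAndVerify(kb.Serialize())
	return err == nil && verified.HasKeyType(KeyTypeServerOnion)
}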


@ -1,64 +0,0 @@
package model
import (
"git.openprivacy.ca/cwtch.im/tapir/primitives"
"testing"
)
func TestDeserializeAndVerify(t *testing.T) {
server, _ := primitives.InitializeEphemeralIdentity()
serverKeyBundle := NewKeyBundle()
serverKeyBundle.Keys[KeyTypeServerOnion] = Key(server.Hostname())
serverKeyBundle.Keys[KeyTypePrivacyPass] = Key("random 1")
serverKeyBundle.Keys[KeyTypeTokenOnion] = Key("random 2")
serverKeyBundle.Sign(server)
//eyeball keys are sorted
t.Logf("%s", serverKeyBundle.Serialize())
serialize := serverKeyBundle.Serialize()
newKeyBundle, err := DeserializeAndVerify(serialize)
if err != nil {
t.Fatalf("Key Bundle did not Deserialize %v", err)
}
if newKeyBundle.Keys[KeyTypeServerOnion] != Key(server.Hostname()) {
t.Fatalf("Key Bundle did not Serialize Correctly Actual: %v Expected: %v", newKeyBundle, serverKeyBundle)
}
}
func TestDeserializeAndVerifyMaliciousSignShouldFail(t *testing.T) {
server, _ := primitives.InitializeEphemeralIdentity()
maliciousServer, _ := primitives.InitializeEphemeralIdentity()
serverKeyBundle := NewKeyBundle()
serverKeyBundle.Keys[KeyTypeServerOnion] = Key(server.Hostname())
// This time we sign with a malicious server
serverKeyBundle.Sign(maliciousServer)
serialize := serverKeyBundle.Serialize()
newKeyBundle, err := DeserializeAndVerify(serialize)
if err == nil {
t.Fatalf("Key Bundle did Deserialize (it should have failed): %v", newKeyBundle)
}
}
func TestDeserializeAndVerifyUnsignedShouldFail(t *testing.T) {
server, _ := primitives.InitializeEphemeralIdentity()
serverKeyBundle := NewKeyBundle()
serverKeyBundle.Keys[KeyTypeServerOnion] = Key(server.Hostname())
// This time we don't sign
// serverKeyBundle.Sign(server)
serialize := serverKeyBundle.Serialize()
newKeyBundle, err := DeserializeAndVerify(serialize)
if err == nil {
t.Fatalf("Key Bundle did Deserialize (it should have failed): %v", newKeyBundle)
}
}


@ -1,38 +1,17 @@
package model
import (
"crypto/sha256"
"encoding/base64"
"errors"
"sort"
"sync"
"time"
)
// Timeline encapsulates a collection of ordered Messages, and a mechanism to access them
// Timeline encapsulates a collection of ordered messages, and a mechanism to access them
// in a threadsafe manner.
type Timeline struct {
Messages []Message
SignedGroupID []byte
SignedGroupId []byte
lock sync.Mutex
// a cache to allow quick checks for existing messages...
signatureCache map[string]int
// a cache to allow looking up messages by content hash
// we need this for features like reply-to-message, and other self
// referential applications.
// note that the index stored here is not global, as different peers may have different views of the timeline
// depending on whether they save history, and when they last purged their timeline; as such we can't
// simply send the index of the message.
hashCache map[string][]int
}
// LocallyIndexedMessage is a type wrapper around a Message and a TimeLine Index that is local to this
// instance of the timeline.
type LocallyIndexedMessage struct {
Message
LocalIndex int
}
// Message is a local representation of a given message sent over a group chat channel.
@ -42,20 +21,10 @@ type Message struct {
PeerID string
Message string
Signature []byte
Verified bool
PreviousMessageSig []byte
ReceivedByServer bool // messages sent to a server
Acknowledged bool // peer to peer
Error string `json:",omitempty"`
// Application specific flags, useful for storing small amounts of metadata
Flags uint64
}
// MessageBaseSize is the byte size (as of 2021.06) of an *empty* message serialized to JSON
const MessageBaseSize float64 = 463
// compareSignatures checks if a and b are equal. Note: this function does
// not need to be constant time - in fact it is better that it is not, as its main use
// is in sorting timeline state consistently.
func compareSignatures(a []byte, b []byte) bool {
if len(a) != len(b) {
return false
@ -68,7 +37,6 @@ func compareSignatures(a []byte, b []byte) bool {
return true
}
// GetMessages returns a copy of the entire timeline
func (t *Timeline) GetMessages() []Message {
t.lock.Lock()
messages := make([]Message, len(t.Messages))
@ -77,87 +45,24 @@ func (t *Timeline) GetMessages() []Message {
return messages
}
// GetCopy returns a duplicate of the Timeline
func (t *Timeline) GetCopy() *Timeline {
t.lock.Lock()
defer t.lock.Unlock()
newt := &Timeline{}
// initialize the timeline and copy the message over...
newt.SetMessages(t.Messages)
return newt
}
// SetMessages sets the Messages of this timeline. Only to be used in loading/initialization
func (t *Timeline) SetMessages(messages []Message) {
t.lock.Lock()
t.init()
t.lock.Unlock()
for _, m := range messages {
t.Insert(&m)
}
}
// GetMessagesByHash attempts to find messages that match the given
// content hash in the timeline. If successful it returns a list of messages as well as their local indexes;
// on failure it returns an error.
// We return a list of messages because content hashes are not guaranteed to be unique from a given Peer. This allows
// us to do things like: ensure that reply-to and quotes reference the last seen message from the message they are quoted
// in or detect duplicate messages from a peer.
func (t *Timeline) GetMessagesByHash(contentHash string) ([]LocallyIndexedMessage, error) {
t.lock.Lock()
defer t.lock.Unlock()
t.init()
if idxs, exists := t.hashCache[contentHash]; exists {
var messages []LocallyIndexedMessage
for _, idx := range idxs {
messages = append(messages, LocallyIndexedMessage{LocalIndex: idx, Message: t.Messages[idx]})
}
return messages, nil
}
return nil, errors.New("cannot find message by hash")
}
// calculateHash calculates the content hash of a given message
// the content used is the sender of the message and the body of the message
//
// content hashes must be calculable across timeline views so that different participants can
// calculate the same hash for the same message - as such we cannot use timestamps from peers or groups
// as they are mostly fuzzy.
//
// As a reminder: for p2p messages PeerID is authenticated by the initial 3DH handshake, for groups
// each message is signed by the sender, and this signature is checked prior to inclusion in the timeline.
//
// Multiple messages from the same peer can result in the same hash (where the same user sends the same message more
// than once) - in this case we store the index of each matching message and use that list for reference lookups.
func (t *Timeline) calculateHash(message Message) string {
content := []byte(message.PeerID + message.Message)
contentBasedHash := sha256.Sum256(content)
return base64.StdEncoding.EncodeToString(contentBasedHash[:])
}
// Len gets the length of the timeline
func (t *Timeline) Len() int {
return len(t.Messages)
}
// Swap swaps 2 Messages on the timeline.
// Swap swaps 2 messages on the timeline.
func (t *Timeline) Swap(i, j int) {
t.Messages[i], t.Messages[j] = t.Messages[j], t.Messages[i]
}
// Less checks 2 Messages (i and j) in the timeline and returns true if i occurred before j, else false
// Less checks 2 messages (i and j) in the timeline and returns true if i occurred before j, else false
func (t *Timeline) Less(i, j int) bool {
if t.Messages[i].Timestamp.Before(t.Messages[j].Timestamp) {
return true
}
// Short circuit false if j is before i; signature checks would give a wrong order in this case.
if t.Messages[j].Timestamp.Before(t.Messages[i].Timestamp) {
return false
}
if compareSignatures(t.Messages[i].PreviousMessageSig, t.SignedGroupID) {
if compareSignatures(t.Messages[i].PreviousMessageSig, t.SignedGroupId) {
return true
}
@ -168,70 +73,18 @@ func (t *Timeline) Less(i, j int) bool {
return false
}
// Sort sorts the timeline in a canonical order.
func (t *Timeline) Sort() {
// Insert inserts a message into the timeline in a thread safe way.
func (t *Timeline) Insert(mi *Message) {
t.lock.Lock()
defer t.lock.Unlock()
sort.Sort(t)
}
// Insert a message into the timeline in a thread safe way.
func (t *Timeline) Insert(mi *Message) int {
t.lock.Lock()
defer t.lock.Unlock()
// assert timeline is initialized
t.init()
// check that we haven't seen this message before (this has no impact on p2p messages, but is essential for
// group messages)
// FIXME: The below code now checks if the message has a signature. If it doesn't then skip duplication check.
// We do this because p2p messages right now do not have a signature, and so many p2p messages are not stored
// with a signature. In the future in hybrid groups this check will go away as all timelines will use the same
// underlying protocol.
// This is currently safe to do because p2p does not rely on signatures and groups will verify the signature of
// messages prior to generating an event to include them in the timeline.
if len(mi.Signature) != 0 {
idx, exists := t.signatureCache[base64.StdEncoding.EncodeToString(mi.Signature)]
if exists {
t.Messages[idx].Acknowledged = true
return idx
for _, m := range t.Messages {
// If the message already exists, then we don't add it
if compareSignatures(m.Signature, mi.Signature) {
return
}
}
// update the message store
t.Messages = append(t.Messages, *mi)
// add to signature cache for fast checking of group messages...
t.signatureCache[base64.StdEncoding.EncodeToString(mi.Signature)] = len(t.Messages) - 1
// content based addressing index
contentHash := t.calculateHash(*mi)
t.hashCache[contentHash] = append(t.hashCache[contentHash], len(t.Messages)-1)
return len(t.Messages) - 1
}
func (t *Timeline) init() {
// only allow this setting once...
if t.signatureCache == nil {
t.signatureCache = make(map[string]int)
}
if t.hashCache == nil {
t.hashCache = make(map[string][]int)
}
}
// SetSendError marks a message as having some kind of application specific error.
// Note: The message here is indexed by signature.
func (t *Timeline) SetSendError(sig []byte, e string) bool {
t.lock.Lock()
defer t.lock.Unlock()
idx, exists := t.signatureCache[base64.StdEncoding.EncodeToString(sig)]
if !exists {
return false
}
t.Messages[idx].Error = e
return true
sort.Sort(t)
}
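// exampleTimelineLookup is a minimal sketch (a hypothetical helper with illustrative values) of
// the local index returned by Insert and the content-hash lookup described above.
func exampleTimelineLookup() {
	t := &Timeline{}
	msg := Message{PeerID: "sender.onion", Message: "hi", Signature: []byte("sig-1")}
	idx := t.Insert(&msg) // local index of the stored message
	hash := CalculateContentHash("sender.onion", "hi")
	if found, err := t.GetMessagesByHash(hash); err == nil {
		_ = found[0].LocalIndex == idx // the content hash resolves back to the same local index
	}
	t.SetSendError([]byte("sig-1"), "example-delivery-error") // errors are indexed by signature
}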

model/message_test.go

@ -0,0 +1,104 @@
package model
import (
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"strconv"
"testing"
"time"
)
func TestMessagePadding(t *testing.T) {
// Setup the Group
sarah := GenerateNewProfile("Sarah")
alice := GenerateNewProfile("Alice")
sarah.AddContact(alice.Onion, &alice.PublicProfile)
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
gid, invite, _ := alice.StartGroup("aaa.onion")
gci := &protocol.CwtchPeerPacket{}
proto.Unmarshal(invite, gci)
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
group := alice.GetGroupByGroupID(gid)
c1, _ := sarah.EncryptMessageToGroup("Hello World 1", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c1))
alice.AttemptDecryption(c1)
c2, _ := alice.EncryptMessageToGroup("Hello World 2", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c2))
alice.AttemptDecryption(c2)
c3, _ := alice.EncryptMessageToGroup("Hello World 3", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c3))
alice.AttemptDecryption(c3)
c4, _ := alice.EncryptMessageToGroup("Hello World this is a much longer message 3", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c4))
alice.AttemptDecryption(c4)
}
func TestTranscriptConsistency(t *testing.T) {
timeline := new(Timeline)
// Setup the Group
sarah := GenerateNewProfile("Sarah")
alice := GenerateNewProfile("Alice")
sarah.AddContact(alice.Onion, &alice.PublicProfile)
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
gid, invite, _ := alice.StartGroup("aaa.onion")
gci := &protocol.CwtchPeerPacket{}
proto.Unmarshal(invite, gci)
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
group := alice.GetGroupByGroupID(gid)
t.Logf("group: %v, sarah %v", group, sarah)
c1, _ := alice.EncryptMessageToGroup("Hello World 1", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c1))
alice.AttemptDecryption(c1)
c2, _ := alice.EncryptMessageToGroup("Hello World 2", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c2))
alice.AttemptDecryption(c2)
c3, _ := alice.EncryptMessageToGroup("Hello World 3", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c3))
alice.AttemptDecryption(c3)
time.Sleep(time.Second * 1)
c4, _ := alice.EncryptMessageToGroup("Hello World 4", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c4))
alice.AttemptDecryption(c4)
c5, _ := alice.EncryptMessageToGroup("Hello World 5", group.GroupID)
t.Logf("Length of Encrypted Message: %v", len(c5))
_, m1 := sarah.AttemptDecryption(c1)
sarah.AttemptDecryption(c1) // Try a duplicate
_, m2 := sarah.AttemptDecryption(c2)
_, m3 := sarah.AttemptDecryption(c3)
_, m4 := sarah.AttemptDecryption(c4)
_, m5 := sarah.AttemptDecryption(c5)
// Now we simulate a client receiving these messages completely out of order
timeline.Insert(m1)
timeline.Insert(m5)
timeline.Insert(m4)
timeline.Insert(m3)
timeline.Insert(m2)
for i, m := range group.GetTimeline() {
if m.Message != "Hello World "+strconv.Itoa(i+1) {
t.Fatalf("Timeline Out of Order!: %v %v", i, m)
}
t.Logf("Messages %v: %v %x %x", i, m.Message, m.Signature, m.PreviousMessageSig)
}
}


@ -1,25 +0,0 @@
package model
import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
)
// CalculateContentHash derives a hash using the author and the message body. It is intended to be
// globally referencable in the context of a single conversation
func CalculateContentHash(author string, messageBody string) string {
content := []byte(author + messageBody)
contentBasedHash := sha256.Sum256(content)
return base64.StdEncoding.EncodeToString(contentBasedHash[:])
}
// DeserializeMessage parses a JSON-encoded MessageWrapper from the given message string.
func DeserializeMessage(message string) (*MessageWrapper, error) {
var cm MessageWrapper
err := json.Unmarshal([]byte(message), &cm)
if err != nil {
return nil, err
}
return &cm, err
}


@ -1,13 +0,0 @@
package model_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestModel(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Model Suite")
}


@ -1,50 +0,0 @@
package model
import (
"time"
)
// MessageWrapper is the canonical Cwtch overlay wrapper
type MessageWrapper struct {
Overlay int `json:"o"`
Data string `json:"d"`
// when the data was assembled
SendTime *time.Time `json:"s,omitempty"`
// when the data was transmitted (by protocol engine e.g. over Tor)
TransitTime *time.Time `json:"t,omitempty"`
// when the data was received
RecvTime *time.Time `json:"r,omitempty"`
}
// Channel is defined as being the last 3 bits of the overlay id
// Channel 0 is reserved for the main conversation
// Channel 2 is reserved for conversation admin (managed groups)
// Channel 7 is reserved for streams (no ack, no store)
func (mw MessageWrapper) Channel() int {
if mw.Overlay > 1024 {
return mw.Overlay & 0x07
}
// for backward compatibility all overlays less than 0x400 i.e. 1024 are
// mapped to channel 0 regardless of their channel status.
return 0
}
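// exampleChannelMapping is a short worked sketch (a hypothetical helper) of the mapping above:
// OverlayChat (1) is below 0x400 and so maps to channel 0, while a hypothetical overlay id of
// 0x407 (1031) maps to 1031 & 0x07 == 7, the stream channel.
func exampleChannelMapping() (int, int, bool) {
	chat := MessageWrapper{Overlay: OverlayChat}
	stream := MessageWrapper{Overlay: 0x407} // hypothetical stream overlay id
	return chat.Channel(), stream.Channel(), stream.IsStream() // 0, 7, true
}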
// IsStream returns true if the Overlay is a stream message; stream messages should not be acked or stored.
func (mw MessageWrapper) IsStream() bool {
return mw.Channel() == 0x07
}
// OverlayChat is the canonical identifier for chat overlays
const OverlayChat = 1
// OverlayInviteContact is the canonical identifier for the contact invite overlay
const OverlayInviteContact = 100
// OverlayInviteGroup is the canonical identifier for the group invite overlay
const OverlayInviteGroup = 101
// OverlayFileSharing is the canonical identifier for the file sharing overlay
const OverlayFileSharing = 200


@ -2,99 +2,308 @@ package model
import (
"crypto/rand"
"encoding/hex"
"crypto/rsa"
"cwtch.im/cwtch/protocol"
"encoding/asn1"
"encoding/json"
"errors"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/utils"
"golang.org/x/crypto/ed25519"
"io"
"io/ioutil"
"strconv"
"sync"
)
// Authorization is a type determining client assigned authorization to a peer
// Deprecated - Only used for Importing legacy profile formats
// Still used in some APIs in UI but will be replaced prior to full deprecation
type Authorization string
const (
// AuthUnknown is an initial state for a new unseen peer
AuthUnknown Authorization = "unknown"
// AuthApproved means the client has approved the peer, it can send messages to us, perform GetVals, etc
AuthApproved Authorization = "approved"
// AuthBlocked means the client has blocked the peer, it's messages and connections should be rejected
AuthBlocked Authorization = "blocked"
"time"
)
// PublicProfile is a local copy of a CwtchIdentity
// Deprecated - Only used for Importing legacy profile formats
type PublicProfile struct {
Name string
Ed25519PublicKey ed25519.PublicKey
Authorization Authorization
DeprecatedBlocked bool `json:"Blocked"`
Onion string
Attributes map[string]string
Timeline Timeline `json:"-"`
LocalID string // used by storage engine
State string `json:"-"`
lock sync.Mutex
UnacknowledgedMessages map[string]int
Name string
Ed25519PublicKey ed25519.PublicKey
Trusted bool
Blocked bool
Onion string
}
// Profile encapsulates all the attributes necessary to be a Cwtch Peer.
// Deprecated - Only used for Importing legacy profile formats
type Profile struct {
PublicProfile
Contacts map[string]*PublicProfile
Ed25519PrivateKey ed25519.PrivateKey
OnionPrivateKey *rsa.PrivateKey
Groups map[string]*Group
lock sync.Mutex
}
// MaxGroupMessageLength is the maximum length of a message posted to a server group.
// TODO: Should this be per server?
const MaxGroupMessageLength = 1800
// GenerateNewProfile creates a new profile, with new encryption and signing keys, and a profile name.
func GenerateNewProfile(name string) *Profile {
p := new(Profile)
p.Name = name
pub, priv, _ := ed25519.GenerateKey(rand.Reader)
p.Ed25519PublicKey = pub
p.Ed25519PrivateKey = priv
p.OnionPrivateKey, _ = utils.GeneratePrivateKey()
// DER Encode the Public Key
publicKeyBytes, _ := asn1.Marshal(rsa.PublicKey{
N: p.OnionPrivateKey.PublicKey.N,
E: p.OnionPrivateKey.PublicKey.E,
})
p.Onion = utils.GetTorHostname(publicKeyBytes)
p.Contacts = make(map[string]*PublicProfile)
p.Contacts[p.Onion] = &p.PublicProfile
p.Groups = make(map[string]*Group)
return p
}
// GetCwtchIdentityPacket returns the wire message for conveying this profile's identity.
func (p *Profile) GetCwtchIdentityPacket() (message []byte) {
ci := &protocol.CwtchIdentity{
Name: p.Name,
Ed25519PublicKey: p.Ed25519PublicKey,
}
cpp := &protocol.CwtchPeerPacket{
CwtchIdentify: ci,
}
message, err := proto.Marshal(cpp)
utils.CheckError(err)
return
}
// AddCwtchIdentity takes a CwtchIdentity message and adds the identity as a contact
// under the given onion address
func (p *Profile) AddCwtchIdentity(onion string, ci *protocol.CwtchIdentity) {
p.AddContact(onion, &PublicProfile{Name: ci.GetName(), Ed25519PublicKey: ci.GetEd25519PublicKey(), Onion: onion})
}
// AddContact allows direct manipulation of cwtch contacts
func (p *Profile) AddContact(onion string, profile *PublicProfile) {
p.lock.Lock()
p.Contacts[onion] = profile
p.lock.Unlock()
}
// RejectInvite rejects and removes a group invite
func (p *Profile) RejectInvite(groupID string) {
p.lock.Lock()
delete(p.Groups, groupID)
p.lock.Unlock()
}
// AcceptInvite accepts a group invite
func (p *Profile) AcceptInvite(groupID string) (err error) {
p.lock.Lock()
defer p.lock.Unlock()
group, ok := p.Groups[groupID]
if ok {
group.Accepted = true
} else {
err = errors.New("group does not exist")
}
return
}
// BlockPeer blocks a contact
func (p *Profile) BlockPeer(onion string) (err error) {
p.lock.Lock()
defer p.lock.Unlock()
contact, ok := p.Contacts[onion]
if ok {
contact.Blocked = true
} else {
err = errors.New("peer does not exist")
}
return
}
// TrustPeer sets a contact to trusted
func (p *Profile) TrustPeer(onion string) (err error) {
p.lock.Lock()
defer p.lock.Unlock()
contact, ok := p.Contacts[onion]
if ok {
contact.Trusted = true
} else {
err = errors.New("peer does not exist")
}
return
}
// IsBlocked returns true if the contact has been blocked, false otherwise
func (p *Profile) IsBlocked(onion string) bool {
contact, ok := p.GetContact(onion)
if ok {
return contact.Blocked
}
return false
}
// GetContact returns the contact for a given onion address, and whether it exists.
func (p *Profile) GetContact(onion string) (*PublicProfile, bool) {
p.lock.Lock()
defer p.lock.Unlock()
contact, ok := p.Contacts[onion]
return contact, ok
}
// VerifyGroupMessage confirms the authenticity of a message given an onion, message and signature.
func (p *Profile) VerifyGroupMessage(onion string, groupID string, message string, timestamp int32, signature []byte) bool {
if onion == p.Onion {
m := message + groupID + strconv.Itoa(int(timestamp))
return ed25519.Verify(p.Ed25519PublicKey, []byte(m), signature)
}
contact, found := p.GetContact(onion)
if found {
m := message + groupID + strconv.Itoa(int(timestamp))
return ed25519.Verify(contact.Ed25519PublicKey, []byte(m), signature)
}
return false
}
// SignMessage takes a given message and returns an Ed25519 signature
func (p *Profile) SignMessage(message string) []byte {
sig := ed25519.Sign(p.Ed25519PrivateKey, []byte(message))
return sig
}
// StartGroup, when given a server, creates a new Group under this profile and returns the group id and a precomputed
// invite which can be sent on the wire.
func (p *Profile) StartGroup(server string) (groupID string, invite []byte, err error) {
group := NewGroup(server)
groupID = group.GroupID
signedGroupID := p.SignMessage(groupID + server)
group.SignGroup(signedGroupID)
invite, err = group.Invite()
p.lock.Lock()
defer p.lock.Unlock()
p.Groups[group.GroupID] = group
return
}
// GetGroupByGroupID returns a pointer to a Group by the group ID; returns nil if no group is found.
func (p *Profile) GetGroupByGroupID(groupID string) (g *Group) {
p.lock.Lock()
defer p.lock.Unlock()
g = p.Groups[groupID]
return
}
// ProcessInvite adds a new group invite to the profile.
func (p *Profile) ProcessInvite(gci *protocol.GroupChatInvite, peerHostname string) {
group := new(Group)
group.GroupID = gci.GetGroupName()
group.SignedGroupID = gci.GetSignedGroupId()
copy(group.GroupKey[:], gci.GetGroupSharedKey()[:])
group.GroupServer = gci.GetServerHost()
group.Accepted = false
group.Owner = peerHostname
p.AddGroup(group)
}
// AddGroup is a convenience method for adding a group to a profile.
func (p *Profile) AddGroup(group *Group) {
existingGroup, exists := p.Groups[group.GroupID]
if !exists {
owner, ok := p.GetContact(group.Owner)
if ok {
valid := ed25519.Verify(owner.Ed25519PublicKey, []byte(group.GroupID+group.GroupServer), group.SignedGroupID)
if valid {
p.lock.Lock()
defer p.lock.Unlock()
p.Groups[group.GroupID] = group
}
}
} else if exists && existingGroup.Owner == group.Owner {
p.lock.Lock()
defer p.lock.Unlock()
p.Groups[group.GroupID] = group
}
// If we are sent an invite or group update by someone who is not an owner
// then we reject the group.
}
// AttemptDecryption takes a ciphertext and signature and attempts to decrypt it under known groups.
func (p *Profile) AttemptDecryption(ciphertext []byte) (bool, *Message) {
for _, group := range p.Groups {
success, dgm := group.DecryptMessage(ciphertext)
if success {
// Assert that we know the owner of the group
owner, ok := p.Contacts[group.Owner]
if ok {
valid := ed25519.Verify(owner.Ed25519PublicKey, []byte(group.GroupID+group.GroupServer), dgm.SignedGroupId)
// If we can decrypt the message, but the group id is wrong that means that
// this message is from someone who was not invited to the group.
// As such this group has been compromised, probably by one of the other members.
// We set the flag to be handled by the UX and reject the message.
if !valid {
group.Compromised()
return false, nil
}
}
verified := p.VerifyGroupMessage(dgm.GetOnion(), group.GroupID, dgm.GetText(), dgm.GetTimestamp(), dgm.GetSignature())
return true, group.AddMessage(dgm, verified)
}
}
return false, nil
}
func getRandomness(arr *[]byte) {
if _, err := io.ReadFull(rand.Reader, (*arr)[:]); err != nil {
if err != nil {
// If we can't get randomness, just crash; something is very very wrong and we are not going
// to resolve it here....
panic(err.Error())
}
utils.CheckError(err)
}
}
// GenerateRandomID generates a random 16 byte hex id code
func GenerateRandomID() string {
randBytes := make([]byte, 16)
rand.Read(randBytes)
return hex.EncodeToString(randBytes)
// EncryptMessageToGroup when given a message and a group, encrypts and signs the message under the group and
// profile
func (p *Profile) EncryptMessageToGroup(message string, groupID string) ([]byte, error) {
group := p.GetGroupByGroupID(groupID)
if group != nil {
timestamp := time.Now().Unix()
signature := p.SignMessage(message + groupID + strconv.Itoa(int(timestamp)))
var prevSig []byte
if len(group.Timeline.Messages) > 0 {
prevSig = group.Timeline.Messages[len(group.Timeline.Messages)-1].Signature
} else {
prevSig = group.SignedGroupID
}
lenPadding := 1024 - len(message)
padding := make([]byte, lenPadding)
getRandomness(&padding)
dm := &protocol.DecryptedGroupMessage{
Onion: proto.String(p.Onion),
Text: proto.String(message),
SignedGroupId: group.SignedGroupID[:],
Timestamp: proto.Int32(int32(timestamp)),
Signature: signature,
PreviousMessageSig: prevSig,
Padding: padding[:],
}
ciphertext := group.EncryptMessage(dm)
return ciphertext, nil
}
return nil, errors.New("group does not exist")
}
// GetCopy returns a full deep copy of the Profile struct and its members (timeline inclusion controlled by arg)
func (p *Profile) GetCopy(timeline bool) *Profile {
// Save makes a copy of the profile in the given file
func (p *Profile) Save(profilefile string) error {
p.lock.Lock()
defer p.lock.Unlock()
newp := new(Profile)
bytes, _ := json.Marshal(p)
json.Unmarshal(bytes, &newp)
if timeline {
for groupID := range newp.Groups {
if group, exists := newp.Groups[groupID]; exists {
if pGroup, exists := p.Groups[groupID]; exists {
group.Timeline = *(pGroup).Timeline.GetCopy()
}
}
}
for peerID := range newp.Contacts {
if peer, exists := newp.Contacts[peerID]; exists {
if pPeer, exists := p.Contacts[peerID]; exists {
peer.Timeline = *(pPeer).Timeline.GetCopy()
}
}
}
}
return newp
return ioutil.WriteFile(profilefile, bytes, 0600)
}
// LoadProfile loads a saved profile from a file.
func LoadProfile(profilefile string) (*Profile, error) {
bytes, _ := ioutil.ReadFile(profilefile)
profile := new(Profile)
err := json.Unmarshal(bytes, &profile)
return profile, err
}

model/profile_test.go

@ -0,0 +1,140 @@
package model
import (
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"testing"
)
func TestProfile(t *testing.T) {
profile := GenerateNewProfile("Sarah")
err := profile.Save("./profile_test")
if err != nil {
t.Errorf("Should have saved profile, but got error: %v", err)
}
loadedProfile, err := LoadProfile("./profile_test")
if err != nil || loadedProfile.Name != "Sarah" {
t.Errorf("Issue loading profile from file %v %v", err, loadedProfile)
}
}
func TestProfileIdentity(t *testing.T) {
sarah := GenerateNewProfile("Sarah")
alice := GenerateNewProfile("Alice")
message := sarah.GetCwtchIdentityPacket()
ci := &protocol.CwtchPeerPacket{}
err := proto.Unmarshal(message, ci)
if err != nil {
t.Errorf("alice should have added sarah as a contact %v", err)
}
alice.AddCwtchIdentity("sarah.onion", ci.GetCwtchIdentify())
if alice.Contacts["sarah.onion"].Name != "Sarah" {
t.Errorf("alice should have added sarah as a contact %v", alice.Contacts)
}
t.Logf("%v", alice)
}
func TestTrustPeer(t *testing.T) {
sarah := GenerateNewProfile("Sarah")
alice := GenerateNewProfile("Alice")
sarah.AddContact(alice.Onion, &alice.PublicProfile)
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
alice.TrustPeer(sarah.Onion)
if alice.IsBlocked(sarah.Onion) {
t.Errorf("peer should not be blocked")
}
if alice.TrustPeer("") == nil {
t.Errorf("trusting a non existent peer should error")
}
}
func TestBlockPeer(t *testing.T) {
sarah := GenerateNewProfile("Sarah")
alice := GenerateNewProfile("Alice")
sarah.AddContact(alice.Onion, &alice.PublicProfile)
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
alice.BlockPeer(sarah.Onion)
if !alice.IsBlocked(sarah.Onion) {
t.Errorf("peer should not be blocked")
}
if alice.BlockPeer("") == nil {
t.Errorf("blocking a non existent peer should error")
}
}
func TestAcceptNonExistentGroup(t *testing.T) {
sarah := GenerateNewProfile("Sarah")
sarah.AcceptInvite("doesnotexist")
}
func TestRejectGroupInvite(t *testing.T) {
sarah := GenerateNewProfile("Sarah")
alice := GenerateNewProfile("Alice")
sarah.AddContact(alice.Onion, &alice.PublicProfile)
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
gid, invite, _ := alice.StartGroup("aaa.onion")
gci := &protocol.CwtchPeerPacket{}
proto.Unmarshal(invite, gci)
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
group := alice.GetGroupByGroupID(gid)
if len(sarah.Groups) == 1 {
if sarah.GetGroupByGroupID(group.GroupID).Accepted {
t.Errorf("Group should not be accepted")
}
sarah.RejectInvite(group.GroupID)
if len(sarah.Groups) != 0 {
t.Errorf("Group %v should have been deleted", group.GroupID)
}
return
}
t.Errorf("Group should exist in map")
}
func TestProfileGroup(t *testing.T) {
sarah := GenerateNewProfile("Sarah")
alice := GenerateNewProfile("Alice")
sarah.AddContact(alice.Onion, &alice.PublicProfile)
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
gid, invite, _ := alice.StartGroup("aaa.onion")
gci := &protocol.CwtchPeerPacket{}
proto.Unmarshal(invite, gci)
sarah.ProcessInvite(gci.GetGroupChatInvite(), alice.Onion)
group := alice.GetGroupByGroupID(gid)
sarah.AcceptInvite(group.GroupID)
c, _ := sarah.EncryptMessageToGroup("Hello World", group.GroupID)
alice.AttemptDecryption(c)
gid2, invite2, _ := alice.StartGroup("bbb.onion")
gci2 := &protocol.CwtchPeerPacket{}
proto.Unmarshal(invite2, gci2)
sarah.ProcessInvite(gci2.GetGroupChatInvite(), alice.Onion)
group2 := alice.GetGroupByGroupID(gid2)
c2, _ := sarah.EncryptMessageToGroup("Hello World", group2.GroupID)
alice.AttemptDecryption(c2)
bob := GenerateNewProfile("bob")
bob.AddContact(alice.Onion, &alice.PublicProfile)
bob.ProcessInvite(gci2.GetGroupChatInvite(), alice.Onion)
c3, err := bob.EncryptMessageToGroup("Bobs Message", group2.GroupID)
if err == nil {
ok, message := alice.AttemptDecryption(c3)
if ok != true || message.Verified == true {
t.Errorf("Bobs message to the group should be decrypted but not verified by alice instead %v %v", message, ok)
}
eve := GenerateNewProfile("eve")
ok, _ = eve.AttemptDecryption(c3)
if ok {
t.Errorf("Eves hould not be able to decrypt messages!")
}
} else {
t.Errorf("Bob failed to encrypt a message to the group")
}
}


@ -0,0 +1,147 @@
package connections
import (
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/protocol"
"sync"
"time"
)
// Manager encapsulates all the logic necessary to manage outgoing peer and server connections.
type Manager struct {
peerConnections map[string]*PeerPeerConnection
serverConnections map[string]*PeerServerConnection
lock sync.Mutex
breakChannel chan bool
}
// NewConnectionsManager creates a new instance of Manager.
func NewConnectionsManager() *Manager {
m := new(Manager)
m.peerConnections = make(map[string]*PeerPeerConnection)
m.serverConnections = make(map[string]*PeerServerConnection)
m.breakChannel = make(chan bool)
return m
}
// ManagePeerConnection creates a new PeerConnection for the given Host and Profile.
func (m *Manager) ManagePeerConnection(host string, profile *model.Profile) {
m.lock.Lock()
_, exists := m.peerConnections[host]
if !exists {
ppc := NewPeerPeerConnection(host, profile)
go ppc.Run()
m.peerConnections[host] = ppc
}
m.lock.Unlock()
}
// ManageServerConnection creates a new ServerConnection for Host with the given callback handler.
func (m *Manager) ManageServerConnection(host string, handler func(string, *protocol.GroupMessage)) {
m.lock.Lock()
_, exists := m.serverConnections[host]
if !exists {
psc := NewPeerServerConnection(host)
go psc.Run()
psc.GroupMessageHandler = handler
m.serverConnections[host] = psc
}
m.lock.Unlock()
}
// GetPeers returns a map of all peer connections with their state
func (m *Manager) GetPeers() map[string]ConnectionState {
rm := make(map[string]ConnectionState)
m.lock.Lock()
for onion, ppc := range m.peerConnections {
rm[onion] = ppc.GetState()
}
m.lock.Unlock()
return rm
}
// GetServers returns a map of all server connections with their state.
func (m *Manager) GetServers() map[string]ConnectionState {
rm := make(map[string]ConnectionState)
m.lock.Lock()
for onion, psc := range m.serverConnections {
rm[onion] = psc.GetState()
}
m.lock.Unlock()
return rm
}
// GetPeerPeerConnectionForOnion safely returns a given peer connection
func (m *Manager) GetPeerPeerConnectionForOnion(host string) (ppc *PeerPeerConnection) {
m.lock.Lock()
ppc = m.peerConnections[host]
m.lock.Unlock()
return
}
// GetPeerServerConnectionForOnion safely returns a given host connection
func (m *Manager) GetPeerServerConnectionForOnion(host string) (psc *PeerServerConnection) {
m.lock.Lock()
psc = m.serverConnections[host]
m.lock.Unlock()
return
}
// AttemptReconnections repeatedly attempts to reconnect with failed peers and servers.
func (m *Manager) AttemptReconnections() {
timeout := time.Duration(0) // first pass right away
for {
select {
case <-time.After(timeout * time.Second):
m.lock.Lock()
for _, ppc := range m.peerConnections {
if ppc.GetState() == FAILED {
go ppc.Run()
}
}
m.lock.Unlock()
m.lock.Lock()
for _, psc := range m.serverConnections {
if psc.GetState() == FAILED {
go psc.Run()
}
}
m.lock.Unlock()
// Launch Another Run In 30 Seconds
timeout = time.Duration(30)
case <-m.breakChannel:
return
}
}
}
// ClosePeerConnection closes an existing peer connection
func (m *Manager) ClosePeerConnection(onion string) {
m.lock.Lock()
pc, ok := m.peerConnections[onion]
if ok {
pc.Close()
delete(m.peerConnections, onion)
}
m.lock.Unlock()
}
// Shutdown closes all peer and server connections and stops the reconnection loop.
func (m *Manager) Shutdown() {
m.breakChannel <- true
m.lock.Lock()
for onion, ppc := range m.peerConnections {
ppc.Close()
delete(m.peerConnections, onion)
}
for onion, psc := range m.serverConnections {
psc.Close()
delete(m.serverConnections, onion)
}
m.lock.Unlock()
}
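// exampleManagerLifecycle is a minimal usage sketch (a hypothetical helper; the onion address
// and exampleProfile are placeholders) of the connection manager above.
func exampleManagerLifecycle(exampleProfile *model.Profile) {
	manager := NewConnectionsManager()
	manager.ManagePeerConnection("examplepeeraddress.onion", exampleProfile)
	go manager.AttemptReconnections() // retries FAILED connections every 30 seconds
	for onion, state := range manager.GetPeers() {
		_, _ = onion, state // one of the ConnectionState values defined in state.go
	}
	manager.Shutdown()
}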


@ -0,0 +1,10 @@
package connections
import (
"testing"
)
func TestConnectionsManager(t *testing.T) {
// TODO We need to encapsulate connections behind a well defined interface for testing
NewConnectionsManager()
}


@ -0,0 +1,111 @@
package connections
import (
"cwtch.im/cwtch/peer/peer"
"cwtch.im/cwtch/protocol"
"github.com/s-rah/go-ricochet"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/connection"
"github.com/s-rah/go-ricochet/identity"
//"github.com/s-rah/go-ricochet/utils"
"cwtch.im/cwtch/model"
"log"
"time"
)
// PeerPeerConnection encapsulates a single outgoing Peer->Peer connection
type PeerPeerConnection struct {
connection.AutoConnectionHandler
PeerHostname string
state ConnectionState
connection *connection.Connection
profile *model.Profile
}
// NewPeerPeerConnection creates a new peer connection for the given hostname and profile.
func NewPeerPeerConnection(peerhostname string, profile *model.Profile) *PeerPeerConnection {
ppc := new(PeerPeerConnection)
ppc.PeerHostname = peerhostname
ppc.profile = profile
ppc.Init()
return ppc
}
// GetState returns the current connection state
func (ppc *PeerPeerConnection) GetState() ConnectionState {
return ppc.state
}
// ClientIdentity passes the given CwtchIdentity packet to the profile.
func (ppc *PeerPeerConnection) ClientIdentity(ci *protocol.CwtchIdentity) {
ppc.profile.AddCwtchIdentity(ppc.PeerHostname, ci)
}
// HandleGroupInvite passes the given group invite to the profile
func (ppc *PeerPeerConnection) HandleGroupInvite(gci *protocol.GroupChatInvite) {
ppc.profile.ProcessInvite(gci, ppc.PeerHostname)
}
// GetClientIdentityPacket returns nil to avoid peers constantly sending identity packets to each other.
func (ppc *PeerPeerConnection) GetClientIdentityPacket() []byte {
return nil
}
// SendGroupInvite sends the given serialized invite packet to the Peer
func (ppc *PeerPeerConnection) SendGroupInvite(invite []byte) {
ppc.connection.Do(func() error {
channel := ppc.connection.Channel("im.cwtch.peer", channels.Outbound)
if channel != nil {
peerchannel, ok := channel.Handler.(*peer.CwtchPeerChannel)
if ok {
log.Printf("Sending group invite packet\n")
peerchannel.SendMessage(invite)
}
}
return nil
})
}
// Run manages the setup and teardown of a peer->peer connection
func (ppc *PeerPeerConnection) Run() error {
ppc.state = CONNECTING
rc, err := goricochet.Open(ppc.PeerHostname)
if err == nil {
rc.TraceLog(false)
ppc.connection = rc
ppc.state = CONNECTED
_, err := connection.HandleOutboundConnection(ppc.connection).ProcessAuthAsClient(identity.Initialize(ppc.profile.Name, ppc.profile.OnionPrivateKey))
if err == nil {
ppc.state = AUTHENTICATED
go func() {
ppc.connection.Do(func() error {
ppc.connection.RequestOpenChannel("im.cwtch.peer", &peer.CwtchPeerChannel{Handler: ppc})
return nil
})
time.Sleep(time.Second * 1)
ppc.connection.Do(func() error {
channel := ppc.connection.Channel("im.cwtch.peer", channels.Outbound)
if channel != nil {
peerchannel, ok := channel.Handler.(*peer.CwtchPeerChannel)
if ok {
peerchannel.SendMessage(ppc.profile.GetCwtchIdentityPacket())
}
}
return nil
})
}()
ppc.connection.Process(ppc)
}
}
ppc.state = FAILED
return err
}
// Close closes the connection
func (ppc *PeerPeerConnection) Close() {
ppc.state = KILLED
ppc.connection.Conn.Close()
}


@ -0,0 +1,116 @@
package connections
import (
"crypto/rsa"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/peer/peer"
"cwtch.im/cwtch/protocol"
"github.com/s-rah/go-ricochet"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/connection"
"github.com/s-rah/go-ricochet/identity"
"github.com/s-rah/go-ricochet/utils"
"net"
"testing"
"time"
)
func PeerAuthValid(string, rsa.PublicKey) (allowed, known bool) {
return true, true
}
func runtestpeer(t *testing.T, tp *TestPeer, privateKey *rsa.PrivateKey) {
ln, _ := net.Listen("tcp", "127.0.0.1:5452")
conn, _ := ln.Accept()
defer conn.Close()
rc, err := goricochet.NegotiateVersionInbound(conn)
if err != nil {
t.Errorf("Negotiate Version Error: %v", err)
}
rc.TraceLog(true)
err = connection.HandleInboundConnection(rc).ProcessAuthAsServer(identity.Initialize("", privateKey), PeerAuthValid)
if err != nil {
t.Errorf("ServerAuth Error: %v", err)
}
tp.RegisterChannelHandler("im.cwtch.peer", func() channels.Handler {
cpc := new(peer.CwtchPeerChannel)
cpc.Handler = tp
return cpc
})
go func() {
alice := model.GenerateNewProfile("alice")
time.Sleep(time.Second * 1)
rc.Do(func() error {
channel := rc.Channel("im.cwtch.peer", channels.Inbound)
if channel != nil {
peerchannel, ok := channel.Handler.(*peer.CwtchPeerChannel)
if ok {
peerchannel.SendMessage(alice.GetCwtchIdentityPacket())
}
}
return nil
})
}()
rc.Process(tp)
}
type TestPeer struct {
connection.AutoConnectionHandler
ReceivedIdentityPacket bool
ReceivedGroupInvite bool
}
func (tp *TestPeer) ClientIdentity(ci *protocol.CwtchIdentity) {
tp.ReceivedIdentityPacket = true
}
func (tp *TestPeer) HandleGroupInvite(gci *protocol.GroupChatInvite) {
tp.ReceivedGroupInvite = true
}
func (tp *TestPeer) GetClientIdentityPacket() []byte {
return nil
}
func TestPeerPeerConnection(t *testing.T) {
privateKey, err := utils.GeneratePrivateKey()
if err != nil {
t.Errorf("Private Key Error %v", err)
}
onionAddr, err := utils.GetOnionAddress(privateKey)
if err != nil {
t.Errorf("Onion address error %v", err)
}
profile := model.GenerateNewProfile("sarah")
ppc := NewPeerPeerConnection("127.0.0.1:5452|"+onionAddr, profile)
//numcalls := 0
tp := new(TestPeer)
tp.Init()
go runtestpeer(t, tp, privateKey)
state := ppc.GetState()
if state != DISCONNECTED {
t.Errorf("new connections should start in disconnected state")
}
go ppc.Run()
time.Sleep(time.Second * 5)
state = ppc.GetState()
if state != AUTHENTICATED {
t.Errorf("connection state should be authenticated(3), was instead %v", state)
}
if tp.ReceivedIdentityPacket == false {
t.Errorf("should have received an identity packet")
}
_, invite, _ := profile.StartGroup("aaa.onion")
ppc.SendGroupInvite(invite)
time.Sleep(time.Second * 3)
if tp.ReceivedGroupInvite == false {
t.Errorf("should have received an group invite packet")
}
}


@ -0,0 +1,124 @@
package connections
import (
"cwtch.im/cwtch/peer/fetch"
"cwtch.im/cwtch/peer/listen"
"cwtch.im/cwtch/peer/send"
"cwtch.im/cwtch/protocol"
"errors"
"github.com/s-rah/go-ricochet"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/connection"
"github.com/s-rah/go-ricochet/identity"
"github.com/s-rah/go-ricochet/utils"
"log"
"time"
)
// PeerServerConnection encapsulates a single Peer->Server connection
type PeerServerConnection struct {
connection.AutoConnectionHandler
Server string
state ConnectionState
connection *connection.Connection
GroupMessageHandler func(string, *protocol.GroupMessage)
}
// NewPeerServerConnection creates a new Peer->Server outbound connection
func NewPeerServerConnection(serverhostname string) *PeerServerConnection {
psc := new(PeerServerConnection)
psc.Server = serverhostname
psc.Init()
return psc
}
// GetState returns the current connection state
func (psc *PeerServerConnection) GetState() ConnectionState {
return psc.state
}
// Run manages the setup and teardown of a peer server connection
func (psc *PeerServerConnection) Run() error {
log.Printf("Connecting to %v", psc.Server)
rc, err := goricochet.Open(psc.Server)
if err == nil {
rc.TraceLog(true)
psc.connection = rc
psc.state = CONNECTED
pk, err := utils.GeneratePrivateKey()
if err == nil {
_, err := connection.HandleOutboundConnection(psc.connection).ProcessAuthAsClient(identity.Initialize("cwtchpeer", pk))
if err == nil {
psc.state = AUTHENTICATED
go func() {
psc.connection.Do(func() error {
psc.connection.RequestOpenChannel("im.cwtch.server.fetch", &fetch.CwtchPeerFetchChannel{Handler: psc})
return nil
})
psc.connection.Do(func() error {
psc.connection.RequestOpenChannel("im.cwtch.server.listen", &listen.CwtchPeerListenChannel{Handler: psc})
return nil
})
}()
psc.connection.Process(psc)
}
}
}
psc.state = FAILED
return err
}
// Break makes Run() return and prevents processing, but doesn't close the connection.
func (psc *PeerServerConnection) Break() error {
return psc.connection.Break()
}
// SendGroupMessage sends the given protocol message to the Server.
func (psc *PeerServerConnection) SendGroupMessage(gm *protocol.GroupMessage) error {
if psc.state != AUTHENTICATED {
return errors.New("peer is not yet connected & authenticated to server cannot send message")
}
err := psc.connection.Do(func() error {
psc.connection.RequestOpenChannel("im.cwtch.server.send", &send.CwtchPeerSendChannel{})
return nil
})
errCount := 0
for errCount < 5 {
time.Sleep(time.Second * 1)
err = psc.connection.Do(func() error {
channel := psc.connection.Channel("im.cwtch.server.send", channels.Outbound)
if channel == nil {
return errors.New("no channel found")
}
sendchannel, ok := channel.Handler.(*send.CwtchPeerSendChannel)
if ok {
return sendchannel.SendGroupMessage(gm)
}
return errors.New("channel is not a peer send channel (this should definitely not happen)")
})
if err != nil {
errCount++
} else {
return nil
}
}
return err
}
// Close closes the connection to the server.
func (psc *PeerServerConnection) Close() {
psc.state = KILLED
psc.connection.Conn.Close()
}
// HandleGroupMessage passes the given group message back to the profile.
func (psc *PeerServerConnection) HandleGroupMessage(gm *protocol.GroupMessage) {
log.Printf("Received Group Message: %v", gm)
psc.GroupMessageHandler(psc.Server, gm)
}


@ -0,0 +1,106 @@
package connections
import (
"crypto/rsa"
"cwtch.im/cwtch/protocol"
"cwtch.im/cwtch/server/fetch"
"cwtch.im/cwtch/server/send"
"github.com/s-rah/go-ricochet"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/connection"
"github.com/s-rah/go-ricochet/identity"
"github.com/s-rah/go-ricochet/utils"
"net"
"testing"
"time"
)
func ServerAuthValid(string, rsa.PublicKey) (allowed, known bool) {
return true, true
}
type TestServer struct {
connection.AutoConnectionHandler
Received bool
}
func (ts *TestServer) HandleGroupMessage(gm *protocol.GroupMessage) {
ts.Received = true
}
func (ts *TestServer) HandleFetchRequest() []*protocol.GroupMessage {
return []*protocol.GroupMessage{{Ciphertext: []byte("hello"), Spamguard: []byte{}}, {Ciphertext: []byte("hello"), Spamguard: []byte{}}}
}
func runtestserver(t *testing.T, ts *TestServer, privateKey *rsa.PrivateKey) {
ln, _ := net.Listen("tcp", "127.0.0.1:5451")
conn, _ := ln.Accept()
defer conn.Close()
rc, err := goricochet.NegotiateVersionInbound(conn)
if err != nil {
t.Errorf("Negotiate Version Error: %v", err)
}
rc.TraceLog(true)
err = connection.HandleInboundConnection(rc).ProcessAuthAsServer(identity.Initialize("", privateKey), ServerAuthValid)
if err != nil {
t.Errorf("ServerAuth Error: %v", err)
}
ts.RegisterChannelHandler("im.cwtch.server.send", func() channels.Handler {
server := new(send.CwtchServerSendChannel)
server.Handler = ts
return server
})
ts.RegisterChannelHandler("im.cwtch.server.fetch", func() channels.Handler {
server := new(fetch.CwtchServerFetchChannel)
server.Handler = ts
return server
})
rc.Process(ts)
}
func TestPeerServerConnection(t *testing.T) {
privateKey, err := utils.GeneratePrivateKey()
if err != nil {
t.Errorf("Private Key Error %v", err)
}
ts := new(TestServer)
ts.Init()
go runtestserver(t, ts, privateKey)
onionAddr, err := utils.GetOnionAddress(privateKey)
if err != nil {
t.Errorf("Error getting onion address: %v", err)
}
psc := NewPeerServerConnection("127.0.0.1:5451|" + onionAddr)
numcalls := 0
psc.GroupMessageHandler = func(s string, gm *protocol.GroupMessage) {
numcalls++
}
state := psc.GetState()
if state != DISCONNECTED {
t.Errorf("new connections should start in disconnected state")
}
time.Sleep(time.Second * 1)
go psc.Run()
time.Sleep(time.Second * 2)
state = psc.GetState()
if state != AUTHENTICATED {
t.Errorf("connection should now be authed(%v), instead was %v", AUTHENTICATED, state)
}
gm := &protocol.GroupMessage{Ciphertext: []byte("hello")}
psc.SendGroupMessage(gm)
time.Sleep(time.Second * 2)
if ts.Received == false {
t.Errorf("Should have received a group message in test server")
}
if numcalls != 2 {
t.Errorf("Should have received 2 calls from fetch request, instead received %v", numcalls)
}
}

18
peer/connections/state.go Normal file
View File

@ -0,0 +1,18 @@
package connections
// ConnectionState defines the various states a connection can be in from disconnected to authenticated
type ConnectionState int
// Connection States
// DISCONNECTED - No existing connection has been made, or all attempts have failed
// CONNECTING - We are in the process of attempting to connect to a given endpoint
// CONNECTED - We have connected but not yet authenticated
// AUTHENTICATED - im.ricochet.auth.hidden-service has succeeded on the connection.
// FAILED - The connection attempt failed
// KILLED - The connection was deliberately closed
const (
DISCONNECTED ConnectionState = iota
CONNECTING
CONNECTED
AUTHENTICATED
FAILED
KILLED
)

File diff suppressed because it is too large

25
peer/cwtch_peer_test.go Normal file
View File

@ -0,0 +1,25 @@
package peer
import (
"testing"
)
func TestCwtchPeerGenerate(t *testing.T) {
alice := NewCwtchPeer("alice")
alice.Save("./test_profile")
aliceLoaded, err := LoadCwtchPeer("./test_profile")
if err != nil || aliceLoaded.Profile.Name != "alice" {
t.Errorf("something went wrong saving and loading profiles %v %v", err, aliceLoaded)
}
groupID, _, _ := aliceLoaded.Profile.StartGroup("test.server")
exportedGroup, _ := aliceLoaded.ExportGroup(groupID)
t.Logf("Exported Group: %v from %v", exportedGroup, aliceLoaded.Profile.Onion)
importedGroupID, err := alice.ImportGroup(exportedGroup)
group := alice.Profile.GetGroupByGroupID(importedGroupID)
t.Logf("Imported Group: %v, err := %v %v", group, err, importedGroupID)
}

File diff suppressed because it is too large

View File

@ -0,0 +1,103 @@
package fetch
import (
"cwtch.im/cwtch/protocol"
"errors"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/utils"
"github.com/s-rah/go-ricochet/wire/control"
)
// CwtchPeerFetchChannel is the peer implementation of the im.cwtch.server.fetch
// channel.
type CwtchPeerFetchChannel struct {
channel *channels.Channel
Handler CwtchPeerFetchChannelHandler
}
// CwtchPeerFetchChannelHandler should be implemented by peers to receive new messages.
type CwtchPeerFetchChannelHandler interface {
HandleGroupMessage(*protocol.GroupMessage)
}
// Type returns the type string for this channel, i.e. "im.cwtch.server.fetch".
func (cpfc *CwtchPeerFetchChannel) Type() string {
return "im.cwtch.server.fetch"
}
// Closed is called when the channel is closed for any reason.
func (cpfc *CwtchPeerFetchChannel) Closed(err error) {
}
// OnlyClientCanOpen - only the client can open Cwtch server channels
func (cpfc *CwtchPeerFetchChannel) OnlyClientCanOpen() bool {
return true
}
// Singleton - for Cwtch channels there can only be one instance per direction
func (cpfc *CwtchPeerFetchChannel) Singleton() bool {
return true
}
// Bidirectional - Cwtch channels are not bidirectional
func (cpfc *CwtchPeerFetchChannel) Bidirectional() bool {
return false
}
// RequiresAuthentication - Cwtch server channels require no auth.
func (cpfc *CwtchPeerFetchChannel) RequiresAuthentication() string {
return "none"
}
// OpenInbound - cwtch peer implementations should never respond to inbound requests
func (cpfc *CwtchPeerFetchChannel) OpenInbound(channel *channels.Channel, raw *Protocol_Data_Control.OpenChannel) ([]byte, error) {
return nil, errors.New("client does not receive inbound listen channels")
}
// OpenOutbound sets up a new cwtch fetch channel
func (cpfc *CwtchPeerFetchChannel) OpenOutbound(channel *channels.Channel) ([]byte, error) {
cpfc.channel = channel
messageBuilder := new(utils.MessageBuilder)
return messageBuilder.OpenChannel(channel.ID, cpfc.Type()), nil
}
// OpenOutboundResult confirms a previous open channel request
func (cpfc *CwtchPeerFetchChannel) OpenOutboundResult(err error, crm *Protocol_Data_Control.ChannelResult) {
if err == nil {
if crm.GetOpened() {
cpfc.channel.Pending = false
cpfc.FetchRequest()
}
}
}
// FetchRequest sends a FetchMessage to the Server.
func (cpfc *CwtchPeerFetchChannel) FetchRequest() error {
if cpfc.channel.Pending == false {
fm := &protocol.FetchMessage{}
csp := &protocol.CwtchServerPacket{
FetchMessage: fm,
}
packet, _ := proto.Marshal(csp)
cpfc.channel.SendMessage(packet)
} else {
return errors.New("channel isn't set up yet")
}
return nil
}
// Packet is called for each raw packet received on this channel.
func (cpfc *CwtchPeerFetchChannel) Packet(data []byte) {
csp := &protocol.CwtchServerPacket{}
err := proto.Unmarshal(data, csp)
if err == nil {
if csp.GetGroupMessage() != nil {
gm := csp.GetGroupMessage()
// We create a new go routine here to avoid leaking any information about processing time
// TODO Server can probably try to use this to DoS a peer
go cpfc.Handler.HandleGroupMessage(gm)
}
}
}

View File

@ -0,0 +1,92 @@
package fetch
import (
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/wire/control"
"testing"
"time"
)
type TestHandler struct {
Received bool
}
func (th *TestHandler) HandleGroupMessage(m *protocol.GroupMessage) {
th.Received = true
}
func TestPeerFetchChannelAttributes(t *testing.T) {
cssc := new(CwtchPeerFetchChannel)
if cssc.Type() != "im.cwtch.server.fetch" {
t.Errorf("cwtch channel type is incorrect %v", cssc.Type())
}
if !cssc.OnlyClientCanOpen() {
t.Errorf("only clients should be able to open im.cwtch.server.Fetch channel")
}
if cssc.Bidirectional() {
t.Errorf("im.cwtch.server.fetch should not be bidirectional")
}
if !cssc.Singleton() {
t.Errorf("im.cwtch.server.fetch should be a Singleton")
}
if cssc.RequiresAuthentication() != "none" {
t.Errorf("cwtch channel required auth is incorrect %v", cssc.RequiresAuthentication())
}
}
func TestPeerFetchChannelOpenInbound(t *testing.T) {
cssc := new(CwtchPeerFetchChannel)
channel := new(channels.Channel)
_, err := cssc.OpenInbound(channel, nil)
if err == nil {
t.Errorf("client implementation of im.cwtch.server.Fetch should never open an inbound channel")
}
}
func TestPeerFetchChannel(t *testing.T) {
pfc := new(CwtchPeerFetchChannel)
th := new(TestHandler)
pfc.Handler = th
channel := new(channels.Channel)
channel.ID = 3
channel.SendMessage = func([]byte) {}
channel.CloseChannel = func() {}
result, err := pfc.OpenOutbound(channel)
if err != nil {
t.Errorf("expected result but also got non-nil error: result:%v, err: %v", result, err)
}
cr := &Protocol_Data_Control.ChannelResult{
ChannelIdentifier: proto.Int32(3),
Opened: proto.Bool(true),
}
pfc.OpenOutboundResult(nil, cr)
if channel.Pending {
t.Errorf("once opened channel should no longer be pending")
}
csp := &protocol.CwtchServerPacket{
GroupMessage: &protocol.GroupMessage{
Ciphertext: []byte("hello"), Spamguard: []byte{},
},
}
packet, _ := proto.Marshal(csp)
pfc.Packet(packet)
time.Sleep(time.Second * 2)
if !th.Received {
t.Errorf("group message should have been received")
}
pfc.Closed(nil)
}

View File

@ -1,52 +0,0 @@
package peer
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/settings"
)
type ProfileHooks interface {
// EventsToRegister returns the set of events that the extension is interested in hooking
EventsToRegister() []event.Type
// ExperimentsToRegister returns a set of experiments that the extension is interested in being notified about
ExperimentsToRegister() []string
// OnEvent is called whenever an event Registered with RegisterEvents is called
OnEvent(event event.Event, profile CwtchPeer)
// OnContactRequestValue is Hooked when a contact sends a request for the given path
OnContactRequestValue(profile CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath)
// OnContactReceiveValue is Hooked after a profile receives a response to a Get/Val Request
OnContactReceiveValue(profile CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool)
// NotifySettingsUpdate allow profile hooks to access configs e.g. download folder
NotifySettingsUpdate(settings settings.GlobalSettings)
}
type ProfileHook struct {
extension ProfileHooks
events map[event.Type]bool
experiments map[string]bool
}
func ConstructHook(extension ProfileHooks) ProfileHook {
events := make(map[event.Type]bool)
for _, e := range extension.EventsToRegister() {
events[e] = true
}
experiments := make(map[string]bool)
for _, experiment := range extension.ExperimentsToRegister() {
experiments[experiment] = true
}
return ProfileHook{
extension,
events,
experiments,
}
}
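For reference, an extension satisfies the ProfileHooks interface above by declaring the events and experiments it cares about and reacting to them; ConstructHook then builds the lookup maps used to dispatch to it. A minimal sketch, written as if inside this package; the hook type is illustrative and event.NewMessageFromPeer is assumed to be one of the available event types:

type loggingHook struct{}

func (loggingHook) EventsToRegister() []event.Type  { return []event.Type{event.NewMessageFromPeer} }
func (loggingHook) ExperimentsToRegister() []string { return nil }
func (loggingHook) OnEvent(ev event.Event, profile CwtchPeer) {
	// react here to any event type returned by EventsToRegister
}
func (loggingHook) OnContactRequestValue(profile CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
}
func (loggingHook) OnContactReceiveValue(profile CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
}
func (loggingHook) NotifySettingsUpdate(settings settings.GlobalSettings) {}

A profile would register such an extension through the RegisterHook method on the CwtchPeer interface (see further below in this diff).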

View File

@ -0,0 +1,86 @@
package listen
import (
"cwtch.im/cwtch/protocol"
"errors"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/utils"
"github.com/s-rah/go-ricochet/wire/control"
)
// CwtchPeerListenChannel is the peer implementation of im.cwtch.server.listen
type CwtchPeerListenChannel struct {
channel *channels.Channel
Handler CwtchPeerSendChannelHandler
}
// CwtchPeerSendChannelHandler is implemented by peers who want to listen to new messages
type CwtchPeerSendChannelHandler interface {
HandleGroupMessage(*protocol.GroupMessage)
}
// Type returns the type string for this channel, i.e. "im.cwtch.server.listen".
func (cplc *CwtchPeerListenChannel) Type() string {
return "im.cwtch.server.listen"
}
// Closed is called when the channel is closed for any reason.
func (cplc *CwtchPeerListenChannel) Closed(err error) {
}
// OnlyClientCanOpen - Cwtch server channels can only be opened by clients (peers)
func (cplc *CwtchPeerListenChannel) OnlyClientCanOpen() bool {
return true
}
// Singleton - for Cwtch channels there can only be one instance per direction
func (cplc *CwtchPeerListenChannel) Singleton() bool {
return true
}
// Bidirectional - Cwtch channels are not bidirectional
func (cplc *CwtchPeerListenChannel) Bidirectional() bool {
return false
}
// RequiresAuthentication - Cwtch server channels require no authentication
func (cplc *CwtchPeerListenChannel) RequiresAuthentication() string {
return "none"
}
// OpenInbound - peers should never respond to open inbound requests from servers
func (cplc *CwtchPeerListenChannel) OpenInbound(channel *channels.Channel, raw *Protocol_Data_Control.OpenChannel) ([]byte, error) {
return nil, errors.New("client does not receive inbound listen channels")
}
// OpenOutbound sets up a new server listen channel
func (cplc *CwtchPeerListenChannel) OpenOutbound(channel *channels.Channel) ([]byte, error) {
cplc.channel = channel
messageBuilder := new(utils.MessageBuilder)
return messageBuilder.OpenChannel(channel.ID, cplc.Type()), nil
}
// OpenOutboundResult confirms a previous open channel request
func (cplc *CwtchPeerListenChannel) OpenOutboundResult(err error, crm *Protocol_Data_Control.ChannelResult) {
if err == nil {
if crm.GetOpened() {
cplc.channel.Pending = false
}
}
}
// Packet is called for each server packet received on this channel.
func (cplc *CwtchPeerListenChannel) Packet(data []byte) {
csp := &protocol.CwtchServerPacket{}
err := proto.Unmarshal(data, csp)
if err == nil {
if csp.GetGroupMessage() != nil {
gm := csp.GetGroupMessage()
// We create a new go routine here to avoid leaking any information about processing time
// TODO Server can probably try to use this to DoS a peer
go cplc.Handler.HandleGroupMessage(gm)
}
}
}

View File

@ -0,0 +1,89 @@
package listen
import (
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/wire/control"
"testing"
"time"
)
type TestHandler struct {
Received bool
}
func (th *TestHandler) HandleGroupMessage(m *protocol.GroupMessage) {
th.Received = true
}
func TestPeerListenChannelAttributes(t *testing.T) {
cssc := new(CwtchPeerListenChannel)
if cssc.Type() != "im.cwtch.server.listen" {
t.Errorf("cwtch channel type is incorrect %v", cssc.Type())
}
if !cssc.OnlyClientCanOpen() {
t.Errorf("only clients should be able to open im.cwtch.server.listen channel")
}
if cssc.Bidirectional() {
t.Errorf("im.cwtch.server.listen should not be bidirectional")
}
if !cssc.Singleton() {
t.Errorf("im.cwtch.server.listen should be a Singleton")
}
if cssc.RequiresAuthentication() != "none" {
t.Errorf("cwtch channel required auth is incorrect %v", cssc.RequiresAuthentication())
}
}
func TestPeerListenChannelOpenInbound(t *testing.T) {
cssc := new(CwtchPeerListenChannel)
channel := new(channels.Channel)
_, err := cssc.OpenInbound(channel, nil)
if err == nil {
t.Errorf("client implementation of im.cwtch.server.Listen should never open an inbound channel")
}
}
func TestPeerListenChannel(t *testing.T) {
pfc := new(CwtchPeerListenChannel)
th := new(TestHandler)
pfc.Handler = th
channel := new(channels.Channel)
channel.ID = 3
result, err := pfc.OpenOutbound(channel)
if err != nil {
t.Errorf("expected result but also got non-nil error: result:%v, err: %v", result, err)
}
cr := &Protocol_Data_Control.ChannelResult{
ChannelIdentifier: proto.Int32(3),
Opened: proto.Bool(true),
}
pfc.OpenOutboundResult(nil, cr)
if channel.Pending {
t.Errorf("once opened channel should no longer be pending")
}
csp := &protocol.CwtchServerPacket{
GroupMessage: &protocol.GroupMessage{Ciphertext: []byte("hello"), Spamguard: []byte{}},
}
packet, _ := proto.Marshal(csp)
pfc.Packet(packet)
// Wait for goroutine to run
time.Sleep(time.Second * 1)
if !th.Received {
t.Errorf("group message should have been received")
}
pfc.Closed(nil)
}

114
peer/peer/peer_channel.go Normal file
View File

@ -0,0 +1,114 @@
package peer
import (
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/utils"
"github.com/s-rah/go-ricochet/wire/control"
"log"
)
// CwtchPeerChannel implements the ChannelHandler interface for a channel of
// type "im.cwtch.peer". The channel may be inbound or outbound.
//
// CwtchPeerChannel implements protocol-level sanity and state validation, but
// does not handle or acknowledge Cwtch messages. The application must provide
// a CwtchPeerChannelHandler implementation to handle Cwtch events.
type CwtchPeerChannel struct {
// Methods of Handler are called for Cwtch events on this channel
Handler CwtchPeerChannelHandler
channel *channels.Channel
}
// CwtchPeerChannelHandler is implemented by an application type to receive
// events from a CwtchPeerChannel.
type CwtchPeerChannelHandler interface {
ClientIdentity(*protocol.CwtchIdentity)
HandleGroupInvite(*protocol.GroupChatInvite)
GetClientIdentityPacket() []byte
}
// SendMessage sends a raw message on this channel
func (cpc *CwtchPeerChannel) SendMessage(data []byte) {
cpc.channel.SendMessage(data)
}
// Type returns the type string for this channel, i.e. "im.cwtch.peer".
func (cpc *CwtchPeerChannel) Type() string {
return "im.cwtch.peer"
}
// Closed is called when the channel is closed for any reason.
func (cpc *CwtchPeerChannel) Closed(err error) {
}
// OnlyClientCanOpen - either side can open a Cwtch peer channel
func (cpc *CwtchPeerChannel) OnlyClientCanOpen() bool {
return false
}
// Singleton - for Cwtch channels there can only be one instance per direction
func (cpc *CwtchPeerChannel) Singleton() bool {
return true
}
// Bidirectional - Cwtch channels are not bidirectional
func (cpc *CwtchPeerChannel) Bidirectional() bool {
return false
}
// RequiresAuthentication - Cwtch channels require hidden service auth
func (cpc *CwtchPeerChannel) RequiresAuthentication() string {
return "im.ricochet.auth.hidden-service"
}
// OpenInbound is the first method called for an inbound channel request.
// If an error is returned, the channel is rejected. If a RawMessage is
// returned, it will be sent as the ChannelResult message.
func (cpc *CwtchPeerChannel) OpenInbound(channel *channels.Channel, raw *Protocol_Data_Control.OpenChannel) ([]byte, error) {
cpc.channel = channel
messageBuilder := new(utils.MessageBuilder)
return messageBuilder.AckOpenChannel(channel.ID), nil
}
// OpenOutbound is the first method called for an outbound channel request.
// If an error is returned, the channel is not opened. If a RawMessage is
// returned, it will be sent as the OpenChannel message.
func (cpc *CwtchPeerChannel) OpenOutbound(channel *channels.Channel) ([]byte, error) {
cpc.channel = channel
messageBuilder := new(utils.MessageBuilder)
return messageBuilder.OpenChannel(channel.ID, cpc.Type()), nil
}
// OpenOutboundResult is called when a response is received for an
// outbound OpenChannel request. If `err` is non-nil, the channel was
// rejected and Closed will be called immediately afterwards. `raw`
// contains the raw protocol message including any extension data.
func (cpc *CwtchPeerChannel) OpenOutboundResult(err error, crm *Protocol_Data_Control.ChannelResult) {
if err == nil {
if crm.GetOpened() {
cpc.channel.Pending = false
}
}
}
// Packet is called for each raw packet received on this channel.
func (cpc *CwtchPeerChannel) Packet(data []byte) {
cpp := &protocol.CwtchPeerPacket{}
err := proto.Unmarshal(data, cpp)
if err == nil {
if cpp.GetCwtchIdentify() != nil {
cpc.Handler.ClientIdentity(cpp.GetCwtchIdentify())
pkt := cpc.Handler.GetClientIdentityPacket()
if pkt != nil {
cpc.SendMessage(pkt)
}
} else if cpp.GetGroupChatInvite() != nil {
cpc.Handler.HandleGroupInvite(cpp.GetGroupChatInvite())
}
} else {
log.Printf("Error Receivng Packet %v\n", err)
}
}

View File

@ -0,0 +1,121 @@
package peer
import (
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/wire/control"
"testing"
)
func TestPeerChannelAttributes(t *testing.T) {
cssc := new(CwtchPeerChannel)
if cssc.Type() != "im.cwtch.peer" {
t.Errorf("cwtch channel type is incorrect %v", cssc.Type())
}
if cssc.OnlyClientCanOpen() {
t.Errorf("either side should be able to open im.cwtch.peer channel")
}
if cssc.Bidirectional() {
t.Errorf("im.cwtch.peer should not be bidirectional")
}
if !cssc.Singleton() {
t.Errorf("im.cwtch.server.listen should be a Singleton")
}
if cssc.RequiresAuthentication() != "im.ricochet.auth.hidden-service" {
t.Errorf("cwtch channel required auth is incorrect %v", cssc.RequiresAuthentication())
}
}
type TestHandler struct {
Received bool
ReceivedGroupInvite bool
}
func (th *TestHandler) ClientIdentity(ci *protocol.CwtchIdentity) {
if ci.GetName() == "hello" {
th.Received = true
}
}
func (th *TestHandler) HandleGroupInvite(ci *protocol.GroupChatInvite) {
///if ci.GetName() == "hello" {
th.ReceivedGroupInvite = true
//}
}
func (th *TestHandler) GetClientIdentityPacket() []byte {
return nil
}
func TestPeerChannel(t *testing.T) {
th := new(TestHandler)
cpc := new(CwtchPeerChannel)
cpc.Handler = th
channel := new(channels.Channel)
channel.ID = 3
result, err := cpc.OpenOutbound(channel)
if err != nil {
t.Errorf("should have send open channel request instead %v, %v", result, err)
}
cpc2 := new(CwtchPeerChannel)
channel2 := new(channels.Channel)
channel2.ID = 3
sent := false
channel2.SendMessage = func(message []byte) {
sent = true
}
control := new(Protocol_Data_Control.Packet)
proto.Unmarshal(result[:], control)
ack, err := cpc2.OpenInbound(channel2, control.GetOpenChannel())
if err != nil {
t.Errorf("should have ack open channel request instead %v, %v", ack, err)
}
ackpacket := new(Protocol_Data_Control.Packet)
proto.Unmarshal(ack[:], ackpacket)
cpc.OpenOutboundResult(nil, ackpacket.GetChannelResult())
if channel.Pending != false {
t.Errorf("Channel should no longer be pending")
}
gm := &protocol.CwtchIdentity{
Name: "hello",
Ed25519PublicKey: []byte{},
}
cpp := &protocol.CwtchPeerPacket{
CwtchIdentify: gm,
}
packet, _ := proto.Marshal(cpp)
cpc.Packet(packet)
if th.Received == false {
t.Errorf("Should have sent packet to handler")
}
cpc2.SendMessage(packet)
if sent == false {
t.Errorf("Should have sent packet to channel")
}
gci := &protocol.GroupChatInvite{
GroupName: "hello",
GroupSharedKey: []byte{},
ServerHost: "abc.onion",
}
cpp = &protocol.CwtchPeerPacket{
GroupChatInvite: gci,
}
packet, _ = proto.Marshal(cpp)
cpc.Packet(packet)
if th.ReceivedGroupInvite == false {
t.Errorf("Should have sent invite packet to handler")
}
}

View File

@ -1,181 +0,0 @@
package peer
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/model/attr"
"cwtch.im/cwtch/protocol/connections"
"cwtch.im/cwtch/settings"
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
"git.openprivacy.ca/openprivacy/connectivity"
)
// AccessPeeringState provides access to functions relating to the underlying connections of a peer.
type AccessPeeringState interface {
GetPeerState(string) connections.ConnectionState
}
// ModifyPeeringState is a meta-interface intended to restrict callers to modify-only access to connection peers
type ModifyPeeringState interface {
BlockUnknownConnections()
AllowUnknownConnections()
PeerWithOnion(string)
QueueJoinServer(string)
DisconnectFromPeer(string)
DisconnectFromServer(string)
}
// ModifyContactsAndPeers is a meta-interface intended to restrict a call to reading and modifying contacts
// and peers.
type ModifyContactsAndPeers interface {
ModifyPeeringState
}
// ReadServers provides access to the servers
type ReadServers interface {
GetServers() []string
}
// ModifyGroups provides write-only access add/edit/remove new groups
type ModifyGroups interface {
ImportGroup(string) (int, error)
StartGroup(string, string) (int, error)
}
// ModifyServers provides write-only access to servers
type ModifyServers interface {
AddServer(string) (string, error)
ResyncServer(onion string) error
}
// SendMessages enables a caller to send messages to a contact
type SendMessages interface {
SendMessage(conversation int, message string) (int, error)
// EnhancedSendMessage Attempts to Send a Message and Immediately Attempts to Lookup the Message in the Database
EnhancedSendMessage(conversation int, message string) string
SendInviteToConversation(conversationID int, inviteConversationID int) (int, error)
// EnhancedSendInviteMessage Attempts to Send an Invite and Immediately Attempts to Lookup the Message in the Database
EnhancedSendInviteMessage(conversation int, inviteConversationID int) string
SendScopedZonedGetValToContact(conversationID int, scope attr.Scope, zone attr.Zone, key string)
}
// CwtchPeer provides us with a way of testing systems built on top of cwtch without having to
// directly implement a cwtchPeer.
type CwtchPeer interface {
// Core Cwtch Peer Functions that should not be exposed to
// most functions
Init(event.Manager)
GenerateProtocolEngine(acn connectivity.ACN, bus event.Manager, engineHooks connections.EngineHooks) (connections.Engine, error)
AutoHandleEvents(events []event.Type)
Listen()
StartConnections(doPeers, doServers bool)
// Deprecated in 1.10
StartPeersConnections()
// Deprecated in 1.10
StartServerConnections()
Shutdown()
// GetOnion is deprecated. If you find yourself needing to rely on this method it is time
// to consider replacing this with a GetAddress(es) function that can fully expand cwtch beyond the boundaries
// of tor v3 onion services.
// Deprecated
GetOnion() string
// SetScopedZonedAttribute allows the setting of an attribute by scope and zone
// scope.zone.key = value
SetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string, value string)
// GetScopedZonedAttribute allows the retrieval of an attribute by scope and zone
// scope.zone.key = value
GetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool)
// GetScopedZonedAttributeKeys returns all keys associated with a given scope and zone
GetScopedZonedAttributeKeys(scope attr.Scope, zone attr.Zone) ([]string, error)
AccessPeeringState
ModifyPeeringState
ModifyGroups
ReadServers
ModifyServers
SendMessages
// Import Bundle
ImportBundle(string) error
EnhancedImportBundle(string) string
// New Unified Conversation Interfaces
NewContactConversation(handle string, acl model.AccessControl, accepted bool) (int, error)
FetchConversations() ([]*model.Conversation, error)
ArchiveConversation(conversation int)
GetConversationInfo(conversation int) (*model.Conversation, error)
FetchConversationInfo(handle string) (*model.Conversation, error)
// API-level management of conversation access control
UpdateConversationAccessControlList(id int, acl model.AccessControlList) error
EnhancedUpdateConversationAccessControlList(conversation int, acjson string) error
GetConversationAccessControlList(conversation int) (model.AccessControlList, error)
EnhancedGetConversationAccessControlList(conversation int) (string, error)
// Convenience Functions for ACL Management
AcceptConversation(conversation int) error
BlockConversation(conversation int) error
UnblockConversation(conversation int) error
SetConversationAttribute(conversation int, path attr.ScopedZonedPath, value string) error
GetConversationAttribute(conversation int, path attr.ScopedZonedPath) (string, error)
DeleteConversation(conversation int) error
// New Unified Conversation Channel Interfaces
GetChannelMessage(conversation int, channel int, id int) (string, model.Attributes, error)
GetChannelMessageCount(conversation int, channel int) (int, error)
GetChannelMessageByContentHash(conversation int, channel int, contenthash string) (int, error)
GetMostRecentMessages(conversation int, channel int, offset int, limit uint) ([]model.ConversationMessage, error)
UpdateMessageAttribute(conversation int, channel int, id int, key string, value string) error
SearchConversations(pattern string) string
// EnhancedGetMessageById returns a json-encoded enhanced message, suitable for rendering in a UI
EnhancedGetMessageById(conversation int, mid int) string
// EnhancedGetMessageByContentHash returns a json-encoded enhanced message, suitable for rendering in a UI
EnhancedGetMessageByContentHash(conversation int, hash string) string
// EnhancedGetMessages returns a set of json-encoded enhanced messages, suitable for rendering in a UI
EnhancedGetMessages(conversation int, index int, count uint) string
// Server Token APIS
// TODO move these to feature protected interfaces
StoreCachedTokens(tokenServer string, tokens []*privacypass.Token)
// Profile Management
CheckPassword(password string) bool
ChangePassword(oldpassword string, newpassword string, newpasswordAgain string) error
ExportProfile(file string) error
Delete()
PublishEvent(resp event.Event)
RegisterHook(hook ProfileHooks)
UpdateExperiments(enabled bool, experiments map[string]bool)
NotifySettingsUpdate(settings settings.GlobalSettings)
IsFeatureEnabled(featureName string) bool
}
// EnhancedMessage wraps a Cwtch model.Message with some additional data to reduce calls from the UI.
type EnhancedMessage struct {
model.Message
ID int // the actual ID of the message in the database (not the row number)
LocalIndex int // local index in the DB (row #). Can be empty (most calls supply it) but lookup by hash will fill it
ContentHash string
ContactImage string
Attributes map[string]string
}

View File

@ -1,13 +0,0 @@
package peer
import "errors"
// Response is a wrapper to better semantically convey the response type...
type Response error
const errorSeparator = "."
// ConstructResponse is a helper function for creating Response structures.
func ConstructResponse(prefix string, error string) Response {
return errors.New(prefix + errorSeparator + error)
}

View File

@ -0,0 +1,96 @@
package send
import (
"cwtch.im/cwtch/protocol"
"cwtch.im/cwtch/protocol/spam"
"errors"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/utils"
"github.com/s-rah/go-ricochet/wire/control"
)
// CwtchPeerSendChannel is the peer implementation of im.cwtch.server.send
type CwtchPeerSendChannel struct {
channel *channels.Channel
spamGuard spam.Guard
challenge []byte
}
// Type returns the type string for this channel, i.e. "im.cwtch.server.send".
func (cpsc *CwtchPeerSendChannel) Type() string {
return "im.cwtch.server.send"
}
// Closed is called when the channel is closed for any reason.
func (cpsc *CwtchPeerSendChannel) Closed(err error) {
}
// OnlyClientCanOpen - only clients (peers) may open Cwtch server channels.
func (cpsc *CwtchPeerSendChannel) OnlyClientCanOpen() bool {
return true
}
// Singleton - for Cwtch channels there can only be one instance per direction
func (cpsc *CwtchPeerSendChannel) Singleton() bool {
return true
}
// Bidirectional - Cwtch channels are not bidirectional
func (cpsc *CwtchPeerSendChannel) Bidirectional() bool {
return false
}
// RequiresAuthentication - Cwtch channels require no auth
func (cpsc *CwtchPeerSendChannel) RequiresAuthentication() string {
return "none"
}
// OpenInbound should never be called on peers.
func (cpsc *CwtchPeerSendChannel) OpenInbound(channel *channels.Channel, raw *Protocol_Data_Control.OpenChannel) ([]byte, error) {
return nil, errors.New("client does not receive inbound listen channels")
}
// OpenOutbound is used to set up a new send channel and initialize spamguard
func (cpsc *CwtchPeerSendChannel) OpenOutbound(channel *channels.Channel) ([]byte, error) {
cpsc.spamGuard.Difficulty = 2
cpsc.channel = channel
messageBuilder := new(utils.MessageBuilder)
return messageBuilder.OpenChannel(channel.ID, cpsc.Type()), nil
}
// OpenOutboundResult confirms the open channel request and sets the spamguard challenge
func (cpsc *CwtchPeerSendChannel) OpenOutboundResult(err error, crm *Protocol_Data_Control.ChannelResult) {
if err == nil {
if crm.GetOpened() {
cpsc.channel.Pending = false
ce, _ := proto.GetExtension(crm, protocol.E_ServerNonce)
cpsc.challenge = ce.([]byte)[:]
}
}
}
// SendGroupMessage performs the spamguard proof of work and sends a message.
func (cpsc *CwtchPeerSendChannel) SendGroupMessage(gm *protocol.GroupMessage) error {
if cpsc.channel.Pending == false {
sgsolve := cpsc.spamGuard.SolveChallenge(cpsc.challenge, gm.GetCiphertext())
gm.Spamguard = sgsolve[:]
csp := &protocol.CwtchServerPacket{
GroupMessage: gm,
}
packet, _ := proto.Marshal(csp)
cpsc.channel.SendMessage(packet)
cpsc.channel.CloseChannel()
} else {
return errors.New("channel isn't set up yet")
}
return nil
}
// Packet should never be received on a send channel; if the server sends us one, close the channel.
func (cpsc *CwtchPeerSendChannel) Packet(data []byte) {
// If we receive a packet on this channel, close the connection
cpsc.channel.CloseChannel()
}

View File

@ -0,0 +1,108 @@
package send
import (
"cwtch.im/cwtch/protocol"
"cwtch.im/cwtch/protocol/spam"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/channels"
"github.com/s-rah/go-ricochet/wire/control"
"testing"
)
func TestPeerSendChannelAttributes(t *testing.T) {
cssc := new(CwtchPeerSendChannel)
if cssc.Type() != "im.cwtch.server.send" {
t.Errorf("cwtch channel type is incorrect %v", cssc.Type())
}
if !cssc.OnlyClientCanOpen() {
t.Errorf("only clients should be able to open im.cwtch.server.send channel")
}
if cssc.Bidirectional() {
t.Errorf("im.cwtch.server.listen should not be bidirectional")
}
if !cssc.Singleton() {
t.Errorf("im.cwtch.server.listen should be a Singleton")
}
if cssc.RequiresAuthentication() != "none" {
t.Errorf("cwtch channel required auth is incorrect %v", cssc.RequiresAuthentication())
}
}
func TestPeerSendChannelOpenInbound(t *testing.T) {
cssc := new(CwtchPeerSendChannel)
channel := new(channels.Channel)
_, err := cssc.OpenInbound(channel, nil)
if err == nil {
t.Errorf("client implementation of im.cwtch.server.Listen should never open an inbound channel")
}
}
func TestPeerSendChannelClosesOnPacket(t *testing.T) {
pfc := new(CwtchPeerSendChannel)
channel := new(channels.Channel)
closed := false
channel.CloseChannel = func() {
closed = true
}
pfc.OpenOutbound(channel)
pfc.Packet([]byte{})
if !closed {
t.Errorf("send channel should close if server attempts to send packets")
}
}
func TestPeerSendChannel(t *testing.T) {
pfc := new(CwtchPeerSendChannel)
channel := new(channels.Channel)
channel.ID = 3
success := false
var sg spam.Guard
sg.Difficulty = 2
closed := false
channel.CloseChannel = func() {
closed = true
}
channel.SendMessage = func(message []byte) {
packet := new(protocol.CwtchServerPacket)
proto.Unmarshal(message[:], packet)
if packet.GetGroupMessage() != nil {
success = sg.ValidateChallenge(packet.GetGroupMessage().GetCiphertext(), packet.GetGroupMessage().GetSpamguard())
}
}
result, err := pfc.OpenOutbound(channel)
if err != nil {
t.Errorf("expected result but also got non-nil error: result:%v, err: %v", result, err)
}
challenge := sg.GenerateChallenge(3)
control := new(Protocol_Data_Control.Packet)
proto.Unmarshal(challenge[:], control)
pfc.OpenOutboundResult(nil, control.GetChannelResult())
if channel.Pending {
t.Errorf("once opened channel should no longer be pending")
}
gm := &protocol.GroupMessage{Ciphertext: []byte("hello")}
pfc.SendGroupMessage(gm)
if !success {
t.Errorf("send channel should have successfully sent a valid group message")
}
if !closed {
t.Errorf("send channel should have successfully closed after a valid group message")
}
pfc.Closed(nil)
}

View File

@ -1,29 +0,0 @@
package peer
import (
"database/sql"
"fmt"
)
// SQLCreateTableProfileKeyValue creates the Profile Key Value Table
const SQLCreateTableProfileKeyValue = `create table if not exists profile_kv (KeyType text, KeyName text, KeyValue blob, UNIQUE (KeyType,KeyName));`
// SQLCreateTableConversations creates the Conversations Table
const SQLCreateTableConversations = `create table if not exists conversations (ID integer unique primary key autoincrement, Handle text, Attributes blob, ACL blob, Accepted bool);`
// initializeDatabase executes all the sql statements necessary to construct the base of the database.
// db must be open
func initializeDatabase(db *sql.DB) error {
_, err := db.Exec(SQLCreateTableProfileKeyValue)
if err != nil {
return fmt.Errorf("error On Executing Query: %v %v", SQLCreateTableProfileKeyValue, err)
}
_, err = db.Exec(SQLCreateTableConversations)
if err != nil {
return fmt.Errorf("error On Executing Query: %v %v", SQLCreateTableConversations, err)
}
return nil
}

View File

@ -1,329 +0,0 @@
package peer
import (
"archive/tar"
"compress/gzip"
"crypto/rand"
"database/sql"
"encoding/hex"
"errors"
"fmt"
"git.openprivacy.ca/openprivacy/log"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/sha3"
"io"
"os"
"path"
"path/filepath"
"strings"
)
const versionFile = "VERSION"
const version = "2"
const saltFile = "SALT"
const dbFile = "db"
// CreateKeySalt derives a key and salt from a password: returns key, salt, err
func CreateKeySalt(password string) ([32]byte, [128]byte, error) {
var salt [128]byte
if _, err := io.ReadFull(rand.Reader, salt[:]); err != nil {
log.Errorf("Cannot read from random: %v\n", err)
return [32]byte{}, salt, err
}
dk := pbkdf2.Key([]byte(password), salt[:], 4096, 32, sha3.New512)
var dkr [32]byte
copy(dkr[:], dk)
return dkr, salt, nil
}
// createKey derives a key from a password and salt
func createKey(password string, salt []byte) [32]byte {
dk := pbkdf2.Key([]byte(password), salt, 4096, 32, sha3.New512)
var dkr [32]byte
copy(dkr[:], dk)
return dkr
}
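// Illustrative sketch (not part of the original file): the salt produced by CreateKeySalt is written to
// disk alongside the database so that createKey can later re-derive the identical key from the password
// plus the stored salt. The password below is a placeholder:
//
//	key, salt, err := CreateKeySalt("example password")
//	if err == nil {
//		rederived := createKey("example password", salt[:])
//		_ = rederived // rederived == key, so the encrypted database can be reopened with just the password
//	}
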
func initV2Directory(directory, password string) ([32]byte, [128]byte, error) {
os.MkdirAll(directory, 0700)
key, salt, err := CreateKeySalt(password)
if err != nil {
log.Errorf("Could not create key for profile store from password: %v\n", err)
return [32]byte{}, [128]byte{}, err
}
if err = os.WriteFile(path.Join(directory, versionFile), []byte(version), 0600); err != nil {
log.Errorf("Could not write version file: %v", err)
return [32]byte{}, [128]byte{}, err
}
if err = os.WriteFile(path.Join(directory, saltFile), salt[:], 0600); err != nil {
log.Errorf("Could not write salt file: %v", err)
return [32]byte{}, [128]byte{}, err
}
return key, salt, nil
}
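// openEncryptedDatabase reads the stored salt from the profile directory, re-derives the key from the
// supplied password, and opens the encrypted sqlite database at <profileDirectory>/db.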
func openEncryptedDatabase(profileDirectory string, password string, createIfNotExists bool) (*sql.DB, error) {
salt, err := os.ReadFile(path.Join(profileDirectory, saltFile))
if err != nil {
return nil, err
}
key := createKey(password, salt)
dbPath := filepath.Join(profileDirectory, "db")
if !createIfNotExists {
if _, err := os.Stat(dbPath); errors.Is(err, os.ErrNotExist) {
return nil, err
}
}
dbname := fmt.Sprintf("%v?_pragma_key=x'%x'&_pragma_cipher_page_size=8192", dbPath, key)
db, err := sql.Open("sqlite3", dbname)
if err != nil {
log.Errorf("could not open encrypted database", err)
return nil, err
}
return db, nil
}
// CreateEncryptedStorePeer creates a *new* Cwtch Profile backed by an encrypted datastore
func CreateEncryptedStorePeer(profileDirectory string, name string, password string) (CwtchPeer, error) {
log.Debugf("Initializing Encrypted Storage Directory")
_, _, err := initV2Directory(profileDirectory, password)
if err != nil {
return nil, err
}
log.Debugf("Opening Encrypted Database")
db, err := openEncryptedDatabase(profileDirectory, password, true)
if db == nil || err != nil {
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
}
log.Debugf("Initializing Database")
err = initializeDatabase(db)
if err != nil {
db.Close()
return nil, err
}
log.Debugf("Creating Cwtch Profile Backed By Encrypted Database")
cps, err := NewCwtchProfileStorage(db, profileDirectory)
if err != nil {
db.Close()
return nil, err
}
return NewProfileWithEncryptedStorage(name, cps), nil
}
// CreateEncryptedStore creates an encrypted datastore
func CreateEncryptedStore(profileDirectory string, password string) (*CwtchProfileStorage, error) {
log.Debugf("Creating Encrypted Database")
db, err := openEncryptedDatabase(profileDirectory, password, true)
if db == nil || err != nil {
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
}
log.Debugf("Initializing Database")
err = initializeDatabase(db)
if err != nil {
db.Close()
return nil, err
}
log.Debugf("Creating Cwtch Profile Backed By Encrypted Database")
cps, err := NewCwtchProfileStorage(db, profileDirectory)
if err != nil {
db.Close()
return nil, err
}
return cps, nil
}
// FromEncryptedDatabase constructs a Cwtch Profile from an existing Encrypted Database
func FromEncryptedDatabase(profileDirectory string, password string) (CwtchPeer, error) {
log.Debugf("Loading Encrypted Profile: %v", profileDirectory)
db, err := openEncryptedDatabase(profileDirectory, password, false)
if db == nil || err != nil {
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
}
log.Debugf("Initializing Profile from Encrypted Storage")
cps, err := NewCwtchProfileStorage(db, profileDirectory)
if err != nil {
db.Close()
return nil, err
}
return FromEncryptedStorage(cps), nil
}
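// ImportProfile imports a profile from an exported .cwtch backup file: it validates the archive, extracts
// it into profilesDir (unless a profile with the same id already exists), and attempts to open the
// resulting encrypted database with the supplied password, purging the directory again on failure.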
func ImportProfile(exportedCwtchFile string, profilesDir string, password string) (CwtchPeer, error) {
profileID, err := checkCwtchProfileBackupFile(exportedCwtchFile)
if profileID == "" || err != nil {
log.Errorf("%s is an invalid cwtch backup file: %s", profileID, err)
return nil, err
}
log.Debugf("%s is a valid cwtch backup file", profileID)
profileDBFile := filepath.Join(profilesDir, profileID, dbFile)
log.Debugf("checking %v", profileDBFile)
if _, err := os.Stat(profileDBFile); errors.Is(err, os.ErrNotExist) {
// backup is valid and the profile hasn't been imported yet, time to extract and check the password
profileDir := filepath.Join(profilesDir, profileID)
os.MkdirAll(profileDir, 0700)
err := importCwtchProfileBackupFile(exportedCwtchFile, profilesDir)
if err == nil {
profile, err := FromEncryptedDatabase(profileDir, password)
if err == nil {
return profile, err
}
// Otherwise purge
log.Errorf("error importing profile: %v. removing %s", err, profileDir)
os.RemoveAll(profileDir)
return nil, err
}
return nil, err
}
return nil, fmt.Errorf("%s is already a profile for this app", profileID)
}
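// checkCwtchProfileBackupFile verifies that the archive contains exactly one profile directory (a
// 32-character hex name) holding only db, SALT and VERSION files, and returns that profile id.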
func checkCwtchProfileBackupFile(srcFile string) (string, error) {
f, err := os.Open(srcFile)
if err != nil {
return "", err
}
defer f.Close()
gzf, err := gzip.NewReader(f)
if err != nil {
return "", err
}
tarReader := tar.NewReader(gzf)
profileName := ""
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
return "", err
}
switch header.Typeflag {
case tar.TypeDir:
return "", errors.New("invalid cwtch backup file")
case tar.TypeReg:
parts := strings.Split(header.Name, "/")
if len(parts) != 2 {
return "", errors.New("invalid header name")
}
dir := parts[0]
profileFileType := parts[1]
_, hexErr := hex.DecodeString(dir)
if dir == "." || dir == ".." || len(dir) != 32 || hexErr != nil {
return "", errors.New("invalid profile name")
}
if profileName == "" {
profileName = dir
}
if dir != profileName {
return "", errors.New("invalid cwtch backup file")
}
if profileFileType != dbFile && profileFileType != saltFile && profileFileType != versionFile {
return "", errors.New("invalid cwtch backup file")
}
default:
return "", errors.New("invalid cwtch backup file")
}
}
return profileName, nil
}
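// importCwtchProfileBackupFile extracts a previously validated backup archive into profilesDir,
// re-checking each entry name before writing any file to disk.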
func importCwtchProfileBackupFile(srcFile string, profilesDir string) error {
f, err := os.Open(srcFile)
if err != nil {
return err
}
defer f.Close()
gzf, err := gzip.NewReader(f)
if err != nil {
return err
}
tarReader := tar.NewReader(gzf)
profileName := ""
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch header.Typeflag {
case tar.TypeDir:
return errors.New("invalid cwtch backup file")
case tar.TypeReg:
// using split here because we deliberately construct these paths in a cross-platform consistent way
parts := strings.Split(header.Name, "/")
if len(parts) != 2 {
return errors.New("invalid header name")
}
dir := parts[0]
base := parts[1]
_, hexErr := hex.DecodeString(dir)
if dir == "." || dir == ".." || len(dir) != 32 || hexErr != nil {
return errors.New("invalid profile name")
}
if profileName == "" {
profileName = dir
}
if dir != profileName {
return errors.New("invalid cwtch backup file")
}
// here we use filepath.Join to construct a valid directory path
outFile, err := os.Create(filepath.Join(profilesDir, dir, base))
if err != nil {
return fmt.Errorf("error importing cwtch profile file: %s", err)
}
defer outFile.Close()
if _, err := io.Copy(outFile, tarReader); err != nil {
return fmt.Errorf("error importing cwtch profile file: %s", err)
}
default:
return errors.New("invalid cwtch backup file")
}
}
return nil
}
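Taken together these helpers give the encrypted-profile lifecycle: create a profile once, then reload it from disk with the same password on later runs. A minimal sketch, written as if inside this package; the directory and password literals are placeholders:

func profileLifecycleSketch() (CwtchPeer, error) {
	dir := "./storage/alice"       // placeholder profile directory
	password := "example password" // placeholder password

	// First run: create a brand new profile backed by an encrypted database.
	if _, err := CreateEncryptedStorePeer(dir, "alice", password); err != nil {
		return nil, err
	}

	// Later runs: reload the same profile by re-deriving the key from the stored salt and password.
	return FromEncryptedDatabase(dir, password)
}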

View File

@ -0,0 +1,325 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: ControlChannel.proto
/*
Package protocol is a generated protocol buffer package.
It is generated from these files:
ControlChannel.proto
cwtch-profile.proto
group_message.proto
It has these top-level messages:
Packet
OpenChannel
ChannelResult
KeepAlive
EnableFeatures
FeaturesEnabled
CwtchPeerPacket
CwtchIdentity
GroupChatInvite
CwtchServerPacket
FetchMessage
GroupMessage
DecryptedGroupMessage
*/
package protocol
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ChannelResult_CommonError int32
const (
ChannelResult_GenericError ChannelResult_CommonError = 0
ChannelResult_UnknownTypeError ChannelResult_CommonError = 1
ChannelResult_UnauthorizedError ChannelResult_CommonError = 2
ChannelResult_BadUsageError ChannelResult_CommonError = 3
ChannelResult_FailedError ChannelResult_CommonError = 4
)
var ChannelResult_CommonError_name = map[int32]string{
0: "GenericError",
1: "UnknownTypeError",
2: "UnauthorizedError",
3: "BadUsageError",
4: "FailedError",
}
var ChannelResult_CommonError_value = map[string]int32{
"GenericError": 0,
"UnknownTypeError": 1,
"UnauthorizedError": 2,
"BadUsageError": 3,
"FailedError": 4,
}
func (x ChannelResult_CommonError) Enum() *ChannelResult_CommonError {
p := new(ChannelResult_CommonError)
*p = x
return p
}
func (x ChannelResult_CommonError) String() string {
return proto.EnumName(ChannelResult_CommonError_name, int32(x))
}
func (x *ChannelResult_CommonError) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(ChannelResult_CommonError_value, data, "ChannelResult_CommonError")
if err != nil {
return err
}
*x = ChannelResult_CommonError(value)
return nil
}
func (ChannelResult_CommonError) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
type Packet struct {
// Must contain exactly one field
OpenChannel *OpenChannel `protobuf:"bytes,1,opt,name=open_channel,json=openChannel" json:"open_channel,omitempty"`
ChannelResult *ChannelResult `protobuf:"bytes,2,opt,name=channel_result,json=channelResult" json:"channel_result,omitempty"`
KeepAlive *KeepAlive `protobuf:"bytes,3,opt,name=keep_alive,json=keepAlive" json:"keep_alive,omitempty"`
EnableFeatures *EnableFeatures `protobuf:"bytes,4,opt,name=enable_features,json=enableFeatures" json:"enable_features,omitempty"`
FeaturesEnabled *FeaturesEnabled `protobuf:"bytes,5,opt,name=features_enabled,json=featuresEnabled" json:"features_enabled,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Packet) Reset() { *m = Packet{} }
func (m *Packet) String() string { return proto.CompactTextString(m) }
func (*Packet) ProtoMessage() {}
func (*Packet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Packet) GetOpenChannel() *OpenChannel {
if m != nil {
return m.OpenChannel
}
return nil
}
func (m *Packet) GetChannelResult() *ChannelResult {
if m != nil {
return m.ChannelResult
}
return nil
}
func (m *Packet) GetKeepAlive() *KeepAlive {
if m != nil {
return m.KeepAlive
}
return nil
}
func (m *Packet) GetEnableFeatures() *EnableFeatures {
if m != nil {
return m.EnableFeatures
}
return nil
}
func (m *Packet) GetFeaturesEnabled() *FeaturesEnabled {
if m != nil {
return m.FeaturesEnabled
}
return nil
}
type OpenChannel struct {
ChannelIdentifier *int32 `protobuf:"varint,1,req,name=channel_identifier,json=channelIdentifier" json:"channel_identifier,omitempty"`
ChannelType *string `protobuf:"bytes,2,req,name=channel_type,json=channelType" json:"channel_type,omitempty"`
proto.XXX_InternalExtensions `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *OpenChannel) Reset() { *m = OpenChannel{} }
func (m *OpenChannel) String() string { return proto.CompactTextString(m) }
func (*OpenChannel) ProtoMessage() {}
func (*OpenChannel) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
var extRange_OpenChannel = []proto.ExtensionRange{
{100, 536870911},
}
func (*OpenChannel) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_OpenChannel
}
func (m *OpenChannel) GetChannelIdentifier() int32 {
if m != nil && m.ChannelIdentifier != nil {
return *m.ChannelIdentifier
}
return 0
}
func (m *OpenChannel) GetChannelType() string {
if m != nil && m.ChannelType != nil {
return *m.ChannelType
}
return ""
}
type ChannelResult struct {
ChannelIdentifier *int32 `protobuf:"varint,1,req,name=channel_identifier,json=channelIdentifier" json:"channel_identifier,omitempty"`
Opened *bool `protobuf:"varint,2,req,name=opened" json:"opened,omitempty"`
CommonError *ChannelResult_CommonError `protobuf:"varint,3,opt,name=common_error,json=commonError,enum=protocol.ChannelResult_CommonError" json:"common_error,omitempty"`
proto.XXX_InternalExtensions `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *ChannelResult) Reset() { *m = ChannelResult{} }
func (m *ChannelResult) String() string { return proto.CompactTextString(m) }
func (*ChannelResult) ProtoMessage() {}
func (*ChannelResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
var extRange_ChannelResult = []proto.ExtensionRange{
{100, 536870911},
}
func (*ChannelResult) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_ChannelResult
}
func (m *ChannelResult) GetChannelIdentifier() int32 {
if m != nil && m.ChannelIdentifier != nil {
return *m.ChannelIdentifier
}
return 0
}
func (m *ChannelResult) GetOpened() bool {
if m != nil && m.Opened != nil {
return *m.Opened
}
return false
}
func (m *ChannelResult) GetCommonError() ChannelResult_CommonError {
if m != nil && m.CommonError != nil {
return *m.CommonError
}
return ChannelResult_GenericError
}
type KeepAlive struct {
ResponseRequested *bool `protobuf:"varint,1,req,name=response_requested,json=responseRequested" json:"response_requested,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *KeepAlive) Reset() { *m = KeepAlive{} }
func (m *KeepAlive) String() string { return proto.CompactTextString(m) }
func (*KeepAlive) ProtoMessage() {}
func (*KeepAlive) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *KeepAlive) GetResponseRequested() bool {
if m != nil && m.ResponseRequested != nil {
return *m.ResponseRequested
}
return false
}
type EnableFeatures struct {
Feature []string `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
proto.XXX_InternalExtensions `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *EnableFeatures) Reset() { *m = EnableFeatures{} }
func (m *EnableFeatures) String() string { return proto.CompactTextString(m) }
func (*EnableFeatures) ProtoMessage() {}
func (*EnableFeatures) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
var extRange_EnableFeatures = []proto.ExtensionRange{
{100, 536870911},
}
func (*EnableFeatures) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_EnableFeatures
}
func (m *EnableFeatures) GetFeature() []string {
if m != nil {
return m.Feature
}
return nil
}
type FeaturesEnabled struct {
Feature []string `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
proto.XXX_InternalExtensions `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *FeaturesEnabled) Reset() { *m = FeaturesEnabled{} }
func (m *FeaturesEnabled) String() string { return proto.CompactTextString(m) }
func (*FeaturesEnabled) ProtoMessage() {}
func (*FeaturesEnabled) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
var extRange_FeaturesEnabled = []proto.ExtensionRange{
{100, 536870911},
}
func (*FeaturesEnabled) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_FeaturesEnabled
}
func (m *FeaturesEnabled) GetFeature() []string {
if m != nil {
return m.Feature
}
return nil
}
func init() {
proto.RegisterType((*Packet)(nil), "protocol.Packet")
proto.RegisterType((*OpenChannel)(nil), "protocol.OpenChannel")
proto.RegisterType((*ChannelResult)(nil), "protocol.ChannelResult")
proto.RegisterType((*KeepAlive)(nil), "protocol.KeepAlive")
proto.RegisterType((*EnableFeatures)(nil), "protocol.EnableFeatures")
proto.RegisterType((*FeaturesEnabled)(nil), "protocol.FeaturesEnabled")
proto.RegisterEnum("protocol.ChannelResult_CommonError", ChannelResult_CommonError_name, ChannelResult_CommonError_value)
}
func init() { proto.RegisterFile("ControlChannel.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 461 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4d, 0x8f, 0xd3, 0x30,
0x10, 0x25, 0xe9, 0xee, 0x92, 0x4e, 0xfa, 0x91, 0x9a, 0x5d, 0x30, 0xb7, 0x12, 0x2e, 0x15, 0x12,
0x3d, 0x54, 0x20, 0x21, 0x0e, 0x48, 0x4b, 0xd9, 0x22, 0xc4, 0x01, 0x64, 0xd1, 0x73, 0x64, 0x92,
0x29, 0x1b, 0x35, 0x6b, 0x1b, 0xc7, 0x05, 0x2d, 0xa7, 0xfe, 0x0e, 0xfe, 0x0c, 0x7f, 0x0d, 0xc5,
0x89, 0x9b, 0x14, 0x09, 0x09, 0x4e, 0xc9, 0x9b, 0xf7, 0xde, 0x8c, 0xfc, 0x66, 0xe0, 0x7c, 0x29,
0x85, 0xd1, 0xb2, 0x58, 0x5e, 0x73, 0x21, 0xb0, 0x98, 0x2b, 0x2d, 0x8d, 0x24, 0x81, 0xfd, 0xa4,
0xb2, 0x88, 0x7f, 0xf9, 0x70, 0xf6, 0x91, 0xa7, 0x5b, 0x34, 0xe4, 0x05, 0x0c, 0xa4, 0x42, 0x91,
0xa4, 0xb5, 0x94, 0x7a, 0x53, 0x6f, 0x16, 0x2e, 0x2e, 0xe6, 0x4e, 0x3b, 0xff, 0xa0, 0x50, 0x34,
0x7d, 0x58, 0x28, 0x5b, 0x40, 0x5e, 0xc1, 0xa8, 0x31, 0x25, 0x1a, 0xcb, 0x5d, 0x61, 0xa8, 0x6f,
0xbd, 0x0f, 0x5a, 0xaf, 0xf3, 0x59, 0x9a, 0x0d, 0xd3, 0x2e, 0x24, 0x0b, 0x80, 0x2d, 0xa2, 0x4a,
0x78, 0x91, 0x7f, 0x43, 0xda, 0xb3, 0xde, 0x7b, 0xad, 0xf7, 0x3d, 0xa2, 0xba, 0xac, 0x28, 0xd6,
0xdf, 0xba, 0x5f, 0x72, 0x09, 0x63, 0x14, 0xfc, 0x73, 0x81, 0xc9, 0x06, 0xb9, 0xd9, 0x69, 0x2c,
0xe9, 0x89, 0x35, 0xd2, 0xd6, 0x78, 0x65, 0x05, 0xab, 0x86, 0x67, 0x23, 0x3c, 0xc2, 0xe4, 0x0d,
0x44, 0xce, 0x9b, 0xd4, 0x54, 0x46, 0x4f, 0x6d, 0x8f, 0x87, 0x6d, 0x0f, 0xa7, 0xae, 0x7b, 0x65,
0x6c, 0xbc, 0x39, 0x2e, 0xc4, 0x39, 0x84, 0x9d, 0x60, 0xc8, 0x53, 0x20, 0x2e, 0x8b, 0x3c, 0x43,
0x61, 0xf2, 0x4d, 0x8e, 0x9a, 0x7a, 0x53, 0x7f, 0x76, 0xca, 0x26, 0x0d, 0xf3, 0xee, 0x40, 0x90,
0x47, 0x30, 0x70, 0x72, 0x73, 0xab, 0x90, 0xfa, 0x53, 0x7f, 0xd6, 0x67, 0x61, 0x53, 0xfb, 0x74,
0xab, 0xf0, 0x49, 0x10, 0x64, 0xd1, 0x7e, 0xbf, 0xdf, 0xfb, 0xf1, 0x4f, 0x1f, 0x86, 0x47, 0x41,
0xfe, 0xef, 0xb4, 0xfb, 0x70, 0x56, 0xed, 0x0d, 0x33, 0x3b, 0x27, 0x60, 0x0d, 0x22, 0x2b, 0x18,
0xa4, 0xf2, 0xe6, 0x46, 0x8a, 0x04, 0xb5, 0x96, 0xda, 0xae, 0x60, 0xb4, 0x78, 0xfc, 0x97, 0xf5,
0xcd, 0x97, 0x56, 0x7b, 0x55, 0x49, 0x59, 0x98, 0xb6, 0x20, 0x56, 0x10, 0x76, 0x38, 0x12, 0xc1,
0xe0, 0x2d, 0x0a, 0xd4, 0x79, 0x6a, 0x71, 0x74, 0x87, 0x9c, 0x43, 0xb4, 0x16, 0x5b, 0x21, 0xbf,
0x8b, 0xea, 0x69, 0x75, 0xd5, 0x23, 0x17, 0x30, 0x59, 0x0b, 0xbe, 0x33, 0xd7, 0x52, 0xe7, 0x3f,
0x30, 0xab, 0xcb, 0x3e, 0x99, 0xc0, 0xf0, 0x35, 0xcf, 0xd6, 0x25, 0xff, 0xd2, 0x28, 0x7b, 0x64,
0x0c, 0xe1, 0x8a, 0xe7, 0x85, 0xd3, 0x9c, 0x74, 0xc2, 0x79, 0x09, 0xfd, 0xc3, 0xa1, 0x54, 0xb9,
0x68, 0x2c, 0x95, 0x14, 0x25, 0x26, 0x1a, 0xbf, 0xee, 0xb0, 0x34, 0x98, 0xd9, 0x5c, 0x02, 0x36,
0x71, 0x0c, 0x73, 0x44, 0xfc, 0x0c, 0x46, 0xc7, 0xb7, 0x42, 0x28, 0xdc, 0x6d, 0x16, 0x4d, 0xbd,
0x69, 0x6f, 0xd6, 0x67, 0x0e, 0x76, 0x26, 0x3e, 0x87, 0xf1, 0x1f, 0xd7, 0xf1, 0x2f, 0xb6, 0xdf,
0x01, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x32, 0x16, 0x1e, 0x93, 0x03, 0x00, 0x00,
}

View File

@ -0,0 +1,53 @@
syntax = "proto2";
package protocol;
message Packet {
// Must contain exactly one field
optional OpenChannel open_channel = 1;
optional ChannelResult channel_result = 2;
optional KeepAlive keep_alive = 3;
optional EnableFeatures enable_features = 4;
optional FeaturesEnabled features_enabled = 5;
}
message OpenChannel {
required int32 channel_identifier = 1; // Arbitrary unique identifier for this channel instance
required string channel_type = 2; // String identifying channel type; e.g. im.ricochet.chat
// It is valid to extend the OpenChannel message to add fields specific
// to the requested channel_type.
extensions 100 to max;
}
message ChannelResult {
required int32 channel_identifier = 1; // Matching the value from OpenChannel
required bool opened = 2; // If the channel is now open
enum CommonError {
GenericError = 0;
UnknownTypeError = 1;
UnauthorizedError = 2;
BadUsageError = 3;
FailedError = 4;
}
optional CommonError common_error = 3;
// As with OpenChannel, it is valid to extend this message with fields specific
// to the channel type.
extensions 100 to max;
}
message KeepAlive {
required bool response_requested = 1;
}
message EnableFeatures {
repeated string feature = 1;
extensions 100 to max;
}
message FeaturesEnabled {
repeated string feature = 1;
extensions 100 to max;
}
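Every control packet carries exactly one of the messages defined above. A minimal sketch of building and serializing an OpenChannel request from a consuming Go program, using the generated types shown earlier in this diff; the channel identifier and type are illustrative:

package main

import (
	"fmt"

	"cwtch.im/cwtch/protocol"
	"github.com/golang/protobuf/proto"
)

func main() {
	// Build a control packet containing exactly one OpenChannel message.
	pkt := &protocol.Packet{
		OpenChannel: &protocol.OpenChannel{
			ChannelIdentifier: proto.Int32(3),
			ChannelType:       proto.String("im.cwtch.peer"),
		},
	}
	raw, err := proto.Marshal(pkt)
	fmt.Println(len(raw), err)
}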

View File

@ -1,811 +0,0 @@
package connections
import (
"encoding/base64"
"encoding/json"
"fmt"
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/model"
"cwtch.im/cwtch/protocol/files"
"cwtch.im/cwtch/protocol/groups"
pmodel "cwtch.im/cwtch/protocol/model"
"git.openprivacy.ca/cwtch.im/tapir"
"git.openprivacy.ca/cwtch.im/tapir/applications"
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
"git.openprivacy.ca/cwtch.im/tapir/primitives"
"git.openprivacy.ca/openprivacy/connectivity"
torProvider "git.openprivacy.ca/openprivacy/connectivity/tor"
"git.openprivacy.ca/openprivacy/log"
"github.com/gtank/ristretto255"
"golang.org/x/crypto/ed25519"
)
// 32 from tor/src/app/config/config.c MaxClientCircuitsPending
// we lower a bit because there's a lot of spillage
// - a SOCKS timeout doesn't mean tor has stopped trying, so timed-out attempts are a large source
// - multiple profiles running at once are another large source
// - second order connections, like a token service's secondary servers, aren't tracked in our system and add a few extra periodically
const TorMaxPendingConns = 28
type connectionLockedService struct {
service tapir.Service
connectingLock sync.Mutex
}
type engine struct {
queue event.Queue
// Engine Attributes
identity primitives.Identity
acn connectivity.ACN
// Authorization list of contacts to authorization status
authorizations sync.Map // string(onion) => model.Authorization
// Block Unknown Contacts
blockUnknownContacts atomic.Bool
// Pointer to the Global Event Manager
eventManager event.Manager
// Nextgen Tapir Service
service tapir.Service
getValRequests sync.Map // [string]string eventID:Data
// Nextgen Tapir Service
ephemeralServices map[string]*connectionLockedService //sync.Map // string(onion) => tapir.Service
ephemeralServicesLock sync.Mutex
// Required for listen(), inaccessible from identity
privateKey ed25519.PrivateKey
// file sharing subsystem is responsible for maintaining active shares and downloads
filesharingSubSystem files.FileSharingSubSystem
tokenManagers sync.Map // [tokenService][]TokenManager
shuttingDown atomic.Bool
onSendMessage func(connection tapir.Connection, message []byte) error
}
// Engine (ProtocolEngine) encapsulates the logic necessary to make and receive Cwtch connections.
// Note: ProtocolEngine doesn't have access to any information necessary to encrypt or decrypt GroupMessages
// Protocol Engine *can* associate Group Identifiers with Group Servers, although we don't currently make use of this fact
// other than to route errors back to the UI.
type Engine interface {
ACN() connectivity.ACN
EventManager() event.Manager
Shutdown()
}
// NewProtocolEngine initializes a new engine that runs Cwtch using the given parameters
func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateKey, acn connectivity.ACN, eventManager event.Manager, peerAuthorizations map[string]model.Authorization, engineHooks EngineHooks) Engine {
engine := new(engine)
engine.identity = identity
engine.privateKey = privateKey
engine.ephemeralServices = make(map[string]*connectionLockedService)
engine.queue = event.NewQueue()
// the standard send message function
engine.onSendMessage = engineHooks.SendPeerMessage
go engine.eventHandler()
engine.acn = acn
// Init the Server running the Simple App.
engine.service = new(tor.BaseOnionService)
engine.service.Init(acn, privateKey, &identity)
engine.eventManager = eventManager
engine.eventManager.Subscribe(event.ProtocolEngineStartListen, engine.queue)
engine.eventManager.Subscribe(event.ProtocolEngineShutdown, engine.queue)
engine.eventManager.Subscribe(event.PeerRequest, engine.queue)
engine.eventManager.Subscribe(event.InvitePeerToGroup, engine.queue)
engine.eventManager.Subscribe(event.JoinServer, engine.queue)
engine.eventManager.Subscribe(event.LeaveServer, engine.queue)
engine.eventManager.Subscribe(event.SendMessageToGroup, engine.queue)
engine.eventManager.Subscribe(event.SendMessageToPeer, engine.queue)
engine.eventManager.Subscribe(event.SendGetValMessageToPeer, engine.queue)
engine.eventManager.Subscribe(event.SendRetValMessageToPeer, engine.queue)
engine.eventManager.Subscribe(event.DeleteContact, engine.queue)
engine.eventManager.Subscribe(event.UpdateConversationAuthorization, engine.queue)
engine.eventManager.Subscribe(event.BlockUnknownPeers, engine.queue)
engine.eventManager.Subscribe(event.AllowUnknownPeers, engine.queue)
engine.eventManager.Subscribe(event.DisconnectPeerRequest, engine.queue)
engine.eventManager.Subscribe(event.DisconnectServerRequest, engine.queue)
// File Handling
engine.eventManager.Subscribe(event.ShareManifest, engine.queue)
engine.eventManager.Subscribe(event.StopFileShare, engine.queue)
engine.eventManager.Subscribe(event.StopAllFileShares, engine.queue)
engine.eventManager.Subscribe(event.ManifestSizeReceived, engine.queue)
engine.eventManager.Subscribe(event.ManifestSaved, engine.queue)
// Token Server
engine.eventManager.Subscribe(event.MakeAntispamPayment, engine.queue)
for peer, authorization := range peerAuthorizations {
engine.authorizations.Store(peer, authorization)
}
return engine
}
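// Illustrative usage sketch (not part of the original file): one plausible way a caller might
// construct, start, and later tear down a ProtocolEngine. The identity, key material, ACN and
// event manager are assumed to be supplied by the surrounding application; the function name is hypothetical.
func sketchEngineLifecycle(identity primitives.Identity, privateKey ed25519.PrivateKey, acn connectivity.ACN, eventManager event.Manager) {
	// No prior peer authorizations; use the default send hooks.
	engine := NewProtocolEngine(identity, privateKey, acn, eventManager, make(map[string]model.Authorization), DefaultEngineHooks{})
	// Ask the engine to start its onion listener for incoming Cwtch connections.
	eventManager.Publish(event.NewEvent(event.ProtocolEngineStartListen, map[event.Field]string{}))
	// ... application runs ...
	engine.Shutdown()
}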
func (e *engine) ACN() connectivity.ACN {
return e.acn
}
func (e *engine) EventManager() event.Manager {
return e.eventManager
}
// eventHandler process events from other subsystems
func (e *engine) eventHandler() {
log.Debugf("restartFlow Launching ProtocolEngine listener")
for {
ev := e.queue.Next()
// optimistic shutdown...
if e.shuttingDown.Load() {
return
}
switch ev.EventType {
case event.StatusRequest:
e.eventManager.Publish(event.Event{EventType: event.ProtocolEngineStatus, EventID: ev.EventID})
case event.PeerRequest:
log.Debugf("restartFlow Handling Peer Request")
if torProvider.IsValidHostname(ev.Data[event.RemotePeer]) {
go e.peerWithOnion(ev.Data[event.RemotePeer])
}
case event.InvitePeerToGroup:
err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: event.ContextInvite, Data: []byte(ev.Data[event.GroupInvite])})
if err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.InvitePeerToGroup), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: "peer is offline or the connection has yet to finalize"}))
}
case event.JoinServer:
signature, err := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
if err != nil {
// will result in a full sync
signature = []byte{}
}
// if we have been sent cached tokens, also deserialize them
cachedTokensJson := ev.Data[event.CachedTokens]
var cachedTokens []*privacypass.Token
if len(cachedTokensJson) != 0 {
json.Unmarshal([]byte(cachedTokensJson), &cachedTokens)
}
// create a new token handler...
e.NewTokenHandler(ev.Data[event.ServerTokenOnion], cachedTokens)
go e.peerWithTokenServer(ev.Data[event.GroupServer], ev.Data[event.ServerTokenOnion], ev.Data[event.ServerTokenY], signature, cachedTokens)
case event.MakeAntispamPayment:
go e.makeAntispamPayment(ev.Data[event.GroupServer])
case event.LeaveServer:
e.leaveServer(ev.Data[event.GroupServer])
case event.DeleteContact:
onion := ev.Data[event.RemotePeer]
// We remove this peer from our authorizations list, which will prevent them from contacting us if we have "block unknown peers" turned on.
e.authorizations.Delete(ev.Data[event.RemotePeer])
e.deleteConnection(onion)
case event.DisconnectPeerRequest:
e.deleteConnection(ev.Data[event.RemotePeer])
case event.DisconnectServerRequest:
e.leaveServer(ev.Data[event.GroupServer])
case event.SendMessageToGroup:
ciphertext, _ := base64.StdEncoding.DecodeString(ev.Data[event.Ciphertext])
signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
// launch a goroutine to post to the server
go e.sendMessageToGroup(ev.Data[event.GroupID], ev.Data[event.GroupServer], ciphertext, signature, 0)
case event.SendMessageToPeer:
// TODO: remove this passthrough once the UI is integrated.
context, ok := ev.Data[event.EventContext]
if !ok {
context = event.ContextRaw
}
if err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: context, Data: []byte(ev.Data[event.Data])}); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.SendMessageToPeer), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: "peer is offline or the connection has yet to finalize"}))
}
case event.SendGetValMessageToPeer:
if err := e.sendGetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Scope], ev.Data[event.Path]); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.SendGetValMessageToPeer), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
}
case event.SendRetValMessageToPeer:
if err := e.sendRetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Data], ev.Data[event.Exists]); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.SendRetValMessageToPeer), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
}
case event.UpdateConversationAuthorization:
accepted, _ := strconv.ParseBool(ev.Data[event.Accepted])
blocked, _ := strconv.ParseBool(ev.Data[event.Blocked])
auth := model.AuthUnknown
if blocked {
auth = model.AuthBlocked
} else if accepted {
auth = model.AuthApproved
}
e.authorizations.Store(ev.Data[event.RemotePeer], auth)
if auth == model.AuthBlocked {
connection, err := e.service.GetConnection(ev.Data[event.RemotePeer])
if connection != nil && err == nil {
connection.Close()
}
// Explicitly send a disconnected event. If we don't do this here then the UI can wait for a while before
// an ongoing Open() connection fails, and so the user will see a blocked peer as still connecting (because
// there isn't an active connection and we are stuck waiting for tor to time out).
e.peerDisconnected(ev.Data[event.RemotePeer])
}
case event.AllowUnknownPeers:
log.Debugf("%v now allows unknown connections", e.identity.Hostname())
e.blockUnknownContacts.Store(false)
case event.BlockUnknownPeers:
log.Debugf("%v now forbids unknown connections", e.identity.Hostname())
e.blockUnknownContacts.Store(true)
case event.ProtocolEngineStartListen:
go e.listenFn()
case event.ShareManifest:
e.filesharingSubSystem.ShareFile(ev.Data[event.FileKey], ev.Data[event.SerializedManifest])
case event.StopFileShare:
e.filesharingSubSystem.StopFileShare(ev.Data[event.FileKey])
case event.StopAllFileShares:
e.filesharingSubSystem.StopAllFileShares()
case event.ManifestSizeReceived:
handle := ev.Data[event.Handle]
key := ev.Data[event.FileKey]
size, _ := strconv.Atoi(ev.Data[event.ManifestSize])
if err := e.sendPeerMessage(handle, e.filesharingSubSystem.FetchManifest(key, uint64(size))); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
}
case event.ManifestSaved:
handle := ev.Data[event.Handle]
key := ev.Data[event.FileKey]
serializedManifest := ev.Data[event.SerializedManifest]
tempFile := ev.Data[event.TempFile]
title := ev.Data[event.NameSuggestion]
// Another optimistic check here. Technically a Cwtch profile should not request a manifest for an already-downloaded
// file, but if it does then we should check whether the file exists up front. If it does then announce that the download
// is complete.
if _, filePath, success := e.filesharingSubSystem.VerifyFile(key); success {
log.Debugf("file verified and downloaded!")
e.eventManager.Publish(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: key, event.FilePath: filePath, event.TempFile: tempFile}))
} else {
// NOTE: for now there will probably only ever be a single chunk request. When we enable group
// sharing and rehosting then this loop will serve as a way of splitting the request among multiple
// contacts
for _, message := range e.filesharingSubSystem.CompileChunkRequests(key, serializedManifest, tempFile, title) {
if err := e.sendPeerMessage(handle, message); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
}
}
}
case event.ProtocolEngineShutdown:
return
default:
return
}
}
}
func (e *engine) isBlocked(onion string) bool {
authorization, known := e.authorizations.Load(onion)
if !known {
// if we block unknown peers we will block this contact
return e.blockUnknownContacts.Load()
}
return authorization.(model.Authorization) == model.AuthBlocked
}
func (e *engine) isAllowed(onion string) bool {
authorization, known := e.authorizations.Load(onion)
if !known {
log.Errorf("attempted to lookup authorization of onion not in map...that should never happen")
return false
}
if e.blockUnknownContacts.Load() {
return authorization.(model.Authorization) == model.AuthApproved
}
return authorization.(model.Authorization) != model.AuthBlocked
}
func (e *engine) createPeerTemplate() *PeerApp {
peerAppTemplate := new(PeerApp)
peerAppTemplate.IsBlocked = e.isBlocked
peerAppTemplate.IsAllowed = e.isAllowed
peerAppTemplate.MessageHandler = e.handlePeerMessage
peerAppTemplate.OnAcknowledgement = e.ignoreOnShutdown2(e.peerAck)
peerAppTemplate.OnAuth = e.ignoreOnShutdown(e.peerAuthed)
peerAppTemplate.OnConnecting = e.ignoreOnShutdown(e.peerConnecting)
peerAppTemplate.OnClose = e.ignoreOnShutdown(e.peerDisconnected)
peerAppTemplate.OnSendMessage = e.onSendMessage
return peerAppTemplate
}
// Listen sets up an onion listener to process incoming cwtch messages
func (e *engine) listenFn() {
err := e.service.Listen(e.createPeerTemplate())
if !e.shuttingDown.Load() {
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineStopped, map[event.Field]string{event.Identity: e.identity.Hostname(), event.Error: err.Error()}))
}
}
// Shutdown tears down the eventHandler goroutine
func (e *engine) Shutdown() {
// don't accept any more events...
e.queue.Publish(event.NewEvent(event.ProtocolEngineShutdown, map[event.Field]string{}))
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineShutdown, map[event.Field]string{}))
e.service.Shutdown()
e.shuttingDown.Store(true)
e.ephemeralServicesLock.Lock()
defer e.ephemeralServicesLock.Unlock()
for _, connection := range e.ephemeralServices {
log.Infof("shutting down ephemeral service")
// workaround: service.Shutdown() can block for a long time if it is Open()ing a new connection; putting it in a
// goroutine means we can perform this operation and let each service shut down in its own time (or until the app exits)
conn := connection // don't capture loop variable
go func() {
conn.connectingLock.Lock()
conn.service.Shutdown()
conn.connectingLock.Unlock()
}()
}
e.queue.Shutdown()
}
// peerWithOnion is the entry point for cwtchPeer relationships
// needs to be run in a goroutine as it will block on Open.
func (e *engine) peerWithOnion(onion string) {
log.Debugf("Called PeerWithOnion for %v", onion)
if !e.isBlocked(onion) {
e.ignoreOnShutdown(e.peerConnecting)(onion)
connected, err := e.service.Connect(onion, e.createPeerTemplate())
if connected && err == nil {
// on success CwtchPeer will handle Auth and other status updates
// early exit from this function...
return
}
// If we are already connected...check if we are authed and issue an auth event
// (This allows the ui to be stateless)
if connected && err != nil {
conn, err := e.service.WaitForCapabilityOrClose(onion, cwtchCapability)
if err == nil {
if conn.HasCapability(cwtchCapability) {
e.ignoreOnShutdown(e.peerAuthed)(onion)
return
}
log.Errorf("PeerWithOnion something went very wrong...%v %v", onion, err)
if conn != nil {
conn.Close()
}
e.ignoreOnShutdown(e.peerDisconnected)(onion)
} else {
e.ignoreOnShutdown(e.peerDisconnected)(onion)
}
}
}
e.ignoreOnShutdown(e.peerDisconnected)(onion)
}
func (e *engine) makeAntispamPayment(onion string) {
log.Debugf("making antispam payment")
e.ephemeralServicesLock.Lock()
ephemeralService, ok := e.ephemeralServices[onion]
e.ephemeralServicesLock.Unlock()
if ephemeralService == nil || !ok {
log.Debugf("could not find associated group for antispam payment")
return
}
// Before doing anything, send an event with the current number of tokens.
// This may unblock downstream processes that don't have an accurate token count.
e.PokeTokenCount(onion)
conn, err := ephemeralService.service.GetConnection(onion)
if err == nil {
tokenApp, ok := (conn.App()).(*TokenBoardClient)
if ok {
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenApp.tokenServiceOnion, NewTokenManager())
tokenManager := tokenManagerPointer.(*TokenManager)
log.Debugf("checking antispam tokens %v", tokenManager.NumTokens())
if tokenManager.NumTokens() < 5 {
go tokenApp.PurchaseTokens()
}
}
}
}
// peerWithTokenServer is the entry point for cwtchPeer - server relationships
// needs to be run in a goroutine as it will block on Open.
func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, tokenServerY string, lastKnownSignature []byte, cachedTokens []*privacypass.Token) {
e.ephemeralServicesLock.Lock()
_, exists := e.ephemeralServices[onion]
if exists {
e.ephemeralServicesLock.Unlock()
log.Debugf("attempted to join a server with an active connection")
return
}
connectionService := &connectionLockedService{service: new(tor.BaseOnionService)}
e.ephemeralServices[onion] = connectionService
connectionService.connectingLock.Lock()
defer connectionService.connectingLock.Unlock()
e.ephemeralServicesLock.Unlock()
log.Debugf("Peering with Token Server %v %v", onion, tokenServerOnion)
e.ignoreOnShutdown(e.serverConnecting)(onion)
// Create a new ephemeral service for this connection
eid, epk := primitives.InitializeEphemeralIdentity()
connectionService.service.Init(e.acn, epk, &eid)
Y := new(ristretto255.Element)
Y.UnmarshalText([]byte(tokenServerY))
connected, err := connectionService.service.Connect(onion, NewTokenBoardClient(e.acn, Y, tokenServerOnion, lastKnownSignature, e))
// If we are already connected...check if we are authed and issue an auth event
// (This allows the ui to be stateless)
if connected && err != nil {
conn, err := connectionService.service.GetConnection(onion)
if err == nil {
// If the server is synced, resend the synced status update
if conn.HasCapability(groups.CwtchServerSyncedCapability) {
e.ignoreOnShutdown(e.serverSynced)(onion)
return
}
// If the server is authed, resend the auth status update
if conn.HasCapability(applications.AuthCapability) {
// Resend the authed event...
e.ignoreOnShutdown(e.serverAuthed)(onion)
return
}
// if we are not authed or synced then we are stuck...
e.ignoreOnShutdown(e.serverConnecting)(onion)
log.Errorf("server connection attempt issued to active connection")
}
}
// Only issue a disconnected error if we are disconnected (Connect will fail if a connection already exists)
if !connected && err != nil {
e.ignoreOnShutdown(e.serverDisconnected)(onion)
}
}
func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
return func(x string) {
if !e.shuttingDown.Load() {
f(x)
}
}
}
func (e *engine) ignoreOnShutdown2(f func(string, string)) func(string, string) {
return func(x, y string) {
if !e.shuttingDown.Load() {
f(x, y)
}
}
}
func (e *engine) peerAuthed(onion string) {
_, known := e.authorizations.Load(onion)
if !known {
e.authorizations.Store(onion, model.AuthUnknown)
}
// FIXME: This call uses WAY too much memory, and was responsible for the vast majority
// of allocations in the UI
// This is because Bine ends up reading the entire response into memory and then passes that back
// into Connectivity which eventually extracts just what it needs.
// Ideally we would just read from the control stream directly into reusable buffers.
//details, err := e.acn.GetInfo(onion)
//if err == nil {
// if hops, exists := details["circuit"]; exists {
// e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
// event.Handle: onion,
// event.Key: "circuit",
// event.Data: hops,
// }))
// }
//} else {
// log.Errorf("error getting info for onion %v", err)
//}
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
event.RemotePeer: string(onion),
event.ConnectionState: ConnectionStateName[AUTHENTICATED],
}))
}
func (e *engine) peerConnecting(onion string) {
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
event.RemotePeer: onion,
event.ConnectionState: ConnectionStateName[CONNECTING],
}))
}
func (e *engine) serverConnecting(onion string) {
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
event.GroupServer: onion,
event.ConnectionState: ConnectionStateName[CONNECTING],
}))
}
func (e *engine) serverAuthed(onion string) {
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
event.GroupServer: onion,
event.ConnectionState: ConnectionStateName[AUTHENTICATED],
}))
}
func (e *engine) serverSynced(onion string) {
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
event.GroupServer: onion,
event.ConnectionState: ConnectionStateName[SYNCED],
}))
}
func (e *engine) serverDisconnected(onion string) {
e.leaveServer(onion)
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
event.GroupServer: onion,
event.ConnectionState: ConnectionStateName[DISCONNECTED],
}))
}
func (e *engine) peerAck(onion string, eventID string) {
e.eventManager.Publish(event.NewEvent(event.PeerAcknowledgement, map[event.Field]string{
event.EventID: eventID,
event.RemotePeer: onion,
}))
}
func (e *engine) peerDisconnected(onion string) {
// Clean up any existing get value requests...
e.getValRequests.Range(func(key, value interface{}) bool {
keyString := key.(string)
if strings.HasPrefix(keyString, onion) {
e.getValRequests.Delete(keyString)
}
return true
})
// Purge circuit information...
e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
event.Handle: onion,
event.Key: "circuit",
event.Data: "",
}))
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
event.RemotePeer: string(onion),
event.ConnectionState: ConnectionStateName[DISCONNECTED],
}))
}
func (e *engine) sendGetValToPeer(eventID, onion, scope, path string) error {
log.Debugf("sendGetValMessage to peer %v %v.%v\n", onion, scope, path)
getVal := peerGetVal{Scope: scope, Path: path}
message, err := json.Marshal(getVal)
if err != nil {
return err
}
key := onion + eventID
e.getValRequests.Store(key, message)
err = e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextGetVal, Data: message})
if err != nil {
e.getValRequests.Delete(key)
}
return err
}
func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
log.Debugf("sendRetValMessage to peer %v (%v) %v %v\n", onion, eventID, val, existsStr)
exists, _ := strconv.ParseBool(existsStr)
retVal := peerRetVal{Val: val, Exists: exists}
message, err := json.Marshal(retVal)
if err != nil {
return err
}
return e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextRetVal, Data: message})
}
func (e *engine) deleteConnection(id string) {
conn, err := e.service.GetConnection(id)
if err == nil {
conn.Close()
}
}
// receiveGroupMessage is a callback function that processes GroupMessages from a given server
func (e *engine) receiveGroupMessage(server string, gm *groups.EncryptedGroupMessage) {
// Publish Event so that a Profile Engine can deal with it.
// Note: This technically means that *multiple* Profile Engines could listen to the same ProtocolEngine!
e.eventManager.Publish(event.NewEvent(event.EncryptedGroupMessage, map[event.Field]string{event.GroupServer: server, event.Ciphertext: base64.StdEncoding.EncodeToString(gm.Ciphertext), event.Signature: base64.StdEncoding.EncodeToString(gm.Signature)}))
}
// sendMessageToGroup attempts to send the given message to the given group id.
func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, sig []byte, attempts int) {
// sending to groups can fail for a few reasons (slow server, not enough tokens, etc.)
// rather than trying to keep all that logic in this method we simply back off and try again
// but if we fail more than 5 times then we report back to the client so they can investigate other options.
// Note: This flow only applies to online-and-connected servers (this method will return faster if the server is not
// online)
if attempts >= 5 {
log.Errorf("failed to post a message to a group after %v attempts", attempts)
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "could not make payment to server", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
return
}
e.ephemeralServicesLock.Lock()
ephemeralService, ok := e.ephemeralServices[server]
e.ephemeralServicesLock.Unlock()
if ephemeralService == nil || !ok {
log.Debugf("could not send message to group: serve not found")
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-not-found", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
return
}
conn, err := ephemeralService.service.WaitForCapabilityOrClose(server, groups.CwtchServerSyncedCapability)
if err == nil {
tokenApp, ok := (conn.App()).(*TokenBoardClient)
if ok {
if spent, numtokens := tokenApp.Post(groupID, ct, sig); !spent {
// we failed to post, probably because we ran out of tokens... so make a payment
go tokenApp.PurchaseTokens()
// backoff
time.Sleep(time.Second * 5)
// try again
log.Debugf("sending message to group error attempt: %v", attempts)
e.sendMessageToGroup(groupID, server, ct, sig, attempts+1)
} else {
if numtokens < 5 {
go tokenApp.PurchaseTokens()
}
}
// regardless we return....
return
}
}
log.Debugf("could not send message to group")
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-connection-not-valid", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
}
// TODO this is becoming cluttered
func (e *engine) handlePeerMessage(hostname string, eventID string, context string, message []byte) {
log.Debugf("New message from peer: %v %v", hostname, context)
if context == event.ContextAck {
e.peerAck(hostname, eventID)
} else if context == event.ContextRetVal {
req, ok := e.getValRequests.Load(hostname + eventID)
if ok {
reqStr := req.([]byte)
e.handlePeerRetVal(hostname, reqStr, message)
e.getValRequests.Delete(hostname + eventID)
} else {
log.Errorf("could not find val request for %v %s", hostname, eventID)
}
} else if context == event.ContextGetVal {
var getVal peerGetVal
err := json.Unmarshal(message, &getVal)
if err == nil {
ev := event.NewEventList(event.NewGetValMessageFromPeer, event.RemotePeer, hostname, event.Scope, getVal.Scope, event.Path, getVal.Path)
ev.EventID = eventID
e.eventManager.Publish(ev)
}
} else if context == event.ContextRequestManifest {
for _, message := range e.filesharingSubSystem.RequestManifestParts(eventID) {
if err := e.sendPeerMessage(hostname, message); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
}
}
} else if context == event.ContextSendManifest {
if fileKey, manifest := e.filesharingSubSystem.ReceiveManifestPart(eventID, message); len(manifest) != 0 {
// We have a valid manifest
e.eventManager.Publish(event.NewEvent(event.ManifestReceived, map[event.Field]string{event.Handle: hostname, event.FileKey: fileKey, event.SerializedManifest: manifest}))
}
} else if context == event.ContextRequestFile {
chunks := e.filesharingSubSystem.ProcessChunkRequest(eventID, message)
go func() {
for _, message := range chunks {
if err := e.sendPeerMessage(hostname, message); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
}
}
}()
} else if context == event.ContextSendFile {
fileKey, progress, totalChunks, _, title := e.filesharingSubSystem.ProcessChunk(eventID, message)
if len(fileKey) != 0 {
e.eventManager.Publish(event.NewEvent(event.FileDownloadProgressUpdate, map[event.Field]string{event.FileKey: fileKey, event.Progress: strconv.Itoa(int(progress)), event.FileSizeInChunks: strconv.Itoa(int(totalChunks)), event.NameSuggestion: title}))
if progress == totalChunks {
if tempFile, filePath, success := e.filesharingSubSystem.VerifyFile(fileKey); success {
log.Debugf("file verified and downloaded!")
e.eventManager.Publish(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: fileKey, event.FilePath: filePath, event.TempFile: tempFile}))
} else {
log.Debugf("file failed to verify!")
e.eventManager.Publish(event.NewEvent(event.FileVerificationFailed, map[event.Field]string{event.FileKey: fileKey}))
}
}
}
} else {
// Fall through handler for the default text conversation.
e.eventManager.Publish(event.NewEvent(event.NewMessageFromPeerEngine, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.Data: string(message)}))
// Don't ack messages in channel 7
// Note: this code explicitly doesn't care about malformed messages, we deal with them
// later on...we still want to ack the original send...(as some "malformed" messages
// may be future-ok)
if cm, err := model.DeserializeMessage(string(message)); err == nil {
if cm.IsStream() {
return
}
}
// Send an explicit acknowledgement
// Every other protocol should have an explicit acknowledgement message e.g. value lookups have responses, and file handling has an explicit flow
if err := e.sendPeerMessage(hostname, pmodel.PeerMessage{ID: eventID, Context: event.ContextAck, Data: []byte{}}); err != nil {
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
}
}
}
func (e *engine) handlePeerRetVal(hostname string, getValData, retValData []byte) {
var getVal peerGetVal
var retVal peerRetVal
err := json.Unmarshal(getValData, &getVal)
if err != nil {
log.Errorf("Unmarshalling our own getVal request: %v\n", err)
return
}
err = json.Unmarshal(retValData, &retVal)
if err != nil {
log.Errorf("Unmarshalling peer response to getVal request")
return
}
e.eventManager.Publish(event.NewEventList(event.NewRetValMessageFromPeer, event.RemotePeer, hostname, event.Scope, getVal.Scope, event.Path, getVal.Path, event.Exists, strconv.FormatBool(retVal.Exists), event.Data, retVal.Val))
}
// leaveServer disconnects from a server and deletes the ephemeral service
func (e *engine) leaveServer(server string) {
e.ephemeralServicesLock.Lock()
defer e.ephemeralServicesLock.Unlock()
ephemeralService, ok := e.ephemeralServices[server]
if ok {
ephemeralService.service.Shutdown()
delete(e.ephemeralServices, server)
}
}
func (e *engine) sendPeerMessage(handle string, message pmodel.PeerMessage) error {
conn, err := e.service.WaitForCapabilityOrClose(handle, cwtchCapability)
if err == nil {
peerApp, ok := (conn.App()).(*PeerApp)
if ok {
return peerApp.SendMessage(message)
}
log.Debugf("could not derive peer app: %v", err)
return fmt.Errorf("could not find peer app to send message to: %v", handle)
}
log.Debugf("could not send peer message: %v", err)
return err
}

View File

@ -1,59 +0,0 @@
package connections
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/protocol/groups"
"encoding/base64"
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
"strconv"
)
// Implement Token Service Handler for Engine
// GroupMessageHandler receives a server and an encrypted group message
func (e *engine) GroupMessageHandler(server string, gm *groups.EncryptedGroupMessage) {
e.receiveGroupMessage(server, gm)
}
// PostingFailed is called when a group message fails to post; it reports the failure as a SendMessageToGroupError event
func (e *engine) PostingFailed(group string, sig []byte) {
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: group, event.Error: "failed to post message", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
}
// ServerAuthedHandler is notified when a server has successfully authed
func (e *engine) ServerAuthedHandler(server string) {
e.serverAuthed(server)
}
// ServerSyncedHandler is notified when a server has successfully synced
func (e *engine) ServerSyncedHandler(server string) {
e.serverSynced(server)
}
// ServerClosedHandler is notified when a server connection has closed, the result is ignored during shutdown...
func (e *engine) ServerClosedHandler(server string) {
e.ignoreOnShutdown(e.serverDisconnected)(server)
}
// NewTokenHandler is notified after a successful token acquisition
func (e *engine) NewTokenHandler(tokenService string, tokens []*privacypass.Token) {
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
tokenManager := tokenManagerPointer.(*TokenManager)
tokenManager.StoreNewTokens(tokens)
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(tokenManager.NumTokens())}))
}
// FetchToken is notified when a server requires a new token from the client
func (e *engine) FetchToken(tokenService string) (*privacypass.Token, int, error) {
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
tokenManager := tokenManagerPointer.(*TokenManager)
token, numTokens, err := tokenManager.FetchToken()
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(numTokens)}))
return token, numTokens, err
}
func (e *engine) PokeTokenCount(tokenService string) {
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
tokenManager := tokenManagerPointer.(*TokenManager)
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(tokenManager.NumTokens())}))
}

View File

@ -1,14 +0,0 @@
package connections
import "git.openprivacy.ca/cwtch.im/tapir"
type EngineHooks interface {
SendPeerMessage(connection tapir.Connection, message []byte) error
}
type DefaultEngineHooks struct {
}
func (deh DefaultEngineHooks) SendPeerMessage(connection tapir.Connection, message []byte) error {
return connection.Send(message)
}
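// Illustrative sketch (not part of the original file): a custom EngineHooks implementation that
// counts outbound sends before delegating to the connection, e.g. for tests or metrics.
// The type name and field are hypothetical.
type countingEngineHooks struct {
	sent int
}

func (ceh *countingEngineHooks) SendPeerMessage(connection tapir.Connection, message []byte) error {
	// record the attempt, then send exactly as the default hooks would
	ceh.sent++
	return connection.Send(message)
}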

View File

@ -1,59 +0,0 @@
package connections
import (
"cwtch.im/cwtch/utils"
"git.openprivacy.ca/cwtch.im/tapir/applications"
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
"git.openprivacy.ca/cwtch.im/tapir/primitives"
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
"git.openprivacy.ca/openprivacy/connectivity"
"git.openprivacy.ca/openprivacy/log"
"reflect"
"time"
)
// MakePayment uses the PoW based token protocol to obtain more tokens
func MakePayment(tokenServiceOnion string, tokenService *privacypass.TokenServer, acn connectivity.ACN, handler TokenBoardHandler) error {
log.Debugf("making a payment")
id, sk := primitives.InitializeEphemeralIdentity()
client := new(tor.BaseOnionService)
client.Init(acn, sk, &id)
defer client.Shutdown()
tokenApplication := new(applications.TokenApplication)
tokenApplication.TokenService = tokenService
powTokenApp := new(applications.ApplicationChain).
ChainApplication(new(applications.ProofOfWorkApplication), applications.SuccessfulProofOfWorkCapability).
ChainApplication(tokenApplication, applications.HasTokensCapability)
log.Debugf("waiting for successful PoW auth...")
tp := utils.TimeoutPolicy(time.Second * 30)
err := tp.ExecuteAction(func() error {
connected, err := client.Connect(tokenServiceOnion, powTokenApp)
if connected && err == nil {
log.Debugf("waiting for successful token acquisition...")
conn, err := client.WaitForCapabilityOrClose(tokenServiceOnion, applications.HasTokensCapability)
if err == nil {
powtapp, ok := conn.App().(*applications.TokenApplication)
if ok {
log.Debugf("updating tokens")
handler.NewTokenHandler(tokenServiceOnion, powtapp.Tokens)
log.Debugf("transcript: %v", powtapp.Transcript().OutputTranscriptToAudit())
conn.Close()
return nil
}
log.Errorf("invalid cast of powapp. this should never happen %v %v", powtapp, reflect.TypeOf(conn.App()))
return nil
}
return nil
}
return err
})
// we timed out
if err != nil {
log.Debugf("make payment timeout...")
return err
}
return err
}

View File

@ -1,195 +0,0 @@
package connections
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/model"
model2 "cwtch.im/cwtch/protocol/model"
"encoding/json"
"git.openprivacy.ca/cwtch.im/tapir"
"git.openprivacy.ca/cwtch.im/tapir/applications"
"git.openprivacy.ca/openprivacy/log"
"sync/atomic"
"time"
)
const cwtchCapability = tapir.Capability("cwtchCapability")
// PeerApp encapsulates the behaviour of a Cwtch Peer
type PeerApp struct {
applications.AuthApp
connection tapir.Connection
MessageHandler func(string, string, string, []byte)
IsBlocked func(string) bool
IsAllowed func(string) bool
OnAcknowledgement func(string, string)
OnAuth func(string)
OnClose func(string)
OnConnecting func(string)
OnSendMessage func(connection tapir.Connection, message []byte) error
version atomic.Value
}
type peerGetVal struct {
Scope, Path string
}
type peerRetVal struct {
Val string
Exists bool
}
const Version1 = 0x01
const Version2 = 0x02
// NewInstance should always return a new instantiation of the application.
func (pa *PeerApp) NewInstance() tapir.Application {
newApp := new(PeerApp)
newApp.MessageHandler = pa.MessageHandler
newApp.IsBlocked = pa.IsBlocked
newApp.IsAllowed = pa.IsAllowed
newApp.OnAcknowledgement = pa.OnAcknowledgement
newApp.OnAuth = pa.OnAuth
newApp.OnClose = pa.OnClose
newApp.OnConnecting = pa.OnConnecting
newApp.OnSendMessage = pa.OnSendMessage
newApp.version.Store(Version1)
return newApp
}
// Init is run when the connection is first started.
func (pa *PeerApp) Init(connection tapir.Connection) {
// First run the Authentication App
pa.AuthApp.Init(connection)
if connection.HasCapability(applications.AuthCapability) {
pa.connection = connection
connection.SetCapability(cwtchCapability)
if pa.IsBlocked(connection.Hostname()) {
pa.connection.Close()
pa.OnClose(connection.Hostname())
} else {
// we are authenticated
// attempt to negotiate a more efficient packet format...
// we are abusing the context here slightly by sending a "malformed" GetVal request.
// as a rule cwtch ignores getval requests that it cannot deserialize so older clients will ignore this
// message.
// version *must* be the first message sent to prevent race conditions for other events fired after-auth
// (e.g. getVal requests)
// as such, we send this message before we update the rest of the system
_ = pa.SendMessage(model2.PeerMessage{
ID: event.ContextVersion,
Context: event.ContextGetVal,
Data: []byte{Version2},
})
pa.OnAuth(connection.Hostname())
go pa.listen()
}
} else {
// The auth protocol wasn't completed, so we can safely shut down the connection
// send an onclose here because we *may* have triggered this and we want to retry later...
pa.OnClose(connection.Hostname())
connection.Close()
}
}
func (pa *PeerApp) listen() {
for {
message := pa.connection.Expect()
if len(message) == 0 {
log.Debugf("0 byte read, socket has likely failed. Closing the listen goroutine")
pa.OnClose(pa.connection.Hostname())
return
}
var packet model2.PeerMessage
var err error
if pa.version.Load() == Version1 {
err = json.Unmarshal(message, &packet)
} else if pa.version.Load() == Version2 {
parsePacket, parseErr := model2.ParsePeerMessage(message)
// if all else fails...attempt to process this message as a version 1 message
if parseErr != nil {
err = json.Unmarshal(message, &packet)
} else {
packet = *parsePacket
}
} else {
log.Errorf("invalid version")
pa.OnClose(pa.connection.Hostname())
return
}
if err == nil {
if pa.IsAllowed(pa.connection.Hostname()) {
// we don't expose im.cwtch.version messages outside of PeerApp (ideally at some point in the future we
// can remove this check altogether)
if packet.ID == event.ContextVersion {
if pa.version.Load() == Version1 && len(packet.Data) == 1 && packet.Data[0] == Version2 {
log.Debugf("switching to protocol version 2")
pa.version.Store(Version2)
}
} else {
if cm, err := model.DeserializeMessage(string(packet.Data)); err == nil {
if cm.TransitTime != nil {
rt := time.Now().UTC()
cm.RecvTime = &rt
data, _ := json.Marshal(cm)
packet.Data = data
}
}
pa.MessageHandler(pa.connection.Hostname(), packet.ID, packet.Context, packet.Data)
}
}
} else {
log.Errorf("Error unmarshalling PeerMessage package: %x %v", message, err)
}
}
}
// SendMessage sends the peer a preformatted message
// NOTE: This is a stub, we will likely want to extend this to better reflect the desired protocol
func (pa *PeerApp) SendMessage(message model2.PeerMessage) error {
var serialized []byte
var err error
if cm, err := model.DeserializeMessage(string(message.Data)); err == nil {
if cm.SendTime != nil {
tt := time.Now().UTC()
cm.TransitTime = &tt
data, _ := json.Marshal(cm)
message.Data = data
}
}
if pa.version.Load() == Version2 {
// treat data as a pre-serialized string, not as a byte array (which will be base64 encoded and bloat the packet size)
serialized = message.Serialize()
} else {
serialized, err = json.Marshal(message)
}
if err == nil {
err = pa.OnSendMessage(pa.connection, serialized)
// at this point we have tried to send a message to a peer only to find that something went wrong.
// we don't know *what* went wrong - the most likely explanation is the peer went offline in the time between
// sending the message and it arriving in the engine to be sent. Other explanations include problems with Tor
// or a dropped wifi connection.
// Regardless, we error out this message and close this peer app assuming it cannot be used again.
// We expect that cwtch will eventually recreate this connection and the app.
if err != nil {
// close any associated sockets
pa.connection.Close()
// tell cwtch this connection is no longer valid
pa.OnClose(pa.connection.Hostname())
}
return err
}
return err
}

View File

@ -1,31 +0,0 @@
package connections
// ConnectionState defines the various states a connection can be in from disconnected to authenticated
type ConnectionState int
// Connection States
// DISCONNECTED - No existing connection has been made, or all attempts have failed
// CONNECTING - We are in the process of attempting to connect to a given endpoint
// CONNECTED - We have connected but not yet authenticated
// AUTHENTICATED - im.ricochet.auth-hidden-server has succeeded on the connection.
// SYNCED - we have pulled all the messages for groups from the server and are ready to send
const (
DISCONNECTED ConnectionState = iota
CONNECTING
CONNECTED
AUTHENTICATED
SYNCED
FAILED
KILLED
)
var (
// ConnectionStateName allows conversion of states to their string representations
ConnectionStateName = []string{"Disconnected", "Connecting", "Connected", "Authenticated", "Synced", "Failed", "Killed"}
)
// ConnectionStateToType allows conversion of strings to their state type
func ConnectionStateToType() map[string]ConnectionState {
return map[string]ConnectionState{"Disconnected": DISCONNECTED, "Connecting": CONNECTING,
"Connected": CONNECTED, "Authenticated": AUTHENTICATED, "Synced": SYNCED, "Failed": FAILED, "Killed": KILLED}
}
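// Illustrative sketch (not part of the original file): round-tripping between the enum used
// internally and the string form carried in PeerStateChange/ServerStateChange events.
// The function name is hypothetical.
func sketchConnectionStateRoundTrip() {
	name := ConnectionStateName[AUTHENTICATED] // "Authenticated"
	state := ConnectionStateToType()[name]     // AUTHENTICATED
	_ = state
}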

View File

@ -1,54 +0,0 @@
package connections
import (
"encoding/json"
"errors"
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
"git.openprivacy.ca/openprivacy/log"
"sync"
)
// TokenManager maintains a list of tokens associated with a single TokenServer
type TokenManager struct {
lock sync.Mutex
tokens map[string]*privacypass.Token
}
func NewTokenManager() *TokenManager {
tm := new(TokenManager)
tm.tokens = make(map[string]*privacypass.Token)
return tm
}
// StoreNewTokens adds tokens to the internal list managed by this TokenManager
func (tm *TokenManager) StoreNewTokens(tokens []*privacypass.Token) {
tm.lock.Lock()
defer tm.lock.Unlock()
log.Debugf("acquired %v new tokens", tokens)
for _, token := range tokens {
serialized, _ := json.Marshal(token)
tm.tokens[string(serialized)] = token
}
}
// NumTokens returns the current number of tokens
func (tm *TokenManager) NumTokens() int {
tm.lock.Lock()
defer tm.lock.Unlock()
return len(tm.tokens)
}
// FetchToken removes a token from the internal list and returns it, along with a count of the remaining tokens.
// Errors if no tokens available.
func (tm *TokenManager) FetchToken() (*privacypass.Token, int, error) {
tm.lock.Lock()
defer tm.lock.Unlock()
if len(tm.tokens) == 0 {
return nil, 0, errors.New("no more tokens")
}
for serializedToken, token := range tm.tokens {
delete(tm.tokens, serializedToken)
return token, len(tm.tokens), nil
}
return nil, 0, errors.New("no more tokens")
}
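// Illustrative sketch (not part of the original file): the expected store-then-spend flow for a
// TokenManager. The tokens slice is assumed to come from a successful privacypass acquisition
// (see MakePayment / NewTokenHandler); the function name is hypothetical.
func sketchTokenManagerUsage(tokens []*privacypass.Token) {
	tm := NewTokenManager()
	tm.StoreNewTokens(tokens)
	// FetchToken removes one token and reports how many remain; it errors when the pool is empty.
	if token, remaining, err := tm.FetchToken(); err == nil {
		log.Debugf("spent token %v, %v remaining", token, remaining)
	}
}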

View File

@ -1,204 +0,0 @@
package connections
import (
"cwtch.im/cwtch/protocol/groups"
"encoding/json"
"git.openprivacy.ca/cwtch.im/tapir"
"git.openprivacy.ca/cwtch.im/tapir/applications"
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
"git.openprivacy.ca/openprivacy/connectivity"
"git.openprivacy.ca/openprivacy/log"
"github.com/gtank/ristretto255"
"sync"
)
// TokenBoardHandler encapsulates all the various handlers a client needs to interact with a token board
// this includes handlers to receive new messages, as well as handlers to manage tokens.
type TokenBoardHandler interface {
GroupMessageHandler(server string, gm *groups.EncryptedGroupMessage)
ServerAuthedHandler(server string)
ServerSyncedHandler(server string)
ServerClosedHandler(server string)
NewTokenHandler(tokenService string, tokens []*privacypass.Token)
PostingFailed(server string, sig []byte)
FetchToken(tokenService string) (*privacypass.Token, int, error)
}
// NewTokenBoardClient generates a new Client for Token Board
func NewTokenBoardClient(acn connectivity.ACN, Y *ristretto255.Element, tokenServiceOnion string, lastKnownSignature []byte, tokenBoardHandler TokenBoardHandler) tapir.Application {
tba := new(TokenBoardClient)
tba.acn = acn
tba.tokenService = privacypass.NewTokenServer()
tba.tokenService.Y = Y
tba.tokenServiceOnion = tokenServiceOnion
tba.tokenBoardHandler = tokenBoardHandler
tba.lastKnownSignature = lastKnownSignature
return tba
}
// TokenBoardClient defines a client for the TokenBoard server
type TokenBoardClient struct {
applications.AuthApp
connection tapir.Connection
tokenBoardHandler TokenBoardHandler
// Token service handling
acn connectivity.ACN
tokenService *privacypass.TokenServer
tokenServiceOnion string
lastKnownSignature []byte
postLock sync.Mutex
postQueue []groups.CachedEncryptedGroupMessage
}
// NewInstance creates a new TokenBoardClient instance
func (ta *TokenBoardClient) NewInstance() tapir.Application {
tba := new(TokenBoardClient)
tba.tokenBoardHandler = ta.tokenBoardHandler
tba.acn = ta.acn
tba.tokenService = ta.tokenService
tba.tokenServiceOnion = ta.tokenServiceOnion
tba.lastKnownSignature = ta.lastKnownSignature
return tba
}
// Init initializes the cryptographic TokenBoardApp
func (ta *TokenBoardClient) Init(connection tapir.Connection) {
// connection.Hostname is always valid because we are ALWAYS the initiating party
log.Debugf("connecting to server: %v", connection.Hostname())
ta.AuthApp.Init(connection)
log.Debugf("server protocol complete: %v", connection.Hostname())
if connection.HasCapability(applications.AuthCapability) {
log.Debugf("Successfully Initialized Connection to %v", connection.Hostname())
ta.connection = connection
ta.tokenBoardHandler.ServerAuthedHandler(connection.Hostname())
go ta.Listen()
// Optimistically acquire many tokens for this server...
go ta.PurchaseTokens()
go ta.PurchaseTokens()
ta.Replay()
} else {
log.Debugf("Error Connecting to %v", connection.Hostname())
ta.tokenBoardHandler.ServerClosedHandler(connection.Hostname())
connection.Close()
}
}
// Listen processes the messages for this application
func (ta *TokenBoardClient) Listen() {
for {
log.Debugf("Client waiting...")
data := ta.connection.Expect()
if len(data) == 0 {
log.Debugf("Server closed the connection...")
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
return // connection is closed
}
// We always expect the server to follow protocol, and the second it doesn't we close the connection
var message groups.Message
if err := json.Unmarshal(data, &message); err != nil {
log.Debugf("Server sent an unexpected message, closing the connection: %v", err)
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
ta.connection.Close()
return
}
switch message.MessageType {
case groups.NewMessageMessage:
if message.NewMessage != nil {
ta.tokenBoardHandler.GroupMessageHandler(ta.connection.Hostname(), &message.NewMessage.EGM)
} else {
log.Debugf("Server sent an unexpected NewMessage, closing the connection: %s", data)
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
ta.connection.Close()
return
}
case groups.PostResultMessage:
ta.postLock.Lock()
egm := ta.postQueue[0]
ta.postQueue = ta.postQueue[1:]
ta.postLock.Unlock()
if !message.PostResult.Success {
log.Debugf("post result message: %v", message.PostResult)
// Retry using another token
posted, _ := ta.Post(egm.Group, egm.Ciphertext, egm.Signature)
// if posting failed...
if !posted {
log.Errorf("error posting message")
ta.tokenBoardHandler.PostingFailed(egm.Group, egm.Signature)
}
}
case groups.ReplayResultMessage:
if message.ReplayResult != nil {
log.Debugf("Replaying %v Messages...", message.ReplayResult.NumMessages)
for i := 0; i < message.ReplayResult.NumMessages; i++ {
data := ta.connection.Expect()
if len(data) == 0 {
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection")
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
ta.connection.Close()
return
}
egm := &groups.EncryptedGroupMessage{}
if err := json.Unmarshal(data, egm); err == nil {
ta.tokenBoardHandler.GroupMessageHandler(ta.connection.Hostname(), egm)
ta.lastKnownSignature = egm.Signature
} else {
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection: %v", err)
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
ta.connection.Close()
return
}
}
ta.tokenBoardHandler.ServerSyncedHandler(ta.connection.Hostname())
ta.connection.SetCapability(groups.CwtchServerSyncedCapability)
}
}
}
}
// Replay posts a Replay Message to the server.
func (ta *TokenBoardClient) Replay() {
data, _ := json.Marshal(groups.Message{MessageType: groups.ReplayRequestMessage, ReplayRequest: &groups.ReplayRequest{LastCommit: ta.lastKnownSignature}})
ta.connection.Send(data)
}
// PurchaseTokens purchases more tokens from the token server (using the configured payment handler)
func (ta *TokenBoardClient) PurchaseTokens() {
MakePayment(ta.tokenServiceOnion, ta.tokenService, ta.acn, ta.tokenBoardHandler)
}
// Post sends a Post Request to the server
func (ta *TokenBoardClient) Post(group string, ct []byte, sig []byte) (bool, int) {
egm := groups.EncryptedGroupMessage{Ciphertext: ct, Signature: sig}
token, numTokens, err := ta.NextToken(egm.ToBytes(), ta.connection.Hostname())
if err == nil {
data, _ := json.Marshal(groups.Message{MessageType: groups.PostRequestMessage, PostRequest: &groups.PostRequest{EGM: egm, Token: token}})
ta.postLock.Lock()
// ONLY put group in the EGM as a cache / for error reporting...
ta.postQueue = append(ta.postQueue, groups.CachedEncryptedGroupMessage{Group: group, EncryptedGroupMessage: egm})
log.Debugf("Message Length: %s %v", data, len(data))
err := ta.connection.Send(data)
ta.postLock.Unlock()
if err != nil {
return false, numTokens
}
return true, numTokens
}
log.Debugf("No Valid Tokens: %v", err)
return false, numTokens
}
// NextToken retrieves the next token
func (ta *TokenBoardClient) NextToken(data []byte, hostname string) (privacypass.SpentToken, int, error) {
token, numtokens, err := ta.tokenBoardHandler.FetchToken(ta.tokenServiceOnion)
if err != nil {
return privacypass.SpentToken{}, numtokens, err
}
return token.SpendToken(append(data, hostname...)), numtokens, nil
}

View File

@ -0,0 +1,133 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: cwtch-profile.proto
package protocol
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type CwtchPeerPacket struct {
CwtchIdentify *CwtchIdentity `protobuf:"bytes,1,opt,name=cwtch_identify,json=cwtchIdentify" json:"cwtch_identify,omitempty"`
GroupChatInvite *GroupChatInvite `protobuf:"bytes,2,opt,name=group_chat_invite,json=groupChatInvite" json:"group_chat_invite,omitempty"`
}
func (m *CwtchPeerPacket) Reset() { *m = CwtchPeerPacket{} }
func (m *CwtchPeerPacket) String() string { return proto.CompactTextString(m) }
func (*CwtchPeerPacket) ProtoMessage() {}
func (*CwtchPeerPacket) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *CwtchPeerPacket) GetCwtchIdentify() *CwtchIdentity {
if m != nil {
return m.CwtchIdentify
}
return nil
}
func (m *CwtchPeerPacket) GetGroupChatInvite() *GroupChatInvite {
if m != nil {
return m.GroupChatInvite
}
return nil
}
type CwtchIdentity struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Ed25519PublicKey []byte `protobuf:"bytes,2,opt,name=ed25519_public_key,json=ed25519PublicKey,proto3" json:"ed25519_public_key,omitempty"`
}
func (m *CwtchIdentity) Reset() { *m = CwtchIdentity{} }
func (m *CwtchIdentity) String() string { return proto.CompactTextString(m) }
func (*CwtchIdentity) ProtoMessage() {}
func (*CwtchIdentity) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
func (m *CwtchIdentity) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *CwtchIdentity) GetEd25519PublicKey() []byte {
if m != nil {
return m.Ed25519PublicKey
}
return nil
}
// [name] has invited you to join a group chat: [message]
type GroupChatInvite struct {
GroupName string `protobuf:"bytes,1,opt,name=group_name,json=groupName" json:"group_name,omitempty"`
GroupSharedKey []byte `protobuf:"bytes,2,opt,name=group_shared_key,json=groupSharedKey,proto3" json:"group_shared_key,omitempty"`
ServerHost string `protobuf:"bytes,3,opt,name=server_host,json=serverHost" json:"server_host,omitempty"`
SignedGroupId []byte `protobuf:"bytes,4,opt,name=signed_group_id,json=signedGroupId,proto3" json:"signed_group_id,omitempty"`
}
func (m *GroupChatInvite) Reset() { *m = GroupChatInvite{} }
func (m *GroupChatInvite) String() string { return proto.CompactTextString(m) }
func (*GroupChatInvite) ProtoMessage() {}
func (*GroupChatInvite) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *GroupChatInvite) GetGroupName() string {
if m != nil {
return m.GroupName
}
return ""
}
func (m *GroupChatInvite) GetGroupSharedKey() []byte {
if m != nil {
return m.GroupSharedKey
}
return nil
}
func (m *GroupChatInvite) GetServerHost() string {
if m != nil {
return m.ServerHost
}
return ""
}
func (m *GroupChatInvite) GetSignedGroupId() []byte {
if m != nil {
return m.SignedGroupId
}
return nil
}
func init() {
proto.RegisterType((*CwtchPeerPacket)(nil), "protocol.CwtchPeerPacket")
proto.RegisterType((*CwtchIdentity)(nil), "protocol.CwtchIdentity")
proto.RegisterType((*GroupChatInvite)(nil), "protocol.GroupChatInvite")
}
func init() { proto.RegisterFile("cwtch-profile.proto", fileDescriptor1) }
var fileDescriptor1 = []byte{
// 299 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0xc1, 0x4a, 0xf3, 0x40,
0x14, 0x85, 0xc9, 0xff, 0x17, 0xb1, 0xb7, 0xb6, 0xa9, 0xe3, 0xc2, 0xb8, 0x10, 0xa5, 0x0b, 0xe9,
0x42, 0x0b, 0x56, 0xba, 0x70, 0xe3, 0xa6, 0x88, 0x86, 0x82, 0xc4, 0xf8, 0x00, 0x43, 0x3a, 0x73,
0x93, 0x0c, 0x8d, 0x99, 0x30, 0x99, 0x56, 0xe6, 0x4d, 0xdc, 0xfb, 0xa2, 0x92, 0x1b, 0xa5, 0xad,
0xab, 0x99, 0x39, 0xe7, 0xde, 0xef, 0x1c, 0x06, 0x4e, 0xc4, 0x87, 0x15, 0xf9, 0x4d, 0x65, 0x74,
0xaa, 0x0a, 0x9c, 0x54, 0x46, 0x5b, 0xcd, 0x0e, 0xe9, 0x10, 0xba, 0x18, 0x7d, 0x7a, 0xe0, 0xcf,
0x9b, 0x89, 0x08, 0xd1, 0x44, 0x89, 0x58, 0xa1, 0x65, 0x0f, 0x30, 0xa0, 0x25, 0xae, 0x24, 0x96,
0x56, 0xa5, 0x2e, 0xf0, 0x2e, 0xbd, 0x71, 0x6f, 0x7a, 0x3a, 0xf9, 0x5d, 0x9b, 0xd0, 0x4a, 0x48,
0xb6, 0x75, 0x71, 0x5f, 0x6c, 0x9f, 0xa9, 0x63, 0x8f, 0x70, 0x9c, 0x19, 0xbd, 0xae, 0xb8, 0xc8,
0x13, 0xcb, 0x55, 0xb9, 0x51, 0x16, 0x83, 0x7f, 0x84, 0x38, 0xdb, 0x22, 0x9e, 0x9a, 0x91, 0x79,
0x9e, 0xd8, 0x90, 0x06, 0x62, 0x3f, 0xdb, 0x17, 0x46, 0xaf, 0xd0, 0xdf, 0x8b, 0x61, 0x0c, 0x3a,
0x65, 0xf2, 0x8e, 0xd4, 0xa6, 0x1b, 0xd3, 0x9d, 0x5d, 0x03, 0x43, 0x39, 0x9d, 0xcd, 0x6e, 0xef,
0x79, 0xb5, 0x5e, 0x16, 0x4a, 0xf0, 0x15, 0x3a, 0x0a, 0x3b, 0x8a, 0x87, 0x3f, 0x4e, 0x44, 0xc6,
0x02, 0xdd, 0xe8, 0xcb, 0x03, 0xff, 0x4f, 0x2e, 0x3b, 0x07, 0x68, 0xdb, 0xee, 0xb0, 0xbb, 0xa4,
0xbc, 0x34, 0x01, 0x63, 0x18, 0xb6, 0x76, 0x9d, 0x27, 0x06, 0xe5, 0x0e, 0x7e, 0x40, 0xfa, 0x1b,
0xc9, 0x0b, 0x74, 0xec, 0x02, 0x7a, 0x35, 0x9a, 0x0d, 0x1a, 0x9e, 0xeb, 0xda, 0x06, 0xff, 0x89,
0x04, 0xad, 0xf4, 0xac, 0x6b, 0xcb, 0xae, 0xc0, 0xaf, 0x55, 0x56, 0xa2, 0xe4, 0x2d, 0x51, 0xc9,
0xa0, 0x43, 0xa4, 0x7e, 0x2b, 0x53, 0xb3, 0x50, 0x2e, 0x0f, 0xe8, 0x8f, 0xee, 0xbe, 0x03, 0x00,
0x00, 0xff, 0xff, 0x62, 0x61, 0x2d, 0x00, 0xbb, 0x01, 0x00, 0x00,
}
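
The generated getters above are nil-safe accessors over the proto3 message types. As a rough, hypothetical sketch (not part of this diff; the cwtch.im/cwtch/protocol import path is assumed), a CwtchIdentity round-trips through the same golang/protobuf API like this:

package main

import (
	"fmt"

	"cwtch.im/cwtch/protocol" // assumed import path for the generated package
	"github.com/golang/protobuf/proto"
)

func main() {
	id := &protocol.CwtchIdentity{
		Name:             "alice",
		Ed25519PublicKey: []byte{0x01, 0x02, 0x03}, // placeholder key bytes
	}
	wire, err := proto.Marshal(id) // encode to the protobuf wire format
	if err != nil {
		panic(err)
	}
	var decoded protocol.CwtchIdentity
	if err := proto.Unmarshal(wire, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetName()) // "alice"
}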

View File

@ -0,0 +1,20 @@
syntax = "proto3";
package protocol;
message CwtchPeerPacket {
CwtchIdentity cwtch_identify = 1;
GroupChatInvite group_chat_invite = 2;
}
message CwtchIdentity {
string name = 1;
bytes ed25519_public_key = 2;
}
// [name] has invited you to join a group chat: [message]
message GroupChatInvite {
string group_name = 1;
bytes group_shared_key = 2;
string server_host = 3;
bytes signed_group_id = 4;
}

View File

@ -1,87 +0,0 @@
package files
import (
"errors"
"fmt"
"strconv"
"strings"
)
// ChunkSpec is a wrapper around an uncompressed array of chunk identifiers
type ChunkSpec []uint64
// CreateChunkSpec given a full list of chunks with their downloaded status (true for downloaded, false otherwise)
// derives a list of identifiers of chunks that have not been downloaded yet
func CreateChunkSpec(progress []bool) ChunkSpec {
chunks := ChunkSpec{}
for i, p := range progress {
if !p {
chunks = append(chunks, uint64(i))
}
}
return chunks
}
// Deserialize takes in a compressed chunk spec and returns an uncompressed ChunkSpec or an error
// if the serialized chunk spec has format errors
func Deserialize(serialized string) (*ChunkSpec, error) {
var chunkSpec ChunkSpec
if len(serialized) == 0 {
return &chunkSpec, nil
}
ranges := strings.Split(serialized, ",")
for _, r := range ranges {
parts := strings.Split(r, ":")
if len(parts) == 1 {
single, err := strconv.Atoi(r)
if err != nil {
return nil, errors.New("invalid chunk spec")
}
chunkSpec = append(chunkSpec, uint64(single))
} else if len(parts) == 2 {
start, err1 := strconv.Atoi(parts[0])
end, err2 := strconv.Atoi(parts[1])
if err1 != nil || err2 != nil {
return nil, errors.New("invalid chunk spec")
}
for i := start; i <= end; i++ {
chunkSpec = append(chunkSpec, uint64(i))
}
} else {
return nil, errors.New("invalid chunk spec")
}
}
return &chunkSpec, nil
}
// Serialize compresses the ChunkSpec into a list of inclusive ranges e.g. 1,2,3,5,6,7 becomes "1:3,5:7"
func (cs ChunkSpec) Serialize() string {
result := ""
i := 0
for {
if i >= len(cs) {
break
}
j := i + 1
for ; j < len(cs) && cs[j] == cs[j-1]+1; j++ {
}
if result != "" {
result += ","
}
if j == i+1 {
result += fmt.Sprintf("%d", cs[i])
} else {
result += fmt.Sprintf("%d:%d", cs[i], cs[j-1])
}
i = j
}
return result
}
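
For illustration, the round trip between a progress slice and its compressed wire form looks like this (a hypothetical Example function, not part of this diff):

package files

import "fmt"

// ExampleChunkSpec: chunks 2 and 6 are already downloaded, everything else is still needed.
func ExampleChunkSpec() {
	progress := []bool{false, false, true, false, false, false, true}
	spec := CreateChunkSpec(progress) // ChunkSpec{0, 1, 3, 4, 5}
	wire := spec.Serialize()
	fmt.Println(wire)
	if back, err := Deserialize(wire); err == nil {
		fmt.Println(back.Serialize()) // identical after the round trip
	}
	// Output:
	// 0:1,3:5
	// 0:1,3:5
}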

View File

@ -1,37 +0,0 @@
package files
import "testing"
func TestChunkSpec(t *testing.T) {
var testCases = map[string]ChunkSpec{
"0": CreateChunkSpec([]bool{false}),
"0:10": CreateChunkSpec([]bool{false, false, false, false, false, false, false, false, false, false, false}),
"0:1,3:5,7:9": CreateChunkSpec([]bool{false, false, true, false, false, false, true, false, false, false, true}),
"": CreateChunkSpec([]bool{true, true, true, true, true, true, true, true, true, true, true}),
"2,5,8,10": CreateChunkSpec([]bool{true, true, false, true, true, false, true, true, false, true, false}),
//
"0,2:10": CreateChunkSpec([]bool{false, true, false, false, false, false, false, false, false, false, false}),
"0:8,10": CreateChunkSpec([]bool{false, false, false, false, false, false, false, false, false, true, false}),
"1:9": CreateChunkSpec([]bool{true, false, false, false, false, false, false, false, false, false, true}),
}
for k, v := range testCases {
if k != v.Serialize() {
t.Fatalf("got %v but expected %v", v.Serialize(), k)
}
t.Logf("%v == %v", k, v.Serialize())
}
for k, v := range testCases {
if cs, err := Deserialize(k); err != nil {
t.Fatalf("error deserialized key: %v %v", k, err)
} else {
if v.Serialize() != cs.Serialize() {
t.Fatalf("got %v but expected %v", v.Serialize(), cs.Serialize())
}
t.Logf("%v == %v", cs.Serialize(), v.Serialize())
}
}
}

View File

@ -1,249 +0,0 @@
package files
import (
"encoding/hex"
"encoding/json"
"fmt"
path "path/filepath"
"strconv"
"strings"
"sync"
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/protocol/model"
"git.openprivacy.ca/openprivacy/log"
)
// FileSharingSubSystem encapsulates the functionality necessary to share and download files via Cwtch
type FileSharingSubSystem struct {
// for sharing files
activeShares sync.Map // file key to manifest
// for downloading files
prospectiveManifests sync.Map // file key to serialized manifests
activeDownloads sync.Map // file key to manifests
}
// ShareFile given a file key and a serialized manifest, allows the manifest (and the file it describes)
// to be downloaded by Cwtch profiles in possession of the fileKey
func (fsss *FileSharingSubSystem) ShareFile(fileKey string, serializedManifest string) {
var manifest Manifest
err := json.Unmarshal([]byte(serializedManifest), &manifest)
if err != nil {
log.Errorf("could not share file %v", err)
return
}
log.Debugf("sharing file: %v %v", fileKey, serializedManifest)
fsss.activeShares.Store(fileKey, &manifest)
}
// StopFileShare given a file key removes the serialized manifest from consideration by the file sharing
// subsystem. Future requests on this manifest will fail, as will any in-progress chunk requests.
func (fsss *FileSharingSubSystem) StopFileShare(fileKey string) {
fsss.activeShares.Delete(fileKey)
}
// StopAllFileShares removes all active file shares from consideration
func (fsss *FileSharingSubSystem) StopAllFileShares() {
fsss.activeShares.Range(func(key, value interface{}) bool {
fsss.activeShares.Delete(key)
return true
})
}
// FetchManifest given a file key and knowledge of the manifest size in chunks (obtained via an attribute lookup),
// constructs a request to download the manifest.
func (fsss *FileSharingSubSystem) FetchManifest(fileKey string, manifestSize uint64) model.PeerMessage {
fsss.prospectiveManifests.Store(fileKey, strings.Repeat("\"", int(manifestSize*DefaultChunkSize)))
return model.PeerMessage{
Context: event.ContextRequestManifest,
ID: fileKey,
Data: []byte{},
}
}
// CompileChunkRequests takes in a complete serializedManifest and returns a set of chunk request messages
// TODO in the future we will want this to return the handles of contacts to request chunks from
func (fsss *FileSharingSubSystem) CompileChunkRequests(fileKey, serializedManifest, tempFile, title string) []model.PeerMessage {
var manifest Manifest
err := json.Unmarshal([]byte(serializedManifest), &manifest)
var messages []model.PeerMessage
if err == nil {
manifest.TempFileName = tempFile
manifest.Title = title
err := manifest.PrepareDownload()
if err == nil {
fsss.activeDownloads.Store(fileKey, &manifest)
log.Debugf("downloading file chunks: %v", manifest.GetChunkRequest().Serialize())
messages = append(messages, model.PeerMessage{
ID: fileKey,
Context: event.ContextRequestFile,
Data: []byte(manifest.GetChunkRequest().Serialize()),
})
} else {
log.Errorf("couldn't prepare download: %v", err)
}
}
return messages
}
// RequestManifestParts given a fileKey constructs the set of messages carrying the various parts of the
// (path-redacted) Manifest so they can be sent to the requesting peer
func (fsss *FileSharingSubSystem) RequestManifestParts(fileKey string) []model.PeerMessage {
manifestI, exists := fsss.activeShares.Load(fileKey)
var messages []model.PeerMessage
if exists {
oldManifest := manifestI.(*Manifest)
serializedOldManifest := oldManifest.Serialize()
log.Debugf("found serialized manifest")
// copy so we don't get threading issues by modifying the original
// and then redact the file path before sending
// nb: manifest.size has already been corrected elsewhere
var manifest Manifest
json.Unmarshal([]byte(serializedOldManifest), &manifest)
manifest.FileName = path.Base(manifest.FileName)
serializedManifest := manifest.Serialize()
chunkID := 0
for i := 0; i < len(serializedManifest); i += DefaultChunkSize {
offset := i
end := i + DefaultChunkSize
// truncate end
if end > len(serializedManifest) {
end = len(serializedManifest)
}
chunk := serializedManifest[offset:end]
// request this manifest part
messages = append(messages, model.PeerMessage{
Context: event.ContextSendManifest,
ID: fmt.Sprintf("%s.%d", fileKey, chunkID),
Data: chunk,
})
chunkID++
}
}
return messages
}
// ReceiveManifestPart given a manifestKey reconstructs the corresponding part of the manifest from the provided bytes
func (fsss *FileSharingSubSystem) ReceiveManifestPart(manifestKey string, part []byte) (fileKey string, serializedManifest string) {
fileKeyParts := strings.Split(manifestKey, ".")
if len(fileKeyParts) == 3 { // rootHash.nonce.manifestPart
fileKey = fmt.Sprintf("%s.%s", fileKeyParts[0], fileKeyParts[1])
log.Debugf("manifest filekey: %s", fileKey)
manifestPart, err := strconv.Atoi(fileKeyParts[2])
if err == nil {
serializedManifest, exists := fsss.prospectiveManifests.Load(fileKey)
if exists {
serializedManifest := serializedManifest.(string)
log.Debugf("loaded manifest")
offset := manifestPart * DefaultChunkSize
end := (manifestPart + 1) * DefaultChunkSize
log.Debugf("storing manifest part %v %v", offset, end)
serializedManifestBytes := []byte(serializedManifest)
if len(serializedManifestBytes) > offset && len(serializedManifestBytes) >= end {
copy(serializedManifestBytes[offset:end], part[:])
if len(part) < DefaultChunkSize {
serializedManifestBytes = serializedManifestBytes[0 : len(serializedManifestBytes)-(DefaultChunkSize-len(part))]
}
serializedManifest = string(serializedManifestBytes)
fsss.prospectiveManifests.Store(fileKey, serializedManifest)
log.Debugf("current manifest: [%s]", serializedManifest)
var manifest Manifest
err := json.Unmarshal([]byte(serializedManifest), &manifest)
if err == nil && hex.EncodeToString(manifest.RootHash) == fileKeyParts[0] {
log.Debugf("valid manifest received! %x", manifest.RootHash)
return fileKey, serializedManifest
}
}
}
}
}
return "", ""
}
// ProcessChunkRequest given a fileKey and a chunk request, compiles a set of responses, one for each requested Chunk
func (fsss *FileSharingSubSystem) ProcessChunkRequest(fileKey string, serializedChunkRequest []byte) []model.PeerMessage {
log.Debugf("chunk request: %v", fileKey)
// fileKey is rootHash.nonce
manifestI, exists := fsss.activeShares.Load(fileKey)
var messages []model.PeerMessage
if exists {
manifest := manifestI.(*Manifest)
log.Debugf("manifest found: %x", manifest.RootHash)
chunkSpec, err := Deserialize(string(serializedChunkRequest))
log.Debugf("deserialized chunk spec found: %v [%s]", chunkSpec, serializedChunkRequest)
if err == nil {
for _, chunk := range *chunkSpec {
contents, err := manifest.GetChunkBytes(chunk)
if err == nil {
log.Debugf("sending chunk: %v %x", chunk, contents)
messages = append(messages, model.PeerMessage{
ID: fmt.Sprintf("%v.%d", fileKey, chunk),
Context: event.ContextSendFile,
Data: contents,
})
}
}
}
}
return messages
}
// ProcessChunk given a chunk key and a chunk, attempts to store and verify the chunk as part of an active download.
// It returns the file key and title of the matched download, the download progress, the total number of chunks and the
// given chunk ID; completion is checked separately via VerifyFile.
// If no such active download exists it returns an empty file key and ignores all further processing.
func (fsss *FileSharingSubSystem) ProcessChunk(chunkKey string, chunk []byte) (fileKey string, progress uint64, totalChunks uint64, chunkID uint64, title string) {
fileKeyParts := strings.Split(chunkKey, ".")
log.Debugf("got chunk for %s", fileKeyParts)
if len(fileKeyParts) == 3 { // fileKey is rootHash.nonce.chunk
// recalculate file key
fileKey = fmt.Sprintf("%s.%s", fileKeyParts[0], fileKeyParts[1])
derivedChunkID, err := strconv.Atoi(fileKeyParts[2])
if err == nil {
chunkID = uint64(derivedChunkID)
log.Debugf("got chunk id %d", chunkID)
manifestI, exists := fsss.activeDownloads.Load(fileKey)
if exists {
manifest := manifestI.(*Manifest)
totalChunks = uint64(len(manifest.Chunks))
title = manifest.Title
log.Debugf("found active manifest %v", manifest)
progress, err = manifest.StoreChunk(chunkID, chunk)
log.Debugf("attempts to store chunk %v %v", progress, err)
if err != nil {
log.Debugf("error storing chunk: %v", err)
// malicious contacts who share conversations can send random chunks;
// these will not match the chunk hash and so will fail to store.
// At this point we can't differentiate between a malicious chunk and a failure to store a
// legitimate chunk, so on error we silently drop the chunk and expect the higher level
// callers (e.g. the UI) to detect and respond to missing chunks.
}
}
}
}
return
}
// VerifyFile returns the temporary file name (if one was used) and the final file path, along with
// downloaded = true if the file is complete and matches the manifest root hash; otherwise it returns
// empty strings and false.
func (fsss *FileSharingSubSystem) VerifyFile(fileKey string) (tempFile string, filePath string, downloaded bool) {
manifestI, exists := fsss.activeDownloads.Load(fileKey)
if exists {
manifest := manifestI.(*Manifest)
if manifest.VerifyFile() == nil {
manifest.Close()
fsss.activeDownloads.Delete(fileKey)
log.Debugf("file verified and downloaded!")
return manifest.TempFileName, manifest.FileName, true
}
}
return "", "", false
}
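
Putting the pieces together, the downloader side drives roughly the following call sequence (a hypothetical sketch, not part of this diff; send stands in for whatever transport delivers PeerMessages to the sharing peer, and the real wiring lives in the peer/engine code):

package files

import "cwtch.im/cwtch/protocol/model"

// downloadSketch is a hypothetical illustration of how a caller drives the subsystem.
func downloadSketch(send func(model.PeerMessage), fileKey string, manifestSize uint64,
	manifestParts map[string][]byte, chunks map[string][]byte) {
	var fsss FileSharingSubSystem

	// 1. request the manifest (manifestSize is learned via an attribute lookup)
	send(fsss.FetchManifest(fileKey, manifestSize))

	// 2. feed every received manifest part back in; a non-empty key means the manifest validated
	for manifestKey, part := range manifestParts {
		if key, serialized := fsss.ReceiveManifestPart(manifestKey, part); key != "" {
			// 3. turn the validated manifest into chunk requests and send them
			for _, msg := range fsss.CompileChunkRequests(key, serialized, "example.tmp", "example") {
				send(msg)
			}
		}
	}

	// 4. store incoming chunks, then verify the reassembled file against the root hash
	for chunkKey, chunk := range chunks {
		fsss.ProcessChunk(chunkKey, chunk)
	}
	if _, filePath, downloaded := fsss.VerifyFile(fileKey); downloaded {
		_ = filePath // the file is complete and verified
	}
}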

View File

@ -1,338 +0,0 @@
package files
import (
"bufio"
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"encoding/json"
"errors"
"fmt"
"git.openprivacy.ca/openprivacy/log"
"io"
"os"
"sync"
)
// Chunk is a wrapper around a hash
type Chunk []byte
// DefaultChunkSize is the default value of a manifest chunk
const DefaultChunkSize = 4096
// MaxManifestSize is the maximum size of a manifest, counted in DefaultChunkSize chunks.
// Because we reconstruct the manifest in memory we have to practically limit this size.
// 2622000 * 4096 bytes ~= 10GB using 4096-byte chunks.
// This makes the actual manifest size ~125MB, which seems reasonable for a 10GB file;
// most file transfers are expected to have manifests that are much smaller.
const MaxManifestSize = 2622000
// Manifest is a collection of hashes and other metadata needed to reconstruct a file and verify contents given a root hash
type Manifest struct {
Chunks []Chunk
FileName string
RootHash []byte
FileSizeInBytes uint64
ChunkSizeInBytes uint64
TempFileName string `json:"-"`
Title string `json:"-"`
chunkComplete []bool
openFd *os.File
progress uint64
lock sync.Mutex
}
// CreateManifest takes in a file path and constructs a file sharing manifest of hashes along with
// other information necessary to download, reconstruct and verify the file.
func CreateManifest(path string) (*Manifest, error) {
// Process file into Chunks
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
reader := bufio.NewReader(f)
buf := make([]byte, DefaultChunkSize)
var chunks []Chunk
fileSizeInBytes := uint64(0)
rootHash := sha512.New()
for {
n, err := reader.Read(buf)
if err != nil {
if err != io.EOF {
return nil, err
}
break
}
hash := sha256.New()
hash.Write(buf[0:n])
rootHash.Write(buf[0:n])
chunkHash := hash.Sum(nil)
chunks = append(chunks, chunkHash)
fileSizeInBytes += uint64(n)
}
return &Manifest{
Chunks: chunks,
FileName: path,
RootHash: rootHash.Sum(nil),
ChunkSizeInBytes: DefaultChunkSize,
FileSizeInBytes: fileSizeInBytes,
chunkComplete: make([]bool, len(chunks)),
}, nil
}
// GetChunkBytes takes in a chunk identifier and returns the bytes associated with that chunk
// it does not attempt to validate the chunk Hash.
func (m *Manifest) GetChunkBytes(id uint64) ([]byte, error) {
m.lock.Lock()
defer m.lock.Unlock()
if id >= uint64(len(m.Chunks)) {
return nil, errors.New("chunk not found")
}
if err := m.getFileHandle(); err != nil {
return nil, err
}
// Seek to Chunk
offset, err := m.openFd.Seek(int64(id*m.ChunkSizeInBytes), 0)
if (uint64(offset) != id*m.ChunkSizeInBytes) || err != nil {
return nil, errors.New("chunk not found")
}
// Read chunk into memory and return...
reader := bufio.NewReader(m.openFd)
buf := make([]byte, m.ChunkSizeInBytes)
n, err := reader.Read(buf)
if err != nil {
if err != io.EOF {
return nil, err
}
}
return buf[0:n], nil
}
// LoadManifest reads in a json serialized Manifest from a file
func LoadManifest(filename string) (*Manifest, error) {
bytes, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
manifest := new(Manifest)
err = json.Unmarshal(bytes, manifest)
if err != nil {
return nil, err
}
manifest.chunkComplete = make([]bool, len(manifest.Chunks))
return manifest, nil
}
// VerifyFile attempts to calculate the rootHash of a file and compare it to the expected rootHash stored in the
// manifest
func (m *Manifest) VerifyFile() error {
m.lock.Lock()
defer m.lock.Unlock()
if err := m.getFileHandle(); err != nil {
return err
}
offset, err := m.openFd.Seek(0, 0)
if offset != 0 || err != nil {
return errors.New("chunk not found")
}
rootHash := sha512.New()
reader := bufio.NewReader(m.openFd)
buf := make([]byte, m.ChunkSizeInBytes)
for {
n, err := reader.Read(buf)
rootHash.Write(buf[0:n])
if err != nil {
if err != io.EOF {
return err
}
break
}
}
calculatedRootHash := rootHash.Sum(nil)
if subtle.ConstantTimeCompare(m.RootHash, calculatedRootHash) != 1 {
return fmt.Errorf("hashes do not match %x %x", m.RootHash, calculatedRootHash)
}
return nil
}
// StoreChunk takes in a chunk id and contents, verifies the chunk has the expected hash and if so store the contents
// in the file.
func (m *Manifest) StoreChunk(id uint64, contents []byte) (uint64, error) {
m.lock.Lock()
defer m.lock.Unlock()
// Check the chunk id
if id >= uint64(len(m.Chunks)) {
return 0, errors.New("invalid chunk id")
}
// Validate the chunk hash
hash := sha256.New()
hash.Write(contents)
chunkHash := hash.Sum(nil)
if subtle.ConstantTimeCompare(chunkHash, m.Chunks[id]) != 1 {
return 0, fmt.Errorf("invalid chunk hash %x %x", chunkHash, m.Chunks[id])
}
if err := m.getFileHandle(); err != nil {
return 0, err
}
offset, err := m.openFd.Seek(int64(id*m.ChunkSizeInBytes), 0)
if (uint64(offset) != id*m.ChunkSizeInBytes) || err != nil {
return 0, errors.New("chunk not found")
}
// Write the contents of the chunk to the file
_, err = m.openFd.Write(contents)
if err == nil && !m.chunkComplete[id] {
m.chunkComplete[id] = true
m.progress++
}
return m.progress, err
}
// private function to set the internal file handle
func (m *Manifest) getFileHandle() error {
// Open the underlying file if a handle is not already open
if m.openFd == nil {
useFileName := m.FileName
if m.TempFileName != "" {
useFileName = m.TempFileName
}
fd, err := os.OpenFile(useFileName, os.O_RDWR, 0600)
if err != nil {
return err
}
m.openFd = fd
}
return nil
}
// GetChunkRequest returns an uncompressed list of Chunks needed to complete the file described in the manifest
func (m *Manifest) GetChunkRequest() ChunkSpec {
return CreateChunkSpec(m.chunkComplete)
}
// PrepareDownload creates an empty file of the expected size of the file described by the manifest
// If the file already exists it assumes it is the correct file and resumes from where it left off.
func (m *Manifest) PrepareDownload() error {
m.lock.Lock()
defer m.lock.Unlock()
m.chunkComplete = make([]bool, len(m.Chunks))
// reset resume state before (re)calculating it below
m.progress = 0
if m.ChunkSizeInBytes == 0 || m.FileSizeInBytes == 0 {
return fmt.Errorf("manifest is invalid")
}
if info, err := os.Stat(m.FileName); os.IsNotExist(err) {
useFileName := m.FileName
if m.TempFileName != "" {
useFileName = m.TempFileName
}
fd, err := os.Create(useFileName)
if err != nil {
return err
}
m.openFd = fd
writer := bufio.NewWriter(m.openFd)
buf := make([]byte, m.ChunkSizeInBytes)
for chunk := 0; chunk < len(m.Chunks)-1; chunk++ {
_, err := writer.Write(buf)
if err != nil {
return err
}
}
lastChunkSize := m.FileSizeInBytes % m.ChunkSizeInBytes
if lastChunkSize > 0 {
buf = make([]byte, lastChunkSize)
_, err := writer.Write(buf)
if err != nil {
return err
}
}
writer.Flush()
} else {
if err != nil {
return err
}
if uint64(info.Size()) != m.FileSizeInBytes {
return fmt.Errorf("file exists but is the wrong size")
}
if err := m.getFileHandle(); err != nil {
return err
}
// Calculate Progress
reader := bufio.NewReader(m.openFd)
buf := make([]byte, m.ChunkSizeInBytes)
chunkI := 0
for {
n, err := reader.Read(buf)
if err != nil {
if err != io.EOF {
return err
}
break
}
if chunkI >= len(m.Chunks) {
log.Errorf("file is larger than the number of chunks assigned. Assuming manifest was corrupted.")
return fmt.Errorf("file is larger than the number of chunks assigned. Assuming manifest was corrupted")
}
// per-chunk hashes are sha256 (matching CreateManifest and StoreChunk)
hash := sha256.New()
hash.Write(buf[0:n])
chunkHash := hash.Sum(nil)
if subtle.ConstantTimeCompare(chunkHash, m.Chunks[chunkI]) == 1 {
m.chunkComplete[chunkI] = true
m.progress++
}
chunkI++
}
}
return nil
}
// Close closes the underlying file descriptor
func (m *Manifest) Close() {
m.lock.Lock()
defer m.lock.Unlock()
if m.openFd != nil {
m.openFd.Close()
}
}
// Save writes a JSON encoded byte array version of the manifest to path
func (m *Manifest) Save(path string) error {
return os.WriteFile(path, m.Serialize(), 0600)
}
// Serialize returns the manifest as a JSON encoded byte array
func (m *Manifest) Serialize() []byte {
data, _ := json.Marshal(m)
return data
}
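
On the sharing side, the manifest built by CreateManifest is what gets registered with the file sharing subsystem. A hypothetical sketch (not part of this diff; how fileKey is derived and communicated is handled elsewhere):

package files

// shareSketch is a hypothetical illustration of registering a local file for sharing.
func shareSketch(fsss *FileSharingSubSystem, fileKey, filePath string) error {
	manifest, err := CreateManifest(filePath)
	if err != nil {
		return err
	}
	// the serialized (JSON) manifest is what downloaders reconstruct via ReceiveManifestPart
	fsss.ShareFile(fileKey, string(manifest.Serialize()))
	// incoming manifest and chunk requests are then answered with
	// fsss.RequestManifestParts(fileKey) and fsss.ProcessChunkRequest(fileKey, chunkSpec).
	return nil
}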

View File

@ -1,150 +0,0 @@
package files
import (
"encoding/hex"
"encoding/json"
"math"
"os"
"testing"
)
func TestManifest(t *testing.T) {
manifest, err := CreateManifest("testdata/example.txt")
if err != nil {
t.Fatalf("manifest create error: %v", err)
}
if len(manifest.Chunks) != 1 {
t.Fatalf("manifest had unepxected Chunks : %v", manifest.Chunks)
}
if manifest.FileSizeInBytes != 12 {
t.Fatalf("manifest had unepxected length : %v", manifest.FileSizeInBytes)
}
if hex.EncodeToString(manifest.RootHash) != "861844d6704e8573fec34d967e20bcfef3d424cf48be04e6dc08f2bd58c729743371015ead891cc3cf1c9d34b49264b510751b1ff9e537937bc46b5d6ff4ecc8" {
t.Fatalf("manifest had incorrect root Hash : %v", manifest.RootHash)
}
t.Logf("%v", manifest)
// Try to read the chunk
_, err = manifest.GetChunkBytes(1)
if err == nil {
t.Fatalf("chunk fetch should have thrown an error")
}
_, err = manifest.GetChunkBytes(0)
if err != nil {
t.Fatalf("chunk fetch error: %v", err)
}
_, err = manifest.GetChunkBytes(0)
if err != nil {
t.Fatalf("chunk fetch error: %v", err)
}
_, err = manifest.GetChunkBytes(0)
if err != nil {
t.Fatalf("chunk fetch error: %v", err)
}
json, _ := json.Marshal(manifest)
t.Logf("%s", json)
}
func TestManifestLarge(t *testing.T) {
manifest, err := CreateManifest("testdata/cwtch.png")
if err != nil {
t.Fatalf("manifest create error: %v", err)
}
if len(manifest.Chunks) != int(math.Ceil(float64(51791)/DefaultChunkSize)) {
t.Fatalf("manifest had unexpected Chunks : %v", manifest.Chunks)
}
if manifest.FileSizeInBytes != 51791 {
t.Fatalf("manifest had unepxected length : %v", manifest.FileSizeInBytes)
}
if hex.EncodeToString(manifest.RootHash) != "8f0ed73bbb30db45b6a740b1251cae02945f48e4f991464d5f3607685c45dcd136a325dab2e5f6429ce2b715e602b20b5b16bf7438fb6235fefe912adcedb5fd" {
t.Fatalf("manifest had incorrect root Hash : %v", manifest.RootHash)
}
t.Logf("%v", len(manifest.Chunks))
json, _ := json.Marshal(manifest)
t.Logf("%v %s", len(json), json)
// Pretend we downloaded the manifest
os.WriteFile("testdata/cwtch.png.manifest", json, 0600)
// Load the manifest from a file
cwtchPngManifest, err := LoadManifest("testdata/cwtch.png.manifest")
if err != nil {
t.Fatalf("manifest create error: %v", err)
}
defer cwtchPngManifest.Close()
t.Logf("%v", cwtchPngManifest)
// Test verifying the hash
if cwtchPngManifest.VerifyFile() != nil {
t.Fatalf("hashes do not validate error: %v", err)
}
// Prepare Download
cwtchPngOutManifest, err := LoadManifest("testdata/cwtch.png.manifest")
if err != nil {
t.Fatalf("could not prepare download %v", err)
}
cwtchPngOutManifest.FileName = "testdata/cwtch.out.png"
defer cwtchPngOutManifest.Close()
err = cwtchPngOutManifest.PrepareDownload()
if err != nil {
t.Fatalf("could not prepare download %v", err)
}
for i := 0; i < len(cwtchPngManifest.Chunks); i++ {
t.Logf("Sending Chunk %v %x from %v", i, cwtchPngManifest.Chunks[i], cwtchPngManifest.FileName)
contents, err := cwtchPngManifest.GetChunkBytes(uint64(i))
if err != nil {
t.Fatalf("could not get chunk %v %v", i, err)
}
t.Logf("Progress: %v", cwtchPngOutManifest.chunkComplete)
_, err = cwtchPngOutManifest.StoreChunk(uint64(i), contents)
if err != nil {
t.Fatalf("could not store chunk %v %v", i, err)
}
// Attempt to store the chunk in an invalid position...
_, err = cwtchPngOutManifest.StoreChunk(uint64(i+1), contents)
if err == nil {
t.Fatalf("incorrect chunk store")
}
}
// Attempt to store an invalid chunk...should trigger an error
_, err = cwtchPngOutManifest.StoreChunk(uint64(len(cwtchPngManifest.Chunks)), []byte{0xff})
if err == nil {
t.Fatalf("incorrect chunk store")
}
err = cwtchPngOutManifest.VerifyFile()
if err != nil {
t.Fatalf("could not verify file %v", err)
}
// Test that changing the hash throws an error
cwtchPngManifest.RootHash[3] = 0xFF
if cwtchPngManifest.VerifyFile() == nil {
t.Fatalf("hashes should not validate error")
}
}

Binary file not shown (image, before: 51 KiB; deleted)
Binary file not shown (image, before: 51 KiB; deleted)
View File

@ -1 +0,0 @@
{"Chunks":["BXbFagOrWyDwcsnW+f1O6fddCqJywEISjUrzI31FAE0=","1SZcGk0NSduL093Hh0hZ4WVcx2o6VKgL3kUy2WqmdLY=","R4wwVcR4andJJ0fkXlp/td1ZSjH7xHi3Egh8aloWONA=","TAuI06kog7TYVDSO8AgWprAGY8LSlGBwqZvpgMymhZE=","XQLxqLjiM0qIAeOmGIrZJkyuCEfJ4t+ikgbV1ohudiY=","aXInp/WF58A5/TGkwAwniNvIU2ZlRjVtrpClw0sBcVM=","oSCjcrenQ4+Pix4jtgNCRt40K0kQ41eCumSJO0Gqo/0=","FebZSfHuyVdRWkS8/IaWA6UooEURkf9vPxnqZXKII8g=","tITbm77ca1YmExGzbX4WBP5fAOh4bUzDtceN1VBYcBI=","VJd8rWuMtrZzqobdKam0n6t4Vgo72GcsNRNzMk46PsI=","7ywzxLV44HVk9wz+QQHvvVQJAFkTU6/pHyVFjE0uF40=","PoHUwEoQOSXv8ZpJ9bGeCZqiwY34bXcFcBki2OPxd8o=","eogaSYPKrl0MFEqVP1mwUMczMCcnjjwUmUz/0DsAF48="],"FileName":"testdata/cwtch.png","RootHash":"jw7XO7sw20W2p0CxJRyuApRfSOT5kUZNXzYHaFxF3NE2oyXasuX2QpzitxXmArILWxa/dDj7YjX+/pEq3O21/Q==","FileSizeInBytes":51791,"ChunkSizeInBytes":4096}

View File

@ -1 +0,0 @@
Hello World!

View File

@ -0,0 +1,189 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: group_message.proto
package protocol
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import control "github.com/s-rah/go-ricochet/wire/control"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type CwtchServerPacket struct {
GroupMessage *GroupMessage `protobuf:"bytes,1,opt,name=group_message,json=groupMessage" json:"group_message,omitempty"`
FetchMessage *FetchMessage `protobuf:"bytes,2,opt,name=fetch_message,json=fetchMessage" json:"fetch_message,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CwtchServerPacket) Reset() { *m = CwtchServerPacket{} }
func (m *CwtchServerPacket) String() string { return proto.CompactTextString(m) }
func (*CwtchServerPacket) ProtoMessage() {}
func (*CwtchServerPacket) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
func (m *CwtchServerPacket) GetGroupMessage() *GroupMessage {
if m != nil {
return m.GroupMessage
}
return nil
}
func (m *CwtchServerPacket) GetFetchMessage() *FetchMessage {
if m != nil {
return m.FetchMessage
}
return nil
}
type FetchMessage struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *FetchMessage) Reset() { *m = FetchMessage{} }
func (m *FetchMessage) String() string { return proto.CompactTextString(m) }
func (*FetchMessage) ProtoMessage() {}
func (*FetchMessage) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
type GroupMessage struct {
Ciphertext []byte `protobuf:"bytes,1,req,name=ciphertext" json:"ciphertext,omitempty"`
Spamguard []byte `protobuf:"bytes,2,req,name=spamguard" json:"spamguard,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *GroupMessage) Reset() { *m = GroupMessage{} }
func (m *GroupMessage) String() string { return proto.CompactTextString(m) }
func (*GroupMessage) ProtoMessage() {}
func (*GroupMessage) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
func (m *GroupMessage) GetCiphertext() []byte {
if m != nil {
return m.Ciphertext
}
return nil
}
func (m *GroupMessage) GetSpamguard() []byte {
if m != nil {
return m.Spamguard
}
return nil
}
// DecryptedGroupMessage is *never* sent in the clear on the wire
// and is only ever sent when encrypted in the ciphertext parameter of
// GroupMessage
type DecryptedGroupMessage struct {
Onion *string `protobuf:"bytes,1,req,name=onion" json:"onion,omitempty"`
Timestamp *int32 `protobuf:"varint,2,req,name=timestamp" json:"timestamp,omitempty"`
Text *string `protobuf:"bytes,3,req,name=text" json:"text,omitempty"`
Signature []byte `protobuf:"bytes,4,req,name=signature" json:"signature,omitempty"`
SignedGroupId []byte `protobuf:"bytes,5,req,name=signed_group_id,json=signedGroupId" json:"signed_group_id,omitempty"`
PreviousMessageSig []byte `protobuf:"bytes,6,req,name=previous_message_sig,json=previousMessageSig" json:"previous_message_sig,omitempty"`
// Used to prevent analysis on text length, length is 1024 - len(text)
Padding []byte `protobuf:"bytes,7,req,name=padding" json:"padding,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *DecryptedGroupMessage) Reset() { *m = DecryptedGroupMessage{} }
func (m *DecryptedGroupMessage) String() string { return proto.CompactTextString(m) }
func (*DecryptedGroupMessage) ProtoMessage() {}
func (*DecryptedGroupMessage) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
func (m *DecryptedGroupMessage) GetOnion() string {
if m != nil && m.Onion != nil {
return *m.Onion
}
return ""
}
func (m *DecryptedGroupMessage) GetTimestamp() int32 {
if m != nil && m.Timestamp != nil {
return *m.Timestamp
}
return 0
}
func (m *DecryptedGroupMessage) GetText() string {
if m != nil && m.Text != nil {
return *m.Text
}
return ""
}
func (m *DecryptedGroupMessage) GetSignature() []byte {
if m != nil {
return m.Signature
}
return nil
}
func (m *DecryptedGroupMessage) GetSignedGroupId() []byte {
if m != nil {
return m.SignedGroupId
}
return nil
}
func (m *DecryptedGroupMessage) GetPreviousMessageSig() []byte {
if m != nil {
return m.PreviousMessageSig
}
return nil
}
func (m *DecryptedGroupMessage) GetPadding() []byte {
if m != nil {
return m.Padding
}
return nil
}
var E_ServerNonce = &proto.ExtensionDesc{
ExtendedType: (*control.ChannelResult)(nil),
ExtensionType: ([]byte)(nil),
Field: 8200,
Name: "protocol.server_nonce",
Tag: "bytes,8200,opt,name=server_nonce,json=serverNonce",
Filename: "group_message.proto",
}
func init() {
proto.RegisterType((*CwtchServerPacket)(nil), "protocol.CwtchServerPacket")
proto.RegisterType((*FetchMessage)(nil), "protocol.FetchMessage")
proto.RegisterType((*GroupMessage)(nil), "protocol.GroupMessage")
proto.RegisterType((*DecryptedGroupMessage)(nil), "protocol.DecryptedGroupMessage")
proto.RegisterExtension(E_ServerNonce)
}
func init() { proto.RegisterFile("group_message.proto", fileDescriptor2) }
var fileDescriptor2 = []byte{
// 372 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0xdd, 0xaa, 0xd3, 0x30,
0x1c, 0xa7, 0xfb, 0x70, 0xee, 0xbf, 0x6e, 0x62, 0x9c, 0x1a, 0x44, 0xa4, 0xf4, 0x42, 0x76, 0x35,
0xc4, 0x4b, 0x87, 0x20, 0x4c, 0x14, 0x41, 0x45, 0xba, 0x07, 0x28, 0xa1, 0xfd, 0x2f, 0x0d, 0xb6,
0x49, 0x48, 0xd2, 0x9d, 0x73, 0xde, 0xe0, 0x3c, 0xdc, 0x79, 0x99, 0xf3, 0x06, 0x87, 0xa6, 0xeb,
0xd6, 0x9d, 0x8b, 0x73, 0x55, 0x7e, 0xdf, 0x21, 0x0d, 0xbc, 0xe2, 0x46, 0xd5, 0x3a, 0xad, 0xd0,
0x5a, 0xc6, 0x71, 0xad, 0x8d, 0x72, 0x8a, 0x3c, 0xf7, 0x9f, 0x4c, 0x95, 0xef, 0x96, 0x5b, 0x25,
0x9d, 0x51, 0xe5, 0xb6, 0x60, 0x52, 0x62, 0xd9, 0xea, 0xf1, 0x5d, 0x00, 0x2f, 0xb7, 0x57, 0x2e,
0x2b, 0x76, 0x68, 0x0e, 0x68, 0xfe, 0xb1, 0xec, 0x3f, 0x3a, 0xb2, 0x81, 0xf9, 0x45, 0x19, 0x0d,
0xa2, 0x60, 0x35, 0xfb, 0xfc, 0x66, 0xdd, 0xb5, 0xad, 0x7f, 0x36, 0xf2, 0x9f, 0x56, 0x4d, 0x42,
0xde, 0x43, 0x4d, 0x78, 0x8f, 0x2e, 0x2b, 0x4e, 0xe1, 0xc1, 0xe3, 0xf0, 0x8f, 0x46, 0x3e, 0x85,
0xf7, 0x3d, 0x44, 0xbe, 0xc2, 0xe2, 0x62, 0xd9, 0xd2, 0x61, 0x34, 0x7c, 0x62, 0x7a, 0xde, 0x9f,
0xb6, 0xf1, 0x02, 0xc2, 0x7e, 0x79, 0xfc, 0x1b, 0xc2, 0xbe, 0x9d, 0x7c, 0x00, 0xc8, 0x84, 0x2e,
0xd0, 0x38, 0xbc, 0x76, 0x34, 0x88, 0x06, 0xab, 0x30, 0xe9, 0x31, 0xe4, 0x3d, 0x4c, 0xad, 0x66,
0x15, 0xaf, 0x99, 0xc9, 0xe9, 0xc0, 0xcb, 0x67, 0x22, 0xbe, 0x0f, 0xe0, 0xf5, 0x77, 0xcc, 0xcc,
0x8d, 0x76, 0x98, 0x5f, 0xf4, 0x2e, 0x61, 0xac, 0xa4, 0x50, 0xd2, 0x57, 0x4e, 0x93, 0x16, 0x34,
0x6d, 0x4e, 0x54, 0x68, 0x1d, 0xab, 0xb4, 0x6f, 0x1b, 0x27, 0x67, 0x82, 0x10, 0x18, 0xf9, 0x53,
0x0c, 0x7d, 0x64, 0x74, 0xda, 0x17, 0x5c, 0x32, 0x57, 0x1b, 0xa4, 0xa3, 0xe3, 0x7e, 0x47, 0x90,
0x8f, 0xf0, 0xa2, 0x01, 0x98, 0xa7, 0xed, 0x1d, 0x89, 0x9c, 0x8e, 0xbd, 0x67, 0xde, 0xd2, 0xfe,
0x48, 0xbf, 0x72, 0xf2, 0x09, 0x96, 0xda, 0xe0, 0x41, 0xa8, 0xda, 0x76, 0xf7, 0x98, 0x5a, 0xc1,
0xe9, 0x33, 0x6f, 0x26, 0x9d, 0x76, 0x3c, 0xfc, 0x4e, 0x70, 0x42, 0x61, 0xa2, 0x59, 0x9e, 0x0b,
0xc9, 0xe9, 0xc4, 0x9b, 0x3a, 0xf8, 0x65, 0x03, 0xa1, 0xf5, 0x4f, 0x23, 0x95, 0x4a, 0x66, 0x48,
0xde, 0x9e, 0x7f, 0xc4, 0xf1, 0x25, 0x25, 0x68, 0xeb, 0xd2, 0xd1, 0xdb, 0x6f, 0x51, 0xb0, 0x0a,
0x93, 0x59, 0xeb, 0xfe, 0xdb, 0x98, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x2e, 0xc6, 0x75,
0x93, 0x02, 0x00, 0x00,
}
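
As a hypothetical sketch (not part of this diff; the import path, signing and encryption steps are assumptions), a DecryptedGroupMessage is assembled with the proto2 pointer helpers and marshalled into the plaintext that later becomes GroupMessage.Ciphertext:

package sketch

import (
	"cwtch.im/cwtch/protocol" // assumed import path for the generated package
	"github.com/golang/protobuf/proto"
)

func buildGroupPlaintext(onion, text string, timestamp int32, sig, signedGroupID, prevSig []byte) ([]byte, error) {
	padLen := 0
	if len(text) < 1024 {
		padLen = 1024 - len(text) // pad so ciphertext length does not leak the text length
	}
	dgm := &protocol.DecryptedGroupMessage{
		Onion:              proto.String(onion),
		Timestamp:          proto.Int32(timestamp),
		Text:               proto.String(text),
		Signature:          sig,
		SignedGroupId:      signedGroupID,
		PreviousMessageSig: prevSig,
		Padding:            make([]byte, padLen),
	}
	return proto.Marshal(dgm) // this plaintext is then encrypted into GroupMessage.Ciphertext
}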

View File

@ -0,0 +1,35 @@
syntax = "proto2";
package protocol;
import "ControlChannel.proto";
message CwtchServerPacket {
optional GroupMessage group_message = 1;
optional FetchMessage fetch_message = 2;
}
extend protocol.ChannelResult {
optional bytes server_nonce = 8200; // 32 random bytes
}
message FetchMessage {
}
message GroupMessage {
required bytes ciphertext = 1;
required bytes spamguard = 2;
}
// DecryptedGroupMessage is *never* sent in the clear on the wire
// and is only ever sent when encrypted in the ciphertext parameter of
// GroupMessage
message DecryptedGroupMessage {
required string onion = 1;
required int32 timestamp = 2;
required string text = 3;
required bytes signature = 4;
required bytes signed_group_id = 5;
required bytes previous_message_sig =6;
// Used to prevent analysis on text length, length is 1024 - len(text)
required bytes padding = 7;
}

View File

@ -1,101 +0,0 @@
package groups
import (
"encoding/json"
"git.openprivacy.ca/cwtch.im/tapir"
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
)
// CwtchServerSyncedCapability is used to indicate that a given cwtch server is synced
const CwtchServerSyncedCapability = tapir.Capability("CwtchServerSyncedCapability")
// GroupInvite provides a structured type for communicating group information to peers
type GroupInvite struct {
GroupID string
GroupName string
SignedGroupID []byte
Timestamp uint64
SharedKey []byte
ServerHost string
}
// DecryptedGroupMessage is the main encapsulation of group message data
type DecryptedGroupMessage struct {
Text string
Onion string
Timestamp uint64
// NOTE: SignedGroupID is now a misnomer, the only way this is signed is indirectly via the signed encrypted group messages
// We now treat GroupID as binding to a server/key rather than an "owner" - additional validation logic (to e.g.
// respect particular group constitutions) can be built on top of group messages, but the underlying groups are
// now agnostic to those models.
SignedGroupID []byte
PreviousMessageSig []byte
Padding []byte
}
// EncryptedGroupMessage provides an encapsulation of the encrypted group message stored on the server
type EncryptedGroupMessage struct {
Ciphertext []byte
Signature []byte
}
// CachedEncryptedGroupMessage provides an encapsulation of the encrypted group message for local caching / error reporting
type CachedEncryptedGroupMessage struct {
EncryptedGroupMessage
Group string
}
// ToBytes converts the encrypted group message to a set of bytes for serialization
func (egm EncryptedGroupMessage) ToBytes() []byte {
data, _ := json.Marshal(egm)
return data
}
// MessageType defines the enum for TokenBoard messages
type MessageType int
// Message Types
const (
ReplayRequestMessage MessageType = iota
ReplayResultMessage
PostRequestMessage
PostResultMessage
NewMessageMessage
)
// Message encapsulates the application protocol
type Message struct {
MessageType MessageType
PostRequest *PostRequest `json:",omitempty"`
PostResult *PostResult `json:",omitempty"`
NewMessage *NewMessage `json:",omitempty"`
ReplayRequest *ReplayRequest `json:",omitempty"`
ReplayResult *ReplayResult `json:",omitempty"`
}
// ReplayRequest requests a reply from the given Commit
type ReplayRequest struct {
LastCommit []byte
}
// PostRequest requests to post the message to the board with the given token
type PostRequest struct {
Token privacypass.SpentToken
EGM EncryptedGroupMessage
}
// PostResult returns the success of a given post attempt
type PostResult struct {
Success bool
}
// ReplayResult is sent by the server before a stream of replayed messages
type ReplayResult struct {
NumMessages int
}
// NewMessage is used to send a new bulletin board message to interested peers.
type NewMessage struct {
//Token privacypass.SpentToken
EGM EncryptedGroupMessage
}
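
For example, a post to the token board travels as a JSON-encoded Message envelope (a hypothetical sketch in the same package; obtaining the privacypass.SpentToken is out of scope here):

package groups

import (
	"encoding/json"

	"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
)

// buildPostRequest is a hypothetical illustration of wrapping an encrypted group
// message and a spent token into the wire-level Message envelope.
func buildPostRequest(token privacypass.SpentToken, egm EncryptedGroupMessage) ([]byte, error) {
	msg := Message{
		MessageType: PostRequestMessage,
		PostRequest: &PostRequest{
			Token: token,
			EGM:   egm,
		},
	}
	return json.Marshal(msg)
}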

View File

@ -1,53 +0,0 @@
package model
import (
"bytes"
"errors"
)
// PeerMessage is an encapsulation that can be used by higher level applications
type PeerMessage struct {
// ID **must** only contain alphanumeric characters separated by period.
ID string // A unique Message ID (primarily used for acknowledgments)
// Context **must** only contain alphanumeric characters separated by period.
Context string // A unique context identifier i.e. im.cwtch.chat
// Data can contain anything
Data []byte // A data packet.
}
// Serialize constructs an efficient serialized representation
// Format: [ID String] | [Context String] | Binary Data
func (m *PeerMessage) Serialize() []byte {
return append(append([]byte(m.ID+"|"), []byte(m.Context+"|")...), m.Data...)
}
// ParsePeerMessage returns either a deserialized PeerMessage or an error if it is malformed
func ParsePeerMessage(message []byte) (*PeerMessage, error) {
// find the identifier prefix
idTerminator := bytes.IndexByte(message, '|')
if idTerminator != -1 && idTerminator+1 < len(message) {
// find the context terminator prefix
contextbegin := idTerminator + 1
contextTerminator := bytes.IndexByte(message[contextbegin:], '|')
if contextTerminator != -1 {
// check that we have data
dataBegin := contextbegin + contextTerminator + 1
var data []byte
if dataBegin < len(message) {
data = message[dataBegin:]
}
// compile the message
return &PeerMessage{
ID: string(message[0:idTerminator]),
Context: string(message[contextbegin : contextbegin+contextTerminator]),
Data: data,
}, nil
}
}
return nil, errors.New("invalid message")
}
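
The framing is easiest to see with a round trip (a hypothetical Example function, not part of this diff; the ID and Context values are placeholders):

package model

import "fmt"

// ExamplePeerMessage shows the "[ID]|[Context]|data" framing used by Serialize and ParsePeerMessage.
func ExamplePeerMessage() {
	msg := PeerMessage{ID: "example.1", Context: "im.cwtch.chat", Data: []byte("hello")}
	wire := msg.Serialize()
	fmt.Println(string(wire))
	if parsed, err := ParsePeerMessage(wire); err == nil {
		fmt.Println(parsed.ID, parsed.Context, string(parsed.Data))
	}
	// Output:
	// example.1|im.cwtch.chat|hello
	// example.1 im.cwtch.chat hello
}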

100
protocol/spam/spamguard.go Normal file
View File

@ -0,0 +1,100 @@
package spam
import (
"crypto/rand"
"crypto/sha256"
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/utils"
"github.com/s-rah/go-ricochet/wire/control"
"io"
//"fmt"
)
// Guard implements a spam protection mechanism for Cwtch Servers.
type Guard struct {
Difficulty int
nonce [24]byte
}
func getRandomness(arr *[24]byte) {
if _, err := io.ReadFull(rand.Reader, arr[:]); err != nil {
utils.CheckError(err)
}
}
//GenerateChallenge returns a channel result packet with a spamguard challenge nonce
func (sg *Guard) GenerateChallenge(channelID int32) []byte {
cr := &Protocol_Data_Control.ChannelResult{
ChannelIdentifier: proto.Int32(channelID),
Opened: proto.Bool(true),
}
var nonce [24]byte
getRandomness(&nonce)
sg.nonce = nonce
err := proto.SetExtension(cr, protocol.E_ServerNonce, sg.nonce[:])
utils.CheckError(err)
pc := &Protocol_Data_Control.Packet{
ChannelResult: cr,
}
ret, err := proto.Marshal(pc)
utils.CheckError(err)
return ret
}
// SolveChallenge takes in a challenge and a message and returns a solution
// The solution is a 24 byte nonce which when hashed with the challenge and the message
// produces a sha256 hash with Difficulty leading 0s
func (sg *Guard) SolveChallenge(challenge []byte, message []byte) []byte {
solved := false
var spamguard [24]byte
sum := sha256.Sum256([]byte{})
solve := make([]byte, len(challenge)+len(message)+len(spamguard))
for !solved {
getRandomness(&spamguard)
copy(solve[0:], challenge[:])
copy(solve[len(challenge):], message[:])
copy(solve[len(challenge)+len(message):], spamguard[:])
sum = sha256.Sum256(solve)
solved = true
for i := 0; i < sg.Difficulty; i++ {
if sum[i] != 0x00 {
solved = false
}
}
}
//fmt.Printf("[SOLVED] %x\n",sha256.Sum256(solve))
return spamguard[:]
}
// ValidateChallenge returns true if the message and spamguard pass the challenge
func (sg *Guard) ValidateChallenge(message []byte, spamguard []byte) bool {
if len(spamguard) != 24 {
return false
}
// If the message is too large just throw it away.
if len(message) > 2048 {
return false
}
solve := make([]byte, len(sg.nonce)+len(message)+len(spamguard))
copy(solve[0:], sg.nonce[:])
copy(solve[len(sg.nonce):], message[:])
copy(solve[len(sg.nonce)+len(message):], spamguard[:])
sum := sha256.Sum256(solve)
for i := 0; i < sg.Difficulty; i++ {
if sum[i] != 0x00 {
return false
}
}
return true
}
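
Conceptually, a solution is valid when sha256(challenge || message || nonce) begins with Difficulty zero bytes. The following hypothetical helper (not part of this diff) restates that check without the Guard state:

package spam

import "crypto/sha256"

// checkSolution is a stateless restatement of the proof-of-work test used above.
func checkSolution(challenge, message, nonce []byte, difficulty int) bool {
	input := make([]byte, 0, len(challenge)+len(message)+len(nonce))
	input = append(input, challenge...)
	input = append(input, message...)
	input = append(input, nonce...)
	sum := sha256.Sum256(input)
	for i := 0; i < difficulty; i++ {
		if sum[i] != 0x00 {
			return false
		}
	}
	return true
}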

View File

@ -0,0 +1,69 @@
package spam
import (
"cwtch.im/cwtch/protocol"
"github.com/golang/protobuf/proto"
"github.com/s-rah/go-ricochet/wire/control"
"testing"
)
func TestSpamGuard(t *testing.T) {
var spamGuard Guard
spamGuard.Difficulty = 2
challenge := spamGuard.GenerateChallenge(3)
control := new(Protocol_Data_Control.Packet)
proto.Unmarshal(challenge[:], control)
if control.GetChannelResult() != nil {
ce, _ := proto.GetExtension(control.GetChannelResult(), protocol.E_ServerNonce)
challenge := ce.([]byte)[:]
sgsolve := spamGuard.SolveChallenge(challenge, []byte("Hello"))
t.Logf("Solved: %v %v", challenge, sgsolve)
result := spamGuard.ValidateChallenge([]byte("Hello"), sgsolve)
if result != true {
t.Errorf("Validating Guard Failed")
}
return
}
t.Errorf("Failed SpamGaurd")
}
func TestSpamGuardBadLength(t *testing.T) {
var spamGuard Guard
spamGuard.Difficulty = 2
spamGuard.GenerateChallenge(3)
result := spamGuard.ValidateChallenge([]byte("test"), []byte{0x00, 0x00})
if result {
t.Errorf("Validating Guard should have failed")
}
}
func TestSpamGuardFail(t *testing.T) {
var spamGuard Guard
spamGuard.Difficulty = 2
challenge := spamGuard.GenerateChallenge(3)
control := new(Protocol_Data_Control.Packet)
proto.Unmarshal(challenge[:], control)
if control.GetChannelResult() != nil {
ce, _ := proto.GetExtension(control.GetChannelResult(), protocol.E_ServerNonce)
challenge := ce.([]byte)[:]
var spamGuard2 Guard
spamGuard2.Difficulty = 1
sgsolve := spamGuard2.SolveChallenge(challenge, []byte("Hello"))
t.Logf("Solved: %v %v", challenge, sgsolve)
result := spamGuard.ValidateChallenge([]byte("Hello"), sgsolve)
if result {
t.Errorf("Validating Guard successes")
}
return
}
t.Errorf("Failed SpamGaurd")
}

36
server/app/main.go Normal file
View File

@ -0,0 +1,36 @@
package main
import (
cwtchserver "cwtch.im/cwtch/server"
"github.com/s-rah/go-ricochet/utils"
"io/ioutil"
"log"
"os"
)
const privateKeyFile = "./private_key"
func checkAndGenPrivateKey(privateKeyFile string) {
if _, err := os.Stat(privateKeyFile); os.IsNotExist(err) {
log.Printf("no private key found!")
log.Printf("generating new private key...")
pk, err := utils.GeneratePrivateKey()
if err != nil {
log.Fatalf("error generating new private key: %v\n", err)
}
err = ioutil.WriteFile(privateKeyFile, []byte(utils.PrivateKeyToString(pk)), 0400)
if err != nil {
log.Fatalf("error writing new private key to file %s: %v\n", privateKeyFile, err)
}
}
}
func main() {
checkAndGenPrivateKey(privateKeyFile)
server := new(cwtchserver.Server)
log.Printf("starting cwtch server...")
// TODO load params from .cwtch/server.conf or command line flag
server.Run(privateKeyFile, 100000)
}

Some files were not shown because too many files have changed in this diff.