Compare commits
No commits in common. "master" and "v0.8.7" have entirely different histories.
145
.drone.yml
145
.drone.yml
|
@ -1,89 +1,64 @@
|
||||||
---
|
workspace:
|
||||||
kind: pipeline
|
base: /go
|
||||||
type: docker
|
path: src/cwtch.im/cwtch
|
||||||
name: linux-test
|
|
||||||
|
|
||||||
steps:
|
pipeline:
|
||||||
- name: fetch
|
fetch:
|
||||||
image: golang:1.21.5
|
image: golang
|
||||||
volumes:
|
|
||||||
- name: deps
|
|
||||||
path: /go
|
|
||||||
commands:
|
|
||||||
- go install honnef.co/go/tools/cmd/staticcheck@latest
|
|
||||||
- go install go.uber.org/nilaway/cmd/nilaway@latest
|
|
||||||
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/branch/master/tor/tor-0.4.8.9-linux-x86_64.tar.gz -O tor.tar.gz
|
|
||||||
- tar -xzf tor.tar.gz
|
|
||||||
- chmod a+x Tor/tor
|
|
||||||
- export PATH=$PWD/Tor/:$PATH
|
|
||||||
- export LD_LIBRARY_PATH=$PWD/Tor/
|
|
||||||
- tor --version
|
|
||||||
- export GO111MODULE=on
|
|
||||||
- name: quality
|
|
||||||
image: golang:1.21.5
|
|
||||||
volumes:
|
|
||||||
- name: deps
|
|
||||||
path: /go
|
|
||||||
commands:
|
|
||||||
- ./testing/quality.sh
|
|
||||||
- name: units-tests
|
|
||||||
image: golang:1.21.5
|
|
||||||
volumes:
|
|
||||||
- name: deps
|
|
||||||
path: /go
|
|
||||||
commands:
|
|
||||||
- export PATH=`pwd`:$PATH
|
|
||||||
- sh testing/tests.sh
|
|
||||||
- name: integ-test
|
|
||||||
image: golang:1.21.5
|
|
||||||
volumes:
|
|
||||||
- name: deps
|
|
||||||
path: /go
|
|
||||||
commands:
|
|
||||||
- export PATH=$PWD/Tor/:$PATH
|
|
||||||
- export LD_LIBRARY_PATH=$PWD/Tor/
|
|
||||||
- tor --version
|
|
||||||
- go test -timeout=30m -race -v cwtch.im/cwtch/testing/
|
|
||||||
- name: filesharing-integ-test
|
|
||||||
image: golang:1.21.5
|
|
||||||
volumes:
|
|
||||||
- name: deps
|
|
||||||
path: /go
|
|
||||||
commands:
|
|
||||||
- export PATH=$PWD/Tor/:$PATH
|
|
||||||
- export LD_LIBRARY_PATH=$PWD/Tor/
|
|
||||||
- go test -timeout=20m -race -v cwtch.im/cwtch/testing/filesharing
|
|
||||||
- name: filesharing-autodownload-integ-test
|
|
||||||
image: golang:1.21.5
|
|
||||||
volumes:
|
|
||||||
- name: deps
|
|
||||||
path: /go
|
|
||||||
commands:
|
|
||||||
- export PATH=$PWD/Tor/:$PATH
|
|
||||||
- export LD_LIBRARY_PATH=$PWD/Tor/
|
|
||||||
- go test -timeout=20m -race -v cwtch.im/cwtch/testing/autodownload
|
|
||||||
- name: notify-gogs
|
|
||||||
image: openpriv/drone-gogs
|
|
||||||
pull: if-not-exists
|
|
||||||
when:
|
when:
|
||||||
|
repo: cwtch.im/cwtch
|
||||||
|
branch: master
|
||||||
|
event: [ push, pull_request ]
|
||||||
|
commands:
|
||||||
|
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/master/tor/tor
|
||||||
|
- wget https://git.openprivacy.ca/openprivacy/buildfiles/raw/master/tor/torrc
|
||||||
|
- chmod a+x tor
|
||||||
|
- go get -u golang.org/x/lint/golint
|
||||||
|
- export GO111MODULE=on
|
||||||
|
- go mod vendor
|
||||||
|
quality:
|
||||||
|
image: golang
|
||||||
|
when:
|
||||||
|
repo: cwtch.im/cwtch
|
||||||
|
branch: master
|
||||||
|
event: [ push, pull_request ]
|
||||||
|
commands:
|
||||||
|
- go list ./... | xargs go vet
|
||||||
|
- go list ./... | xargs golint -set_exit_status
|
||||||
|
units-tests:
|
||||||
|
image: golang
|
||||||
|
when:
|
||||||
|
repo: cwtch.im/cwtch
|
||||||
|
branch: master
|
||||||
|
event: [ push, pull_request ]
|
||||||
|
commands:
|
||||||
|
- export PATH=$PATH:/go/src/cwtch.im/cwtch
|
||||||
|
- sh testing/tests.sh
|
||||||
|
integ-test:
|
||||||
|
image: golang
|
||||||
|
when:
|
||||||
|
repo: cwtch.im/cwtch
|
||||||
|
branch: master
|
||||||
|
event: [ push, pull_request ]
|
||||||
|
commands:
|
||||||
|
- go test -race -v cwtch.im/cwtch/testing/
|
||||||
|
notify-email:
|
||||||
|
image: drillster/drone-email
|
||||||
|
host: build.openprivacy.ca
|
||||||
|
port: 25
|
||||||
|
skip_verify: true
|
||||||
|
from: drone@openprivacy.ca
|
||||||
|
when:
|
||||||
|
repo: cwtch.im/cwtch
|
||||||
|
branch: master
|
||||||
|
event: [ push, pull_request ]
|
||||||
|
status: [ failure ]
|
||||||
|
notify-gogs:
|
||||||
|
image: openpriv/drone-gogs
|
||||||
|
when:
|
||||||
|
repo: cwtch.im/cwtch
|
||||||
|
branch: master
|
||||||
event: pull_request
|
event: pull_request
|
||||||
status: [ success, changed, failure ]
|
status: [ success, changed, failure ]
|
||||||
environment:
|
secrets: [gogs_account_token]
|
||||||
GOGS_ACCOUNT_TOKEN:
|
gogs_url: https://git.openprivacy.ca
|
||||||
from_secret: gogs_account_token
|
|
||||||
settings:
|
|
||||||
gogs_url: https://git.openprivacy.ca
|
|
||||||
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
# gopath where bin and pkg lives to persist across steps
|
|
||||||
- name: deps
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
trigger:
|
|
||||||
repo: cwtch.im/cwtch
|
|
||||||
branch: master
|
|
||||||
event:
|
|
||||||
- push
|
|
||||||
- pull_request
|
|
||||||
- tag
|
|
||||||
|
|
|
@ -12,27 +12,3 @@ server/app/messages
|
||||||
/storage/*/testing/
|
/storage/*/testing/
|
||||||
/storage/testing/
|
/storage/testing/
|
||||||
/testing/storage/
|
/testing/storage/
|
||||||
ebusgraph.txt
|
|
||||||
messages/
|
|
||||||
serverMonitorReport.txt
|
|
||||||
testing/cwtch.out.png
|
|
||||||
testing/filesharing/storage
|
|
||||||
testing/filesharing/tordir
|
|
||||||
testing/filesharing/cwtch.out.png
|
|
||||||
testing/filesharing/cwtch.out.png.manifest
|
|
||||||
testing/cwtch.out.png.manifest
|
|
||||||
testing/tordir/
|
|
||||||
tokens-bak.db
|
|
||||||
tokens.db
|
|
||||||
tokens1.db
|
|
||||||
arch/
|
|
||||||
testing/encryptedstorage/encrypted_storage_profiles
|
|
||||||
testing/encryptedstorage/tordir
|
|
||||||
*.tar.gz
|
|
||||||
data-dir-cwtchtool/
|
|
||||||
tokens
|
|
||||||
tordir/
|
|
||||||
testing/autodownload/download_dir
|
|
||||||
testing/autodownload/storage
|
|
||||||
*.swp
|
|
||||||
testing/managerstorage/*
|
|
|
@ -0,0 +1,78 @@
|
||||||
|
FROM golang as server-build-stage
|
||||||
|
ENV CGO_ENABLED=0 GOOS=linux
|
||||||
|
|
||||||
|
WORKDIR /go/src/cwtch.im/cwtch
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
RUN go get -d -v ./...
|
||||||
|
#RUN go install -v ./...
|
||||||
|
WORKDIR /go/src/cwtch.im/cwtch/server/app/
|
||||||
|
RUN go build -ldflags "-extldflags '-static'"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#----------------------------------------------
|
||||||
|
FROM alpine:latest as tor-build-stage
|
||||||
|
|
||||||
|
# Install prerequisites
|
||||||
|
RUN apk --no-cache add --update \
|
||||||
|
gnupg \
|
||||||
|
build-base \
|
||||||
|
libevent \
|
||||||
|
libevent-dev \
|
||||||
|
libressl \
|
||||||
|
libressl-dev \
|
||||||
|
xz-libs \
|
||||||
|
xz-dev \
|
||||||
|
zlib \
|
||||||
|
zlib-dev \
|
||||||
|
zstd \
|
||||||
|
zstd-dev \
|
||||||
|
&& wget -q https://www.torproject.org/dist/tor-0.3.5.3-alpha.tar.gz \
|
||||||
|
&& tar xf tor-0.3.5.3-alpha.tar.gz \
|
||||||
|
&& cd tor-0.3.5.3-alpha \
|
||||||
|
&& ./configure \
|
||||||
|
&& make install \
|
||||||
|
&& ls -R /usr/local/
|
||||||
|
|
||||||
|
FROM alpine:latest
|
||||||
|
MAINTAINER Ablative Hosting <support@ablative.hosting>
|
||||||
|
|
||||||
|
#BSD habits die hard
|
||||||
|
ENV TOR_USER=_tor
|
||||||
|
|
||||||
|
# Installing dependencies of Tor and pwgen
|
||||||
|
RUN apk --no-cache add --update \
|
||||||
|
libevent \
|
||||||
|
libressl \
|
||||||
|
xz-libs \
|
||||||
|
zlib \
|
||||||
|
zstd \
|
||||||
|
zstd-dev \
|
||||||
|
pwgen
|
||||||
|
|
||||||
|
# Copy Tor
|
||||||
|
COPY --from=tor-build-stage /usr/local/ /usr/local/
|
||||||
|
|
||||||
|
# Create an unprivileged tor user
|
||||||
|
RUN addgroup -S $TOR_USER && adduser -G $TOR_USER -S $TOR_USER && adduser -G _tor -S cwtchd && mkdir /run/tor
|
||||||
|
|
||||||
|
# Copy Tor configuration file
|
||||||
|
COPY ./server/docker/torrc /etc/tor/torrc
|
||||||
|
|
||||||
|
# Copy docker-entrypoint
|
||||||
|
COPY ./server/docker/docker-entrypoint /usr/local/bin/
|
||||||
|
|
||||||
|
# Copy across cwtch
|
||||||
|
COPY --from=server-build-stage /go/src/cwtch.im/cwtch/server/app/app /usr/local/bin/cwtch_server
|
||||||
|
|
||||||
|
# Persist data
|
||||||
|
VOLUME /etc/tor /var/lib/tor /etc/cwtch
|
||||||
|
|
||||||
|
ENTRYPOINT ["docker-entrypoint"]
|
||||||
|
|
||||||
|
#cwtchd is in the _tor group so can access the socket but that's it
|
||||||
|
#USER cwtchd
|
||||||
|
|
||||||
|
#Launches the cwtchd daemon
|
||||||
|
CMD ["/usr/local/bin/cwtch_server"]
|
|
@ -30,9 +30,6 @@ Development and Contributing information in [CONTRIBUTING.md](https://git.openpr
|
||||||
## Running Cwtch
|
## Running Cwtch
|
||||||
### Server
|
### Server
|
||||||
#### Docker
|
#### Docker
|
||||||
|
|
||||||
### NOTE: The following section is out of date. The new Cwtch server is available from https://git.openprivacy.ca/cwtch.im/server, but there is no current docker container for it.
|
|
||||||
|
|
||||||
This repository contains a `Dockerfile` allowing you to build and run the server as a [docker](https://www.docker.com/) container.
|
This repository contains a `Dockerfile` allowing you to build and run the server as a [docker](https://www.docker.com/) container.
|
||||||
|
|
||||||
To get started issue `docker build -t openpriv/cwtch-server:latest .`, this will create 2 temporary docker containers, one to build the Tor daemon and one to build Cwtch. The compiled binaries will then be bundled into a new image and tagged as `openpriv/cwtch-server:latest`.
|
To get started issue `docker build -t openpriv/cwtch-server:latest .`, this will create 2 temporary docker containers, one to build the Tor daemon and one to build Cwtch. The compiled binaries will then be bundled into a new image and tagged as `openpriv/cwtch-server:latest`.
|
||||||
|
|
617
app/app.go
617
app/app.go
|
@ -3,451 +3,232 @@ package app
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/app/plugins"
|
"cwtch.im/cwtch/app/plugins"
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/extensions"
|
|
||||||
"cwtch.im/cwtch/functionality/filesharing"
|
|
||||||
"cwtch.im/cwtch/functionality/servers"
|
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"cwtch.im/cwtch/peer"
|
"cwtch.im/cwtch/peer"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
"cwtch.im/cwtch/settings"
|
|
||||||
"cwtch.im/cwtch/storage"
|
"cwtch.im/cwtch/storage"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
path "path/filepath"
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type application struct {
|
// AttributeTag is a const name for a peer attribute that can be set at creation time, for example for versioning info
|
||||||
|
const AttributeTag = "tag"
|
||||||
|
|
||||||
|
type applicationCore struct {
|
||||||
eventBuses map[string]event.Manager
|
eventBuses map[string]event.Manager
|
||||||
directory string
|
|
||||||
|
|
||||||
peers map[string]peer.CwtchPeer
|
directory string
|
||||||
acn connectivity.ACN
|
coremutex sync.Mutex
|
||||||
plugins sync.Map //map[string] []plugins.Plugin
|
|
||||||
|
|
||||||
engines map[string]connections.Engine
|
|
||||||
appBus event.Manager
|
|
||||||
eventQueue event.Queue
|
|
||||||
appmutex sync.Mutex
|
|
||||||
engineHooks connections.EngineHooks
|
|
||||||
|
|
||||||
settings *settings.GlobalSettingsFile
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) IsFeatureEnabled(experiment string) bool {
|
type application struct {
|
||||||
globalSettings := app.ReadSettings()
|
applicationCore
|
||||||
if globalSettings.ExperimentsEnabled {
|
appletPeers
|
||||||
if status, exists := globalSettings.Experiments[experiment]; exists {
|
appletACN
|
||||||
return status
|
appletPlugins
|
||||||
}
|
storage map[string]storage.ProfileStore
|
||||||
}
|
engines map[string]connections.Engine
|
||||||
return false
|
appBus event.Manager
|
||||||
|
appmutex sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// Application is a full cwtch peer application. It allows management, usage and storage of multiple peers
|
// Application is a full cwtch peer application. It allows management, usage and storage of multiple peers
|
||||||
type Application interface {
|
type Application interface {
|
||||||
LoadProfiles(password string)
|
LoadProfiles(password string)
|
||||||
CreateProfile(name string, password string, autostart bool)
|
CreatePeer(name string, password string)
|
||||||
InstallEngineHooks(engineHooks connections.EngineHooks)
|
CreateTaggedPeer(name string, password string, tag string)
|
||||||
ImportProfile(exportedCwtchFile string, password string) (peer.CwtchPeer, error)
|
DeletePeer(onion string)
|
||||||
EnhancedImportProfile(exportedCwtchFile string, password string) string
|
|
||||||
DeleteProfile(onion string, currentPassword string)
|
|
||||||
AddPeerPlugin(onion string, pluginID plugins.PluginID)
|
AddPeerPlugin(onion string, pluginID plugins.PluginID)
|
||||||
|
ChangePeerPassword(onion, oldpass, newpass string)
|
||||||
|
LaunchPeers()
|
||||||
|
|
||||||
GetPrimaryBus() event.Manager
|
GetPrimaryBus() event.Manager
|
||||||
GetEventBus(onion string) event.Manager
|
GetEventBus(onion string) event.Manager
|
||||||
QueryACNStatus()
|
QueryACNStatus()
|
||||||
QueryACNVersion()
|
QueryACNVersion()
|
||||||
|
|
||||||
ConfigureConnections(onion string, doListn, doPeers, doServers bool)
|
|
||||||
ActivatePeerEngine(onion string)
|
|
||||||
DeactivatePeerEngine(onion string)
|
|
||||||
|
|
||||||
ReadSettings() settings.GlobalSettings
|
|
||||||
UpdateSettings(settings settings.GlobalSettings)
|
|
||||||
IsFeatureEnabled(experiment string) bool
|
|
||||||
|
|
||||||
ShutdownPeer(string)
|
ShutdownPeer(string)
|
||||||
Shutdown()
|
Shutdown()
|
||||||
|
|
||||||
GetPeer(onion string) peer.CwtchPeer
|
GetPeer(onion string) peer.CwtchPeer
|
||||||
ListProfiles() []string
|
ListPeers() map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadProfileFn is the function signature for a function in an app that loads a profile
|
// LoadProfileFn is the function signature for a function in an app that loads a profile
|
||||||
type LoadProfileFn func(profile peer.CwtchPeer)
|
type LoadProfileFn func(profile *model.Profile, store storage.ProfileStore)
|
||||||
|
|
||||||
func LoadAppSettings(appDirectory string) *settings.GlobalSettingsFile {
|
func newAppCore(appDirectory string) *applicationCore {
|
||||||
log.Debugf("NewApp(%v)\n", appDirectory)
|
appCore := &applicationCore{eventBuses: make(map[string]event.Manager), directory: appDirectory}
|
||||||
os.MkdirAll(path.Join(appDirectory, "profiles"), 0700)
|
os.MkdirAll(path.Join(appCore.directory, "profiles"), 0700)
|
||||||
|
return appCore
|
||||||
// Note: we basically presume this doesn't fail. If the file doesn't exist we create it, and as such the
|
|
||||||
// only plausible error conditions are related to file create e.g. low disk space. If that is the case then
|
|
||||||
// many other parts of Cwtch are likely to fail also.
|
|
||||||
globalSettingsFile, err := settings.InitGlobalSettingsFile(appDirectory, DefactoPasswordForUnencryptedProfiles)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error initializing global globalSettingsFile file %s. Global globalSettingsFile might not be loaded or saved", err)
|
|
||||||
}
|
|
||||||
return globalSettingsFile
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewApp creates a new app with some environment awareness and initializes a Tor Manager
|
// NewApp creates a new app with some environment awareness and initializes a Tor Manager
|
||||||
func NewApp(acn connectivity.ACN, appDirectory string, settings *settings.GlobalSettingsFile) Application {
|
func NewApp(acn connectivity.ACN, appDirectory string) Application {
|
||||||
|
log.Debugf("NewApp(%v)\n", appDirectory)
|
||||||
app := &application{engines: make(map[string]connections.Engine), eventBuses: make(map[string]event.Manager), directory: appDirectory, appBus: event.NewEventManager(), settings: settings, eventQueue: event.NewQueue()}
|
app := &application{storage: make(map[string]storage.ProfileStore), engines: make(map[string]connections.Engine), applicationCore: *newAppCore(appDirectory), appBus: event.NewEventManager()}
|
||||||
app.peers = make(map[string]peer.CwtchPeer)
|
app.appletPeers.init()
|
||||||
app.engineHooks = connections.DefaultEngineHooks{}
|
|
||||||
app.acn = acn
|
|
||||||
statusHandler := app.getACNStatusHandler()
|
|
||||||
acn.SetStatusCallback(statusHandler)
|
|
||||||
acn.SetVersionCallback(app.getACNVersionHandler())
|
|
||||||
prog, status := acn.GetBootstrapStatus()
|
|
||||||
statusHandler(prog, status)
|
|
||||||
|
|
||||||
app.GetPrimaryBus().Subscribe(event.ACNStatus, app.eventQueue)
|
|
||||||
go app.eventHandler()
|
|
||||||
|
|
||||||
|
app.appletACN.init(acn, app.getACNStatusHandler())
|
||||||
return app
|
return app
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) InstallEngineHooks(engineHooks connections.EngineHooks) {
|
// CreatePeer creates a new Peer with a given name and core required accessories (eventbus)
|
||||||
app.appmutex.Lock()
|
func (ac *applicationCore) CreatePeer(name string) (*model.Profile, error) {
|
||||||
defer app.appmutex.Unlock()
|
log.Debugf("CreatePeer(%v)\n", name)
|
||||||
app.engineHooks = engineHooks
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) ReadSettings() settings.GlobalSettings {
|
profile := storage.NewProfile(name)
|
||||||
app.appmutex.Lock()
|
|
||||||
defer app.appmutex.Unlock()
|
|
||||||
return app.settings.ReadGlobalSettings()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) UpdateSettings(settings settings.GlobalSettings) {
|
ac.coremutex.Lock()
|
||||||
// don't allow any other application changes while settings update
|
defer ac.coremutex.Unlock()
|
||||||
app.appmutex.Lock()
|
|
||||||
defer app.appmutex.Unlock()
|
|
||||||
app.settings.WriteGlobalSettings(settings)
|
|
||||||
|
|
||||||
for _, profile := range app.peers {
|
_, exists := ac.eventBuses[profile.Onion]
|
||||||
profile.UpdateExperiments(settings.ExperimentsEnabled, settings.Experiments)
|
if exists {
|
||||||
|
return nil, fmt.Errorf("error: profile for onion %v already exists", profile.Onion)
|
||||||
// Explicitly toggle blocking/unblocking of unknown connections for profiles
|
|
||||||
// that have been loaded.
|
|
||||||
if settings.BlockUnknownConnections {
|
|
||||||
profile.BlockUnknownConnections()
|
|
||||||
} else {
|
|
||||||
profile.AllowUnknownConnections()
|
|
||||||
}
|
|
||||||
|
|
||||||
profile.NotifySettingsUpdate(settings)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListProfiles returns a map of onions to their profile's Name
|
|
||||||
func (app *application) ListProfiles() []string {
|
|
||||||
var keys []string
|
|
||||||
|
|
||||||
app.appmutex.Lock()
|
|
||||||
defer app.appmutex.Unlock()
|
|
||||||
for handle := range app.peers {
|
|
||||||
keys = append(keys, handle)
|
|
||||||
}
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPeer returns a cwtchPeer for a given onion address
|
|
||||||
func (app *application) GetPeer(onion string) peer.CwtchPeer {
|
|
||||||
app.appmutex.Lock()
|
|
||||||
defer app.appmutex.Unlock()
|
|
||||||
if profile, ok := app.peers[onion]; ok {
|
|
||||||
return profile
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) AddPlugin(peerid string, id plugins.PluginID, bus event.Manager, acn connectivity.ACN) {
|
|
||||||
if _, exists := app.plugins.Load(peerid); !exists {
|
|
||||||
app.plugins.Store(peerid, []plugins.Plugin{})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pluginsinf, _ := app.plugins.Load(peerid)
|
|
||||||
peerPlugins := pluginsinf.([]plugins.Plugin)
|
|
||||||
|
|
||||||
for _, plugin := range peerPlugins {
|
|
||||||
if plugin.Id() == id {
|
|
||||||
log.Errorf("trying to add second instance of plugin %v to peer %v", id, peerid)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
newp, err := plugins.Get(id, bus, acn, peerid)
|
|
||||||
if err == nil {
|
|
||||||
newp.Start()
|
|
||||||
peerPlugins = append(peerPlugins, newp)
|
|
||||||
log.Debugf("storing plugin for %v %v", peerid, peerPlugins)
|
|
||||||
app.plugins.Store(peerid, peerPlugins)
|
|
||||||
} else {
|
|
||||||
log.Errorf("error adding plugin: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) CreateProfile(name string, password string, autostart bool) {
|
|
||||||
autostartVal := constants.True
|
|
||||||
if !autostart {
|
|
||||||
autostartVal = constants.False
|
|
||||||
}
|
|
||||||
tagVal := constants.ProfileTypeV1Password
|
|
||||||
if password == DefactoPasswordForUnencryptedProfiles {
|
|
||||||
tagVal = constants.ProfileTypeV1DefaultPassword
|
|
||||||
}
|
|
||||||
|
|
||||||
app.CreatePeer(name, password, map[attr.ZonedPath]string{
|
|
||||||
attr.ProfileZone.ConstructZonedPath(constants.Tag): tagVal,
|
|
||||||
attr.ProfileZone.ConstructZonedPath(constants.PeerAutostart): autostartVal,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) setupPeer(profile peer.CwtchPeer) {
|
|
||||||
eventBus := event.NewEventManager()
|
eventBus := event.NewEventManager()
|
||||||
app.eventBuses[profile.GetOnion()] = eventBus
|
ac.eventBuses[profile.Onion] = eventBus
|
||||||
|
|
||||||
// Initialize the Peer with the Given Event Bus
|
|
||||||
app.peers[profile.GetOnion()] = profile
|
|
||||||
profile.Init(eventBus)
|
|
||||||
|
|
||||||
// Update the Peer with the Most Recent Experiment State...
|
|
||||||
globalSettings := app.settings.ReadGlobalSettings()
|
|
||||||
profile.UpdateExperiments(globalSettings.ExperimentsEnabled, globalSettings.Experiments)
|
|
||||||
app.registerHooks(profile)
|
|
||||||
|
|
||||||
// Register the Peer With Application Plugins..
|
|
||||||
app.AddPeerPlugin(profile.GetOnion(), plugins.CONNECTIONRETRY) // Now Mandatory
|
|
||||||
app.AddPeerPlugin(profile.GetOnion(), plugins.HEARTBEAT) // Now Mandatory
|
|
||||||
|
|
||||||
|
return profile, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) CreatePeer(name string, password string, attributes map[attr.ZonedPath]string) {
|
func (ac *applicationCore) DeletePeer(onion string) {
|
||||||
app.appmutex.Lock()
|
ac.coremutex.Lock()
|
||||||
defer app.appmutex.Unlock()
|
defer ac.coremutex.Unlock()
|
||||||
|
|
||||||
profileDirectory := path.Join(app.directory, "profiles", model.GenerateRandomID())
|
ac.eventBuses[onion].Shutdown()
|
||||||
|
delete(ac.eventBuses, onion)
|
||||||
|
}
|
||||||
|
|
||||||
profile, err := peer.CreateEncryptedStorePeer(profileDirectory, name, password)
|
func (app *application) CreateTaggedPeer(name string, password string, tag string) {
|
||||||
|
profile, err := app.applicationCore.CreatePeer(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("Error Creating Peer: %v", err)
|
|
||||||
app.appBus.Publish(event.NewEventList(event.PeerError, event.Error, err.Error()))
|
app.appBus.Publish(event.NewEventList(event.PeerError, event.Error, err.Error()))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
app.setupPeer(profile)
|
profileStore := storage.CreateProfileWriterStore(app.eventBuses[profile.Onion], path.Join(app.directory, "profiles", profile.LocalID), password, profile)
|
||||||
|
app.storage[profile.Onion] = profileStore
|
||||||
|
|
||||||
for zp, val := range attributes {
|
pc := app.storage[profile.Onion].GetProfileCopy(true)
|
||||||
zone, key := attr.ParseZone(zp.ToString())
|
p := peer.FromProfile(pc)
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, zone, key, val)
|
p.Init(app.eventBuses[profile.Onion])
|
||||||
|
|
||||||
|
peerAuthorizations := profile.ContactsAuthorizations()
|
||||||
|
// TODO: Would be nice if ProtocolEngine did not need to explicitly be given the Private Key.
|
||||||
|
identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
|
||||||
|
engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, app.acn, app.eventBuses[profile.Onion], peerAuthorizations)
|
||||||
|
|
||||||
|
app.peers[profile.Onion] = p
|
||||||
|
app.engines[profile.Onion] = engine
|
||||||
|
|
||||||
|
if tag != "" {
|
||||||
|
p.SetAttribute(AttributeTag, tag)
|
||||||
}
|
}
|
||||||
|
|
||||||
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.True}))
|
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion, event.Created: event.True}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) DeleteProfile(onion string, password string) {
|
// CreatePeer creates a new Peer with the given name and required accessories (eventbus, storage, protocol engine)
|
||||||
log.Debugf("DeleteProfile called on %v\n", onion)
|
func (app *application) CreatePeer(name string, password string) {
|
||||||
|
app.CreateTaggedPeer(name, password, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) DeletePeer(onion string) {
|
||||||
|
log.Infof("DeletePeer called on %v\n", onion)
|
||||||
app.appmutex.Lock()
|
app.appmutex.Lock()
|
||||||
defer app.appmutex.Unlock()
|
defer app.appmutex.Unlock()
|
||||||
|
|
||||||
// short circuit to prevent nil-pointer panic if this function is called twice (or incorrectly)
|
app.appletPlugins.ShutdownPeer(onion)
|
||||||
peer := app.peers[onion]
|
app.plugins.Delete(onion)
|
||||||
if peer == nil {
|
|
||||||
log.Errorf("shutdownPeer called with invalid onion %v", onion)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// allow a blank password to delete "unencrypted" accounts...
|
app.peers[onion].Shutdown()
|
||||||
if password == "" {
|
delete(app.peers, onion)
|
||||||
password = DefactoPasswordForUnencryptedProfiles
|
|
||||||
}
|
|
||||||
|
|
||||||
if peer.CheckPassword(password) {
|
app.engines[onion].Shutdown()
|
||||||
// soft-shutdown
|
delete(app.engines, onion)
|
||||||
peer.Shutdown()
|
|
||||||
// delete the underlying storage
|
|
||||||
peer.Delete()
|
|
||||||
// hard shutdown / remove from app
|
|
||||||
app.shutdownPeer(onion)
|
|
||||||
|
|
||||||
// Shutdown and Remove the Engine
|
app.storage[onion].Shutdown()
|
||||||
log.Debugf("Delete peer for %v Done\n", onion)
|
app.storage[onion].Delete()
|
||||||
app.appBus.Publish(event.NewEventList(event.PeerDeleted, event.Identity, onion))
|
delete(app.storage, onion)
|
||||||
return
|
|
||||||
}
|
app.eventBuses[onion].Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion))
|
||||||
app.appBus.Publish(event.NewEventList(event.AppError, event.Error, event.PasswordMatchError, event.Identity, onion))
|
|
||||||
|
app.applicationCore.DeletePeer(onion)
|
||||||
|
log.Debugf("Delete peer for %v Done\n", onion)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *application) ChangePeerPassword(onion, oldpass, newpass string) {
|
||||||
|
app.eventBuses[onion].Publish(event.NewEventList(event.ChangePassword, event.Password, oldpass, event.NewPassword, newpass))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) AddPeerPlugin(onion string, pluginID plugins.PluginID) {
|
func (app *application) AddPeerPlugin(onion string, pluginID plugins.PluginID) {
|
||||||
app.AddPlugin(onion, pluginID, app.eventBuses[onion], app.acn)
|
app.AddPlugin(onion, pluginID, app.eventBuses[onion], app.acn)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) ImportProfile(exportedCwtchFile string, password string) (peer.CwtchPeer, error) {
|
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
|
||||||
profileDirectory := path.Join(app.directory, "profiles")
|
func (ac *applicationCore) LoadProfiles(password string, timeline bool, loadProfileFn LoadProfileFn) error {
|
||||||
profile, err := peer.ImportProfile(exportedCwtchFile, profileDirectory, password)
|
files, err := ioutil.ReadDir(path.Join(ac.directory, "profiles"))
|
||||||
if profile != nil || err == nil {
|
if err != nil {
|
||||||
app.installProfile(profile)
|
return fmt.Errorf("error: cannot read profiles directory: %v", err)
|
||||||
}
|
}
|
||||||
return profile, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) EnhancedImportProfile(exportedCwtchFile string, password string) string {
|
for _, file := range files {
|
||||||
_, err := app.ImportProfile(exportedCwtchFile, password)
|
eventBus := event.NewEventManager()
|
||||||
if err == nil {
|
profileStore, err := storage.LoadProfileWriterStore(eventBus, path.Join(ac.directory, "profiles", file.Name()), password)
|
||||||
return ""
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
profile := profileStore.GetProfileCopy(timeline)
|
||||||
|
|
||||||
|
_, exists := ac.eventBuses[profile.Onion]
|
||||||
|
if exists {
|
||||||
|
profileStore.Shutdown()
|
||||||
|
eventBus.Shutdown()
|
||||||
|
log.Errorf("profile for onion %v already exists", profile.Onion)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ac.coremutex.Lock()
|
||||||
|
ac.eventBuses[profile.Onion] = eventBus
|
||||||
|
ac.coremutex.Unlock()
|
||||||
|
|
||||||
|
loadProfileFn(profile, profileStore)
|
||||||
}
|
}
|
||||||
return err.Error()
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
|
// LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them
|
||||||
func (app *application) LoadProfiles(password string) {
|
func (app *application) LoadProfiles(password string) {
|
||||||
count := 0
|
count := 0
|
||||||
migrating := false
|
app.applicationCore.LoadProfiles(password, true, func(profile *model.Profile, profileStore storage.ProfileStore) {
|
||||||
|
peer := peer.FromProfile(profile)
|
||||||
|
peer.Init(app.eventBuses[profile.Onion])
|
||||||
|
|
||||||
files, err := os.ReadDir(path.Join(app.directory, "profiles"))
|
peerAuthorizations := profile.ContactsAuthorizations()
|
||||||
if err != nil {
|
identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
|
||||||
log.Errorf("error: cannot read profiles directory: %v", err)
|
engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, app.acn, app.eventBuses[profile.Onion], peerAuthorizations)
|
||||||
return
|
app.appmutex.Lock()
|
||||||
}
|
app.peers[profile.Onion] = peer
|
||||||
|
app.storage[profile.Onion] = profileStore
|
||||||
for _, file := range files {
|
app.engines[profile.Onion] = engine
|
||||||
// Attempt to load an encrypted database
|
app.appmutex.Unlock()
|
||||||
profileDirectory := path.Join(app.directory, "profiles", file.Name())
|
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion, event.Created: event.False}))
|
||||||
profile, err := peer.FromEncryptedDatabase(profileDirectory, password)
|
count++
|
||||||
loaded := false
|
})
|
||||||
if err == nil {
|
|
||||||
// return the load the profile...
|
|
||||||
log.Infof("loading profile from new-type storage database...")
|
|
||||||
loaded = app.installProfile(profile)
|
|
||||||
} else { // On failure attempt to load a legacy profile
|
|
||||||
profileStore, err := storage.LoadProfileWriterStore(profileDirectory, password)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
log.Infof("found legacy profile. importing to new database structure...")
|
|
||||||
legacyProfile := profileStore.GetProfileCopy(true)
|
|
||||||
if !migrating {
|
|
||||||
migrating = true
|
|
||||||
app.appBus.Publish(event.NewEventList(event.StartingStorageMiragtion))
|
|
||||||
}
|
|
||||||
|
|
||||||
cps, err := peer.CreateEncryptedStore(profileDirectory, password)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error creating encrypted store: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
profile := peer.ImportLegacyProfile(legacyProfile, cps)
|
|
||||||
loaded = app.installProfile(profile)
|
|
||||||
}
|
|
||||||
if loaded {
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if count == 0 {
|
if count == 0 {
|
||||||
message := event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0)
|
message := event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0)
|
||||||
app.appBus.Publish(message)
|
app.appBus.Publish(message)
|
||||||
}
|
}
|
||||||
if migrating {
|
|
||||||
app.appBus.Publish(event.NewEventList(event.DoneStorageMigration))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) registerHooks(profile peer.CwtchPeer) {
|
|
||||||
// Register Hooks
|
|
||||||
profile.RegisterHook(extensions.ProfileValueExtension{})
|
|
||||||
profile.RegisterHook(extensions.SendWhenOnlineExtension{})
|
|
||||||
profile.RegisterHook(new(filesharing.Functionality))
|
|
||||||
profile.RegisterHook(new(filesharing.ImagePreviewsFunctionality))
|
|
||||||
profile.RegisterHook(new(servers.Functionality))
|
|
||||||
// Ensure that Profiles have the Most Up to Date Settings...
|
|
||||||
profile.NotifySettingsUpdate(app.settings.ReadGlobalSettings())
|
|
||||||
}
|
|
||||||
|
|
||||||
// installProfile takes a profile and if it isn't loaded in the app, installs it and returns true
|
|
||||||
func (app *application) installProfile(profile peer.CwtchPeer) bool {
|
|
||||||
app.appmutex.Lock()
|
|
||||||
defer app.appmutex.Unlock()
|
|
||||||
|
|
||||||
// Only attempt to finalize the profile if we don't have one loaded...
|
|
||||||
if app.peers[profile.GetOnion()] == nil {
|
|
||||||
app.setupPeer(profile)
|
|
||||||
// Finalize the Creation of Peer / Notify any Interfaces..
|
|
||||||
app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.False}))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Otherwise shutdown the connections
|
|
||||||
profile.Shutdown()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ActivatePeerEngine creates a peer engine for use with an ACN, should be called once the underlying ACN is online
|
|
||||||
func (app *application) ActivatePeerEngine(onion string) {
|
|
||||||
profile := app.GetPeer(onion)
|
|
||||||
if profile != nil {
|
|
||||||
if _, exists := app.engines[onion]; !exists {
|
|
||||||
eventBus, exists := app.eventBuses[profile.GetOnion()]
|
|
||||||
|
|
||||||
if !exists {
|
|
||||||
// todo handle this case?
|
|
||||||
log.Errorf("cannot activate peer engine without an event bus")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
engine, err := profile.GenerateProtocolEngine(app.acn, eventBus, app.engineHooks)
|
|
||||||
if err == nil {
|
|
||||||
log.Debugf("restartFlow: Creating a New Protocol Engine...")
|
|
||||||
app.engines[profile.GetOnion()] = engine
|
|
||||||
eventBus.Publish(event.NewEventList(event.ProtocolEngineCreated))
|
|
||||||
app.QueryACNStatus()
|
|
||||||
} else {
|
|
||||||
log.Errorf("corrupted profile detected for %v", onion)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigureConnections autostarts the given kinds of connections.
|
|
||||||
func (app *application) ConfigureConnections(onion string, listen bool, peers bool, servers bool) {
|
|
||||||
profile := app.GetPeer(onion)
|
|
||||||
if profile != nil {
|
|
||||||
|
|
||||||
profileBus, exists := app.eventBuses[profile.GetOnion()]
|
|
||||||
if exists {
|
|
||||||
// if we are making a decision to ignore
|
|
||||||
if !peers || !servers {
|
|
||||||
profileBus.Publish(event.NewEventList(event.PurgeRetries))
|
|
||||||
}
|
|
||||||
|
|
||||||
// enable the engine if it doesn't exist...
|
|
||||||
// note: this function is idempotent
|
|
||||||
app.ActivatePeerEngine(onion)
|
|
||||||
if listen {
|
|
||||||
profile.Listen()
|
|
||||||
}
|
|
||||||
|
|
||||||
profileBus.Publish(event.NewEventList(event.ResumeRetries))
|
|
||||||
// do this in the background, for large contact lists it can take a long time...
|
|
||||||
go profile.StartConnections(peers, servers)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
log.Errorf("profile does not exist %v", onion)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeactivatePeerEngine shutsdown and cleans up a peer engine, should be called when an underlying ACN goes offline
|
|
||||||
func (app *application) DeactivatePeerEngine(onion string) {
|
|
||||||
if engine, exists := app.engines[onion]; exists {
|
|
||||||
engine.Shutdown()
|
|
||||||
delete(app.engines, onion)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
|
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
|
||||||
|
@ -456,8 +237,8 @@ func (app *application) GetPrimaryBus() event.Manager {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetEventBus returns a cwtchPeer's event bus
|
// GetEventBus returns a cwtchPeer's event bus
|
||||||
func (app *application) GetEventBus(onion string) event.Manager {
|
func (ac *applicationCore) GetEventBus(onion string) event.Manager {
|
||||||
if manager, ok := app.eventBuses[onion]; ok {
|
if manager, ok := ac.eventBuses[onion]; ok {
|
||||||
return manager
|
return manager
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -466,20 +247,12 @@ func (app *application) GetEventBus(onion string) event.Manager {
|
||||||
func (app *application) getACNStatusHandler() func(int, string) {
|
func (app *application) getACNStatusHandler() func(int, string) {
|
||||||
return func(progress int, status string) {
|
return func(progress int, status string) {
|
||||||
progStr := strconv.Itoa(progress)
|
progStr := strconv.Itoa(progress)
|
||||||
app.appmutex.Lock()
|
app.peerLock.Lock()
|
||||||
app.appBus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status))
|
app.appBus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status))
|
||||||
for _, bus := range app.eventBuses {
|
for _, bus := range app.eventBuses {
|
||||||
bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status))
|
bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status))
|
||||||
}
|
}
|
||||||
app.appmutex.Unlock()
|
app.peerLock.Unlock()
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *application) getACNVersionHandler() func(string) {
|
|
||||||
return func(version string) {
|
|
||||||
app.appmutex.Lock()
|
|
||||||
defer app.appmutex.Unlock()
|
|
||||||
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -493,107 +266,29 @@ func (app *application) QueryACNVersion() {
|
||||||
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
|
app.appBus.Publish(event.NewEventList(event.ACNVersion, event.Data, version))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *application) eventHandler() {
|
|
||||||
acnStatus := -1
|
|
||||||
for {
|
|
||||||
e := app.eventQueue.Next()
|
|
||||||
switch e.EventType {
|
|
||||||
case event.ACNStatus:
|
|
||||||
newAcnStatus, err := strconv.Atoi(e.Data[event.Progress])
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if newAcnStatus == 100 {
|
|
||||||
if acnStatus != 100 {
|
|
||||||
for _, onion := range app.ListProfiles() {
|
|
||||||
profile := app.GetPeer(onion)
|
|
||||||
if profile != nil {
|
|
||||||
autostart, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.PeerAutostart)
|
|
||||||
appearOffline, appearOfflineExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.PeerAppearOffline)
|
|
||||||
if !exists || autostart == "true" {
|
|
||||||
if appearOfflineExists && appearOffline == "true" {
|
|
||||||
// don't configure any connections...
|
|
||||||
log.Infof("peer appearing offline, not launching listen threads or connecting jobs")
|
|
||||||
app.ConfigureConnections(onion, false, false, false)
|
|
||||||
} else {
|
|
||||||
app.ConfigureConnections(onion, true, true, true)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if acnStatus == 100 {
|
|
||||||
// just fell offline
|
|
||||||
for _, onion := range app.ListProfiles() {
|
|
||||||
app.DeactivatePeerEngine(onion)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
acnStatus = newAcnStatus
|
|
||||||
|
|
||||||
default:
|
|
||||||
// invalid event, signifies shutdown
|
|
||||||
if e.EventType == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShutdownPeer shuts down a peer and removes it from the app's management
|
// ShutdownPeer shuts down a peer and removes it from the app's management
|
||||||
func (app *application) ShutdownPeer(onion string) {
|
func (app *application) ShutdownPeer(onion string) {
|
||||||
app.appmutex.Lock()
|
app.appmutex.Lock()
|
||||||
defer app.appmutex.Unlock()
|
defer app.appmutex.Unlock()
|
||||||
app.shutdownPeer(onion)
|
app.eventBuses[onion].Shutdown()
|
||||||
}
|
|
||||||
|
|
||||||
// shutdownPeer mutex unlocked helper shutdown peer
|
|
||||||
//
|
|
||||||
//nolint:nilaway
|
|
||||||
func (app *application) shutdownPeer(onion string) {
|
|
||||||
|
|
||||||
// short circuit to prevent nil-pointer panic if this function is called twice (or incorrectly)
|
|
||||||
onionEventBus := app.eventBuses[onion]
|
|
||||||
onionPeer := app.peers[onion]
|
|
||||||
if onionEventBus == nil || onionPeer == nil {
|
|
||||||
log.Errorf("shutdownPeer called with invalid onion %v", onion)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// we are an internal locked method, app.eventBuses[onion] cannot fail...
|
|
||||||
onionEventBus.Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion))
|
|
||||||
onionEventBus.Shutdown()
|
|
||||||
|
|
||||||
delete(app.eventBuses, onion)
|
delete(app.eventBuses, onion)
|
||||||
onionPeer.Shutdown()
|
app.peers[onion].Shutdown()
|
||||||
delete(app.peers, onion)
|
delete(app.peers, onion)
|
||||||
if onionEngine, ok := app.engines[onion]; ok {
|
app.engines[onion].Shutdown()
|
||||||
onionEngine.Shutdown()
|
delete(app.engines, onion)
|
||||||
delete(app.engines, onion)
|
app.storage[onion].Shutdown()
|
||||||
}
|
delete(app.storage, onion)
|
||||||
log.Debugf("shutting down plugins for %v", onion)
|
app.appletPlugins.Shutdown()
|
||||||
pluginsI, ok := app.plugins.Load(onion)
|
|
||||||
if ok {
|
|
||||||
appPlugins := pluginsI.([]plugins.Plugin)
|
|
||||||
for _, plugin := range appPlugins {
|
|
||||||
plugin.Shutdown()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
app.plugins.Delete(onion)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown shutsdown all peers of an app
|
// Shutdown shutsdown all peers of an app and then the tormanager
|
||||||
func (app *application) Shutdown() {
|
func (app *application) Shutdown() {
|
||||||
app.appmutex.Lock()
|
for id, peer := range app.peers {
|
||||||
defer app.appmutex.Unlock()
|
peer.Shutdown()
|
||||||
for id := range app.peers {
|
app.appletPlugins.ShutdownPeer(id)
|
||||||
log.Debugf("Shutting Down Peer %v", id)
|
app.engines[id].Shutdown()
|
||||||
app.shutdownPeer(id)
|
app.storage[id].Shutdown()
|
||||||
|
app.eventBuses[id].Shutdown()
|
||||||
}
|
}
|
||||||
log.Debugf("Shutting Down App")
|
|
||||||
app.eventQueue.Shutdown()
|
|
||||||
app.appBus.Shutdown()
|
app.appBus.Shutdown()
|
||||||
log.Debugf("Shut Down Complete")
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,39 @@
|
||||||
|
package app
|
||||||
|
|
||||||
|
import "cwtch.im/cwtch/event"
|
||||||
|
import "git.openprivacy.ca/openprivacy/log"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DestApp should be used as a destination for IPC messages that are for the application itself an not a peer
|
||||||
|
DestApp = "app"
|
||||||
|
)
|
||||||
|
|
||||||
|
type applicationBridge struct {
|
||||||
|
applicationCore
|
||||||
|
|
||||||
|
bridge event.IPCBridge
|
||||||
|
handle func(*event.Event)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ab *applicationBridge) listen() {
|
||||||
|
log.Infoln("ab.listen()")
|
||||||
|
for {
|
||||||
|
ipcMessage, ok := ab.bridge.Read()
|
||||||
|
log.Debugf("listen() got %v for %v\n", ipcMessage.Message.EventType, ipcMessage.Dest)
|
||||||
|
if !ok {
|
||||||
|
log.Debugln("exiting appBridge.listen()")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipcMessage.Dest == DestApp {
|
||||||
|
ab.handle(&ipcMessage.Message)
|
||||||
|
} else {
|
||||||
|
if eventBus, exists := ab.eventBuses[ipcMessage.Dest]; exists {
|
||||||
|
eventBus.PublishLocal(ipcMessage.Message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ab *applicationBridge) Shutdown() {
|
||||||
|
}
|
|
@ -0,0 +1,176 @@
|
||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/app/plugins"
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
"cwtch.im/cwtch/storage"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type applicationClient struct {
|
||||||
|
applicationBridge
|
||||||
|
appletPeers
|
||||||
|
|
||||||
|
appBus event.Manager
|
||||||
|
acmutex sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAppClient returns an Application that acts as a client to a AppService, connected by the IPCBridge supplied
|
||||||
|
func NewAppClient(appDirectory string, bridge event.IPCBridge) Application {
|
||||||
|
appClient := &applicationClient{appletPeers: appletPeers{peers: make(map[string]peer.CwtchPeer)}, applicationBridge: applicationBridge{applicationCore: *newAppCore(appDirectory), bridge: bridge}, appBus: event.NewEventManager()}
|
||||||
|
appClient.handle = appClient.handleEvent
|
||||||
|
|
||||||
|
go appClient.listen()
|
||||||
|
|
||||||
|
appClient.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadClient)})
|
||||||
|
|
||||||
|
log.Infoln("Created new App Client")
|
||||||
|
return appClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific
|
||||||
|
func (ac *applicationClient) GetPrimaryBus() event.Manager {
|
||||||
|
return ac.appBus
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) handleEvent(ev *event.Event) {
|
||||||
|
switch ev.EventType {
|
||||||
|
case event.NewPeer:
|
||||||
|
localID := ev.Data[event.Identity]
|
||||||
|
key := ev.Data[event.Key]
|
||||||
|
salt := ev.Data[event.Salt]
|
||||||
|
reload := ev.Data[event.Status] == event.StorageRunning
|
||||||
|
created := ev.Data[event.Created]
|
||||||
|
ac.newPeer(localID, key, salt, reload, created)
|
||||||
|
case event.DeletePeer:
|
||||||
|
onion := ev.Data[event.Identity]
|
||||||
|
ac.handleDeletedPeer(onion)
|
||||||
|
case event.PeerError:
|
||||||
|
ac.appBus.Publish(*ev)
|
||||||
|
case event.AppError:
|
||||||
|
ac.appBus.Publish(*ev)
|
||||||
|
case event.ACNStatus:
|
||||||
|
ac.appBus.Publish(*ev)
|
||||||
|
case event.ACNVersion:
|
||||||
|
ac.appBus.Publish(*ev)
|
||||||
|
case event.ReloadDone:
|
||||||
|
ac.appBus.Publish(*ev)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) newPeer(localID, key, salt string, reload bool, created string) {
|
||||||
|
var keyBytes [32]byte
|
||||||
|
var saltBytes [128]byte
|
||||||
|
copy(keyBytes[:], key)
|
||||||
|
copy(saltBytes[:], salt)
|
||||||
|
profile, err := storage.ReadProfile(path.Join(ac.directory, "profiles", localID), keyBytes, saltBytes)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not read profile for NewPeer event: %v\n", err)
|
||||||
|
ac.appBus.Publish(event.NewEventList(event.PeerError, event.Error, fmt.Sprintf("Could not read profile for NewPeer event: %v\n", err)))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_, exists := ac.peers[profile.Onion]
|
||||||
|
if exists {
|
||||||
|
log.Errorf("profile for onion %v already exists", profile.Onion)
|
||||||
|
ac.appBus.Publish(event.NewEventList(event.PeerError, event.Error, fmt.Sprintf("profile for onion %v already exists", profile.Onion)))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
eventBus := event.NewIPCEventManager(ac.bridge, profile.Onion)
|
||||||
|
peer := peer.FromProfile(profile)
|
||||||
|
peer.Init(eventBus)
|
||||||
|
|
||||||
|
ac.peerLock.Lock()
|
||||||
|
defer ac.peerLock.Unlock()
|
||||||
|
ac.peers[profile.Onion] = peer
|
||||||
|
ac.eventBuses[profile.Onion] = eventBus
|
||||||
|
npEvent := event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion, event.Created: created})
|
||||||
|
if reload {
|
||||||
|
npEvent.Data[event.Status] = event.StorageRunning
|
||||||
|
}
|
||||||
|
ac.appBus.Publish(npEvent)
|
||||||
|
|
||||||
|
if reload {
|
||||||
|
ac.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadPeer, event.Identity, profile.Onion)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatePeer messages the service to create a new Peer with the given name
|
||||||
|
func (ac *applicationClient) CreatePeer(name string, password string) {
|
||||||
|
ac.CreateTaggedPeer(name, password, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) CreateTaggedPeer(name, password, tag string) {
|
||||||
|
log.Infof("appClient CreatePeer %v\n", name)
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.CreatePeer, map[event.Field]string{event.ProfileName: name, event.Password: password, event.Data: tag})}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletePeer messages tehe service to delete a peer
|
||||||
|
func (ac *applicationClient) DeletePeer(onion string) {
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.DeletePeer, map[event.Field]string{event.Identity: onion})}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) ChangePeerPassword(onion, oldpass, newpass string) {
|
||||||
|
message := event.IPCMessage{Dest: onion, Message: event.NewEventList(event.ChangePassword, event.Password, oldpass, event.NewPassword, newpass)}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) handleDeletedPeer(onion string) {
|
||||||
|
ac.acmutex.Lock()
|
||||||
|
defer ac.acmutex.Unlock()
|
||||||
|
ac.peers[onion].Shutdown()
|
||||||
|
delete(ac.peers, onion)
|
||||||
|
ac.eventBuses[onion].Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion))
|
||||||
|
|
||||||
|
ac.applicationCore.DeletePeer(onion)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) AddPeerPlugin(onion string, pluginID plugins.PluginID) {
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.AddPeerPlugin, map[event.Field]string{event.Identity: onion, event.Data: strconv.Itoa(int(pluginID))})}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadProfiles messages the service to load any profiles for the given password
|
||||||
|
func (ac *applicationClient) LoadProfiles(password string) {
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.LoadProfiles, map[event.Field]string{event.Password: password})}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) QueryACNStatus() {
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.GetACNStatus, map[event.Field]string{})}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *applicationClient) QueryACNVersion() {
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.GetACNVersion, map[event.Field]string{})}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShutdownPeer shuts down a peer and removes it from the app's management
|
||||||
|
func (ac *applicationClient) ShutdownPeer(onion string) {
|
||||||
|
ac.acmutex.Lock()
|
||||||
|
defer ac.acmutex.Unlock()
|
||||||
|
ac.eventBuses[onion].Shutdown()
|
||||||
|
delete(ac.eventBuses, onion)
|
||||||
|
ac.peers[onion].Shutdown()
|
||||||
|
delete(ac.peers, onion)
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.ShutdownPeer, map[event.Field]string{event.Identity: onion})}
|
||||||
|
ac.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the application client and all front end peer components
|
||||||
|
func (ac *applicationClient) Shutdown() {
|
||||||
|
for id := range ac.peers {
|
||||||
|
ac.ShutdownPeer(id)
|
||||||
|
}
|
||||||
|
ac.applicationBridge.Shutdown()
|
||||||
|
ac.appBus.Shutdown()
|
||||||
|
}
|
|
@ -0,0 +1,201 @@
|
||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/app/plugins"
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"cwtch.im/cwtch/storage"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type applicationService struct {
|
||||||
|
applicationBridge
|
||||||
|
appletACN
|
||||||
|
appletPlugins
|
||||||
|
|
||||||
|
storage map[string]storage.ProfileStore
|
||||||
|
engines map[string]connections.Engine
|
||||||
|
asmutex sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplicationService is the back end of an application that manages engines and writing storage and communicates to an ApplicationClient by an IPCBridge
|
||||||
|
type ApplicationService interface {
|
||||||
|
Shutdown()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAppService returns an ApplicationService that runs the backend of an app and communicates with a client by the supplied IPCBridge
|
||||||
|
func NewAppService(acn connectivity.ACN, appDirectory string, bridge event.IPCBridge) ApplicationService {
|
||||||
|
appService := &applicationService{storage: make(map[string]storage.ProfileStore), engines: make(map[string]connections.Engine), applicationBridge: applicationBridge{applicationCore: *newAppCore(appDirectory), bridge: bridge}}
|
||||||
|
|
||||||
|
appService.appletACN.init(acn, appService.getACNStatusHandler())
|
||||||
|
appService.handle = appService.handleEvent
|
||||||
|
|
||||||
|
go appService.listen()
|
||||||
|
|
||||||
|
log.Infoln("Created new App Service")
|
||||||
|
return appService
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *applicationService) handleEvent(ev *event.Event) {
|
||||||
|
log.Infof("app Service handleEvent %v\n", ev.EventType)
|
||||||
|
switch ev.EventType {
|
||||||
|
case event.CreatePeer:
|
||||||
|
profileName := ev.Data[event.ProfileName]
|
||||||
|
password := ev.Data[event.Password]
|
||||||
|
tag := ev.Data[event.Data]
|
||||||
|
as.createPeer(profileName, password, tag)
|
||||||
|
case event.DeletePeer:
|
||||||
|
onion := ev.Data[event.Identity]
|
||||||
|
as.deletePeer(onion)
|
||||||
|
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: *ev}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
case event.AddPeerPlugin:
|
||||||
|
onion := ev.Data[event.Identity]
|
||||||
|
pluginID, _ := strconv.Atoi(ev.Data[event.Data])
|
||||||
|
as.AddPlugin(onion, plugins.PluginID(pluginID), as.eventBuses[onion], as.acn)
|
||||||
|
case event.LoadProfiles:
|
||||||
|
password := ev.Data[event.Password]
|
||||||
|
as.loadProfiles(password)
|
||||||
|
case event.ReloadClient:
|
||||||
|
for _, storage := range as.storage {
|
||||||
|
peerMsg := *storage.GetNewPeerMessage()
|
||||||
|
peerMsg.Data[event.Status] = event.StorageRunning
|
||||||
|
peerMsg.Data[event.Created] = event.False
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: peerMsg}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadDone)}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
case event.ReloadPeer:
|
||||||
|
onion := ev.Data[event.Identity]
|
||||||
|
events := as.storage[onion].GetStatusMessages()
|
||||||
|
|
||||||
|
for _, ev := range events {
|
||||||
|
message := event.IPCMessage{Dest: onion, Message: *ev}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
case event.GetACNStatus:
|
||||||
|
prog, status := as.acn.GetBootstrapStatus()
|
||||||
|
as.getACNStatusHandler()(prog, status)
|
||||||
|
case event.GetACNVersion:
|
||||||
|
version := as.acn.GetVersion()
|
||||||
|
as.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ACNVersion, event.Data, version)})
|
||||||
|
case event.ShutdownPeer:
|
||||||
|
onion := ev.Data[event.Identity]
|
||||||
|
as.ShutdownPeer(onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *applicationService) createPeer(name, password, tag string) {
|
||||||
|
log.Infof("app Service create peer %v %v\n", name, password)
|
||||||
|
profile, err := as.applicationCore.CreatePeer(name)
|
||||||
|
as.eventBuses[profile.Onion] = event.IPCEventManagerFrom(as.bridge, profile.Onion, as.eventBuses[profile.Onion])
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not create Peer: %v\n", err)
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.PeerError, event.Error, err.Error())}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if tag != "" {
|
||||||
|
profile.SetAttribute(AttributeTag, tag)
|
||||||
|
}
|
||||||
|
|
||||||
|
profileStore := storage.CreateProfileWriterStore(as.eventBuses[profile.Onion], path.Join(as.directory, "profiles", profile.LocalID), password, profile)
|
||||||
|
|
||||||
|
peerAuthorizations := profile.ContactsAuthorizations()
|
||||||
|
// TODO: Would be nice if ProtocolEngine did not need to explicitly be given the Private Key.
|
||||||
|
identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
|
||||||
|
engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, as.acn, as.eventBuses[profile.Onion], peerAuthorizations)
|
||||||
|
|
||||||
|
as.storage[profile.Onion] = profileStore
|
||||||
|
as.engines[profile.Onion] = engine
|
||||||
|
|
||||||
|
peerMsg := *profileStore.GetNewPeerMessage()
|
||||||
|
peerMsg.Data[event.Created] = event.True
|
||||||
|
peerMsg.Data[event.Status] = event.StorageNew
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: peerMsg}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *applicationService) loadProfiles(password string) {
|
||||||
|
count := 0
|
||||||
|
as.applicationCore.LoadProfiles(password, false, func(profile *model.Profile, profileStore storage.ProfileStore) {
|
||||||
|
as.eventBuses[profile.Onion] = event.IPCEventManagerFrom(as.bridge, profile.Onion, as.eventBuses[profile.Onion])
|
||||||
|
|
||||||
|
peerAuthorizations := profile.ContactsAuthorizations()
|
||||||
|
identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey)
|
||||||
|
engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, as.acn, as.eventBuses[profile.Onion], peerAuthorizations)
|
||||||
|
as.asmutex.Lock()
|
||||||
|
as.storage[profile.Onion] = profileStore
|
||||||
|
as.engines[profile.Onion] = engine
|
||||||
|
as.asmutex.Unlock()
|
||||||
|
|
||||||
|
peerMsg := *profileStore.GetNewPeerMessage()
|
||||||
|
peerMsg.Data[event.Created] = event.False
|
||||||
|
peerMsg.Data[event.Status] = event.StorageNew
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: peerMsg}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
count++
|
||||||
|
})
|
||||||
|
if count == 0 {
|
||||||
|
message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0)}
|
||||||
|
as.bridge.Write(&message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *applicationService) getACNStatusHandler() func(int, string) {
|
||||||
|
return func(progress int, status string) {
|
||||||
|
progStr := strconv.Itoa(progress)
|
||||||
|
as.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status)})
|
||||||
|
as.applicationCore.coremutex.Lock()
|
||||||
|
defer as.applicationCore.coremutex.Unlock()
|
||||||
|
for _, bus := range as.eventBuses {
|
||||||
|
bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *applicationService) deletePeer(onion string) {
|
||||||
|
as.asmutex.Lock()
|
||||||
|
defer as.asmutex.Unlock()
|
||||||
|
|
||||||
|
as.appletPlugins.ShutdownPeer(onion)
|
||||||
|
as.plugins.Delete(onion)
|
||||||
|
|
||||||
|
as.engines[onion].Shutdown()
|
||||||
|
delete(as.engines, onion)
|
||||||
|
|
||||||
|
as.storage[onion].Shutdown()
|
||||||
|
as.storage[onion].Delete()
|
||||||
|
delete(as.storage, onion)
|
||||||
|
|
||||||
|
as.applicationCore.DeletePeer(onion)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *applicationService) ShutdownPeer(onion string) {
|
||||||
|
as.engines[onion].Shutdown()
|
||||||
|
delete(as.engines, onion)
|
||||||
|
as.storage[onion].Shutdown()
|
||||||
|
delete(as.storage, onion)
|
||||||
|
as.eventBuses[onion].Shutdown()
|
||||||
|
delete(as.eventBuses, onion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the application Service and all peer related backend parts
|
||||||
|
func (as *applicationService) Shutdown() {
|
||||||
|
log.Debugf("shutting down application service...")
|
||||||
|
as.appletPlugins.Shutdown()
|
||||||
|
for id := range as.engines {
|
||||||
|
log.Debugf("shutting down application service peer engine %v", id)
|
||||||
|
as.ShutdownPeer(id)
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,6 +0,0 @@
|
||||||
package app
|
|
||||||
|
|
||||||
// DefactoPasswordForUnencryptedProfiles is used to offer "un-passworded" profiles. Our storage encrypts everything with a password. We need an agreed upon
|
|
||||||
// password to use in that case, that the app case use behind the scenes to password and unlock with
|
|
||||||
// https://docs.openprivacy.ca/cwtch-security-handbook/profile_encryption_and_storage.html
|
|
||||||
const DefactoPasswordForUnencryptedProfiles = "be gay do crime"
|
|
|
@ -0,0 +1,121 @@
|
||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"cwtch.im/cwtch/app/plugins"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type appletPeers struct {
|
||||||
|
peerLock sync.Mutex
|
||||||
|
peers map[string]peer.CwtchPeer
|
||||||
|
launched bool // bit hacky, place holder while we transition to full multi peer support and a better api
|
||||||
|
}
|
||||||
|
|
||||||
|
type appletACN struct {
|
||||||
|
acn connectivity.ACN
|
||||||
|
}
|
||||||
|
|
||||||
|
type appletPlugins struct {
|
||||||
|
plugins sync.Map //map[string] []plugins.Plugin
|
||||||
|
}
|
||||||
|
|
||||||
|
// ***** applet ACN
|
||||||
|
|
||||||
|
func (a *appletACN) init(acn connectivity.ACN, publish func(int, string)) {
|
||||||
|
a.acn = acn
|
||||||
|
acn.SetStatusCallback(publish)
|
||||||
|
prog, status := acn.GetBootstrapStatus()
|
||||||
|
publish(prog, status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *appletACN) Shutdown() {
|
||||||
|
a.acn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ***** appletPeers
|
||||||
|
|
||||||
|
func (ap *appletPeers) init() {
|
||||||
|
ap.peers = make(map[string]peer.CwtchPeer)
|
||||||
|
ap.launched = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// LaunchPeers starts each peer Listening and connecting to peers and groups
|
||||||
|
func (ap *appletPeers) LaunchPeers() {
|
||||||
|
log.Debugf("appletPeers LaunchPeers\n")
|
||||||
|
ap.peerLock.Lock()
|
||||||
|
defer ap.peerLock.Unlock()
|
||||||
|
if ap.launched {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for pid, p := range ap.peers {
|
||||||
|
log.Debugf("Launching %v\n", pid)
|
||||||
|
p.Listen()
|
||||||
|
log.Debugf("done Listen() for %v\n", pid)
|
||||||
|
p.StartPeersConnections()
|
||||||
|
log.Debugf("done StartPeersConnections() for %v\n", pid)
|
||||||
|
}
|
||||||
|
ap.launched = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListPeers returns a map of onions to their profile's Name
|
||||||
|
func (ap *appletPeers) ListPeers() map[string]string {
|
||||||
|
keys := map[string]string{}
|
||||||
|
|
||||||
|
ap.peerLock.Lock()
|
||||||
|
defer ap.peerLock.Unlock()
|
||||||
|
for k, p := range ap.peers {
|
||||||
|
keys[k] = p.GetOnion()
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPeer returns a cwtchPeer for a given onion address
|
||||||
|
func (ap *appletPeers) GetPeer(onion string) peer.CwtchPeer {
|
||||||
|
if peer, ok := ap.peers[onion]; ok {
|
||||||
|
return peer
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ***** applet Plugins
|
||||||
|
|
||||||
|
func (ap *appletPlugins) Shutdown() {
|
||||||
|
log.Debugf("shutting down applet plugins...")
|
||||||
|
ap.plugins.Range(func(k, v interface{}) bool {
|
||||||
|
log.Debugf("shutting down plugins for %v", k)
|
||||||
|
ap.ShutdownPeer(k.(string))
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ap *appletPlugins) ShutdownPeer(peerid string) {
|
||||||
|
log.Debugf("shutting down plugins for %v", peerid)
|
||||||
|
pluginsI, ok := ap.plugins.Load(peerid)
|
||||||
|
if ok {
|
||||||
|
plugins := pluginsI.([]plugins.Plugin)
|
||||||
|
for _, plugin := range plugins {
|
||||||
|
log.Debugf("shutting down plugin: %v", plugin)
|
||||||
|
plugin.Shutdown()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ap *appletPlugins) AddPlugin(peerid string, id plugins.PluginID, bus event.Manager, acn connectivity.ACN) {
|
||||||
|
if _, exists := ap.plugins.Load(peerid); !exists {
|
||||||
|
ap.plugins.Store(peerid, []plugins.Plugin{})
|
||||||
|
}
|
||||||
|
|
||||||
|
pluginsinf, _ := ap.plugins.Load(peerid)
|
||||||
|
peerPlugins := pluginsinf.([]plugins.Plugin)
|
||||||
|
|
||||||
|
newp := plugins.Get(id, bus, acn, peerid)
|
||||||
|
newp.Start()
|
||||||
|
peerPlugins = append(peerPlugins, newp)
|
||||||
|
log.Debugf("storing plugin for %v %v", peerid, peerPlugins)
|
||||||
|
ap.plugins.Store(peerid, peerPlugins)
|
||||||
|
}
|
|
@ -0,0 +1,86 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
app2 "cwtch.im/cwtch/app"
|
||||||
|
"cwtch.im/cwtch/app/utils"
|
||||||
|
"cwtch.im/cwtch/peer"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func waitForPeerGroupConnection(peer peer.CwtchPeer, groupID string) error {
|
||||||
|
for {
|
||||||
|
group := peer.GetGroup(groupID)
|
||||||
|
if group != nil {
|
||||||
|
state, _ := peer.GetGroupState(groupID)
|
||||||
|
if state == connections.FAILED {
|
||||||
|
return errors.New("Connection to group " + groupID + " failed!")
|
||||||
|
}
|
||||||
|
if state != connections.AUTHENTICATED {
|
||||||
|
fmt.Printf("peer %v waiting to authenticate with group %v 's server, current state: %v\n", peer.GetOnion(), groupID, connections.ConnectionStateName[state])
|
||||||
|
time.Sleep(time.Second * 10)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return errors.New("peer server connections should have entry for server but do not")
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if len(os.Args) != 2 {
|
||||||
|
fmt.Printf("Usage: ./servermon SERVER_ADDRESS\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
serverAddr := os.Args[1]
|
||||||
|
|
||||||
|
acn, err := tor.NewTorACN(".", "")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Could not start tor: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
app := app2.NewApp(acn, ".")
|
||||||
|
|
||||||
|
app.CreatePeer("servermon", "be gay, do crimes")
|
||||||
|
|
||||||
|
botPeer := utils.WaitGetPeer(app, "servermon")
|
||||||
|
|
||||||
|
fmt.Printf("Connecting to %v...\n", serverAddr)
|
||||||
|
botPeer.JoinServer(serverAddr)
|
||||||
|
groupID, _, err := botPeer.StartGroup(serverAddr)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error creating group on server %v: %v\n", serverAddr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = waitForPeerGroupConnection(botPeer, groupID)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Could not connect to server %v: %v\n", serverAddr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
timeout := 1 * time.Second
|
||||||
|
timeElapsed := 0 * time.Second
|
||||||
|
for {
|
||||||
|
_, err := botPeer.SendMessageToGroupTracked(groupID, timeout.String())
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Sent to group on server %v failed at interval %v of total %v with: %v\n", serverAddr, timeout, timeElapsed, err)
|
||||||
|
os.Exit(1)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Successfully sent message to %v at interval %v of total %v\n", serverAddr, timeout, timeElapsed)
|
||||||
|
}
|
||||||
|
time.Sleep(timeout)
|
||||||
|
timeElapsed += timeout
|
||||||
|
if timeout < 2*time.Minute {
|
||||||
|
timeout = timeout * 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,159 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
//"bufio"
|
||||||
|
//"cwtch.im/cwtch/storage"
|
||||||
|
)
|
||||||
|
|
||||||
|
func convertTorFile(filename string, password string) error {
|
||||||
|
return errors.New("this code doesn't work and can never work :( it's a math thing")
|
||||||
|
|
||||||
|
/*name, _ := diceware.Generate(2)
|
||||||
|
sk, err := ioutil.ReadFile("hs_ed25519_secret_key")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sk = sk[32:]
|
||||||
|
|
||||||
|
pk, err := ioutil.ReadFile("hs_ed25519_public_key")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pk = pk[32:]
|
||||||
|
|
||||||
|
onion, err := ioutil.ReadFile("hostname")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
onion = onion[:56]
|
||||||
|
|
||||||
|
peer := libpeer.NewCwtchPeer(strings.Join(name, "-"))
|
||||||
|
|
||||||
|
fmt.Printf("%d %d %s\n", len(peer.GetProfile().Ed25519PublicKey), len(peer.GetProfile().Ed25519PrivateKey), peer.GetProfile().Onion)
|
||||||
|
peer.GetProfile().Ed25519PrivateKey = sk
|
||||||
|
peer.GetProfile().Ed25519PublicKey = pk
|
||||||
|
peer.GetProfile().Onion = string(onion)
|
||||||
|
fileStore := storage2.NewFileStore(filename, password)
|
||||||
|
err = fileStore.save(peer)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("success! loaded %d byte pk and %d byte sk for %s.onion\n", len(pk), len(sk), onion)
|
||||||
|
return nil*/
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
func vanity() error {
|
||||||
|
for {
|
||||||
|
pk, sk, err := ed25519.GenerateKey(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
onion := utils.GetTorV3Hostname(pk)
|
||||||
|
for i := 4; i < len(os.Args); i++ {
|
||||||
|
if strings.HasPrefix(onion, os.Args[i]) {
|
||||||
|
peer := libpeer.NewCwtchPeer(os.Args[i])
|
||||||
|
peer.GetProfile().Ed25519PrivateKey = sk
|
||||||
|
peer.GetProfile().Ed25519PublicKey = pk
|
||||||
|
peer.GetProfile().Onion = onion
|
||||||
|
profileStore, _ := storage2.NewProfileStore(nil, os.Args[3], onion+".cwtch")
|
||||||
|
profileStore.Init("")
|
||||||
|
// need to signal new onion? impossible
|
||||||
|
log.Infof("found %s.onion\n", onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}*/
|
||||||
|
|
||||||
|
func printHelp() {
|
||||||
|
log.Infoln("usage: cwtchutil {help, convert-cwtch-file, convert-tor-file, changepw, vanity}")
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.SetLevel(log.LevelInfo)
|
||||||
|
if len(os.Args) < 2 {
|
||||||
|
printHelp()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch os.Args[1] {
|
||||||
|
default:
|
||||||
|
printHelp()
|
||||||
|
case "help":
|
||||||
|
printHelp()
|
||||||
|
case "convert-tor-file":
|
||||||
|
if len(os.Args) != 4 {
|
||||||
|
fmt.Println("example: cwtchutil convert-tor-file /var/lib/tor/hs1 passw0rd")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
err := convertTorFile(os.Args[2], os.Args[3])
|
||||||
|
if err != nil {
|
||||||
|
log.Errorln(err)
|
||||||
|
}
|
||||||
|
/*case "vanity":
|
||||||
|
if len(os.Args) < 5 {
|
||||||
|
fmt.Println("example: cwtchutil vanity 4 passw0rd erinn openpriv")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
goroutines, err := strconv.Atoi(os.Args[2])
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("first parameter after vanity should be a number\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
log.Infoln("searching. press ctrl+c to stop")
|
||||||
|
for i := 0; i < goroutines; i++ {
|
||||||
|
go vanity()
|
||||||
|
}
|
||||||
|
|
||||||
|
for { // run until ctrl+c
|
||||||
|
time.Sleep(time.Hour * 24)
|
||||||
|
}*/
|
||||||
|
/*case "changepw":
|
||||||
|
if len(os.Args) != 3 {
|
||||||
|
fmt.Println("example: cwtch changepw ~/.cwtch/profiles/XXX")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("old password: ")
|
||||||
|
reader := bufio.NewReader(os.Stdin)
|
||||||
|
pw, err := reader.ReadString('\n')
|
||||||
|
if err != nil {
|
||||||
|
log.Errorln(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
pw = pw[:len(pw)-1]
|
||||||
|
|
||||||
|
profileStore, _ := storage.NewProfileStore(nil, os.Args[2], pw)
|
||||||
|
|
||||||
|
err = profileStore.Read()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorln(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("new password: ")
|
||||||
|
newpw1, err := reader.ReadString('\n')
|
||||||
|
if err != nil {
|
||||||
|
log.Errorln(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
newpw1 = newpw1[:len(newpw1)-1] // fuck go with this linebreak shit ^ea
|
||||||
|
|
||||||
|
fileStore2, _ := storage.NewProfileStore(nil, os.Args[2], newpw1)
|
||||||
|
// No way to copy, populate this method
|
||||||
|
err = fileStore2.save(peer)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorln(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infoln("success!")
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,38 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
app2 "cwtch.im/cwtch/app"
|
||||||
|
"cwtch.im/cwtch/app/utils"
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
|
||||||
|
// System Setup, We need Tor and Logging up and Running
|
||||||
|
log.AddEverythingFromPattern("peer/alice")
|
||||||
|
log.SetLevel(log.LevelDebug)
|
||||||
|
|
||||||
|
acn, err := tor.NewTorACN(path.Join(".", ".cwtch"), "")
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("\nError connecting to Tor: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
app := app2.NewApp(acn, ".")
|
||||||
|
app.CreatePeer("alice", "be gay, do crimes")
|
||||||
|
alice := utils.WaitGetPeer(app, "alice")
|
||||||
|
app.LaunchPeers()
|
||||||
|
eventBus := app.GetEventBus(alice.GetOnion())
|
||||||
|
queue := event.NewQueue()
|
||||||
|
eventBus.Subscribe(event.NewMessageFromPeer, queue)
|
||||||
|
|
||||||
|
// For every new Data Packet Alice received she will Print it out.
|
||||||
|
for {
|
||||||
|
event := queue.Next()
|
||||||
|
log.Printf(log.LevelInfo, "Received %v from %v: %s", event.EventType, event.Data["Onion"], event.Data["Data"])
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,39 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
app2 "cwtch.im/cwtch/app"
|
||||||
|
"cwtch.im/cwtch/app/utils"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
|
||||||
|
// System Boilerplate, We need Tor Up and Running
|
||||||
|
log.AddEverythingFromPattern("peer/bob")
|
||||||
|
log.SetLevel(log.LevelDebug)
|
||||||
|
acn, err := tor.NewTorACN(path.Join(".", ".cwtch"), "")
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("\nError connecting to Tor: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
app := app2.NewApp(acn, ".")
|
||||||
|
app.CreatePeer("bob", "be gay, do crimes")
|
||||||
|
bob := utils.WaitGetPeer(app, "bob")
|
||||||
|
|
||||||
|
// Add Alice's Onion Here (It changes run to run)
|
||||||
|
bob.PeerWithOnion("upiztu7myymjf2dn4x4czhagp7axlnqjvf5zwfegbhtpkqb6v3vgu5yd")
|
||||||
|
|
||||||
|
// Send the Message...
|
||||||
|
log.Infof("Waiting for Bob to Connect to Alice...")
|
||||||
|
bob.SendMessageToPeer("upiztu7myymjf2dn4x4czhagp7axlnqjvf5zwfegbhtpkqb6v3vgu5yd", "Hello Alice!!!")
|
||||||
|
|
||||||
|
// Wait a while...
|
||||||
|
// Everything is run in a goroutine so the main thread has to stay active
|
||||||
|
time.Sleep(time.Second * 100)
|
||||||
|
|
||||||
|
}
|
|
@ -1,47 +0,0 @@
|
||||||
package plugins
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const antispamTickTime = 30 * time.Second
|
|
||||||
|
|
||||||
type antispam struct {
|
|
||||||
bus event.Manager
|
|
||||||
queue event.Queue
|
|
||||||
breakChan chan bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *antispam) Start() {
|
|
||||||
go a.run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *antispam) Id() PluginID {
|
|
||||||
return ANTISPAM
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *antispam) Shutdown() {
|
|
||||||
a.breakChan <- true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *antispam) run() {
|
|
||||||
log.Debugf("running antispam trigger plugin")
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-time.After(antispamTickTime):
|
|
||||||
// no fuss, just trigger the check. Downstream will filter out superfluous actions
|
|
||||||
a.bus.Publish(event.NewEvent(event.TriggerAntispamCheck, map[event.Field]string{}))
|
|
||||||
continue
|
|
||||||
case <-a.breakChan:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAntiSpam returns a Plugin that when started will trigger antispam payments on a regular interval
|
|
||||||
func NewAntiSpam(bus event.Manager) Plugin {
|
|
||||||
cr := &antispam{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
|
||||||
return cr
|
|
||||||
}
|
|
|
@ -3,26 +3,13 @@ package plugins
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Todo: Move to protocol/connections
|
const tickTime = 10 * time.Second
|
||||||
// This Plugin is now required and it makes more sense to run more integrated in engine
|
const maxBakoff int = 32 // 320 seconds or ~5 min
|
||||||
|
|
||||||
const tickTimeSec = 30
|
|
||||||
const tickTime = tickTimeSec * time.Second
|
|
||||||
|
|
||||||
const circuitTimeoutSecs int = 120
|
|
||||||
|
|
||||||
const MaxBaseTimeoutSec = 5 * 60 // a max base time out of 5 min
|
|
||||||
const maxFailedBackoff = 6 // 2^6 = 64 -> 64 * [2m to 5m] = 2h8m to 5h20m
|
|
||||||
|
|
||||||
const PriorityQueueTimeSinceQualifierHours float64 = 168
|
|
||||||
|
|
||||||
type connectionType int
|
type connectionType int
|
||||||
|
|
||||||
|
@ -36,129 +23,28 @@ type contact struct {
|
||||||
state connections.ConnectionState
|
state connections.ConnectionState
|
||||||
ctype connectionType
|
ctype connectionType
|
||||||
|
|
||||||
lastAttempt time.Time
|
ticks int
|
||||||
failedCount int
|
backoff int
|
||||||
|
|
||||||
lastSeen time.Time
|
|
||||||
queued bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// compare a to b
|
|
||||||
// returns -1 if a < b
|
|
||||||
//
|
|
||||||
// 0 if a == b
|
|
||||||
// +1 if a > b
|
|
||||||
//
|
|
||||||
// algo: sort by failedCount first favouring less attempts, then sort by lastSeen time favouring more recent connections
|
|
||||||
func (a *contact) compare(b *contact) int {
|
|
||||||
if a.failedCount < b.failedCount {
|
|
||||||
return -1
|
|
||||||
} else if a.failedCount > b.failedCount {
|
|
||||||
return +1
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.lastSeen.After(b.lastSeen) {
|
|
||||||
return -1
|
|
||||||
} else if a.lastSeen.Before(b.lastSeen) {
|
|
||||||
return +1
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type connectionQueue struct {
|
|
||||||
queue []*contact
|
|
||||||
}
|
|
||||||
|
|
||||||
func newConnectionQueue() *connectionQueue {
|
|
||||||
return &connectionQueue{queue: []*contact{}}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cq *connectionQueue) insert(c *contact) {
|
|
||||||
// find loc
|
|
||||||
i := 0
|
|
||||||
var b *contact
|
|
||||||
for i, b = range cq.queue {
|
|
||||||
if c.compare(b) >= 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// insert
|
|
||||||
if len(cq.queue) == i { // nil or empty slice or after last element
|
|
||||||
cq.queue = append(cq.queue, c)
|
|
||||||
} else {
|
|
||||||
cq.queue = append(cq.queue[:i+1], cq.queue[i:]...) // index < len(a)
|
|
||||||
cq.queue[i] = c
|
|
||||||
}
|
|
||||||
|
|
||||||
c.queued = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cq *connectionQueue) dequeue() *contact {
|
|
||||||
if len(cq.queue) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
c := cq.queue[0]
|
|
||||||
cq.queue = cq.queue[1:]
|
|
||||||
c.queued = false
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cq *connectionQueue) len() int {
|
|
||||||
return len(cq.queue)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type contactRetry struct {
|
type contactRetry struct {
|
||||||
bus event.Manager
|
bus event.Manager
|
||||||
queue event.Queue
|
queue event.Queue
|
||||||
ACNUp bool
|
networkUp bool
|
||||||
ACNUpTime time.Time
|
running bool
|
||||||
protocolEngine bool
|
breakChan chan bool
|
||||||
running bool
|
onion string
|
||||||
breakChan chan bool
|
lastCheck time.Time
|
||||||
onion string
|
|
||||||
lastCheck time.Time
|
|
||||||
acnProgress int
|
|
||||||
|
|
||||||
connections sync.Map //[string]*contact
|
connections sync.Map //[string]*contact
|
||||||
pendingQueue *connectionQueue
|
|
||||||
priorityQueue *connectionQueue
|
|
||||||
authorizedPeers sync.Map
|
|
||||||
stallRetries bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewConnectionRetry returns a Plugin that when started will retry connecting to contacts with a failedCount timing
|
// NewConnectionRetry returns a Plugin that when started will retry connecting to contacts with a backoff timing
|
||||||
func NewConnectionRetry(bus event.Manager, onion string) Plugin {
|
func NewConnectionRetry(bus event.Manager, onion string) Plugin {
|
||||||
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1), authorizedPeers: sync.Map{}, connections: sync.Map{}, stallRetries: true, ACNUp: false, ACNUpTime: time.Now(), protocolEngine: false, onion: onion, pendingQueue: newConnectionQueue(), priorityQueue: newConnectionQueue()}
|
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool), connections: sync.Map{}, networkUp: false, onion: onion}
|
||||||
return cr
|
return cr
|
||||||
}
|
}
|
||||||
|
|
||||||
// maxTorCircuitsPending a function to throttle access to tor network during start up
|
|
||||||
func (cr *contactRetry) maxTorCircuitsPending() int {
|
|
||||||
timeSinceStart := time.Since(cr.ACNUpTime)
|
|
||||||
if timeSinceStart < 30*time.Second {
|
|
||||||
return 4
|
|
||||||
} else if timeSinceStart < 4*time.Minute {
|
|
||||||
return 8
|
|
||||||
} else if timeSinceStart < 8*time.Minute {
|
|
||||||
return 16
|
|
||||||
}
|
|
||||||
return connections.TorMaxPendingConns
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr *contactRetry) connectingCount() int {
|
|
||||||
connecting := 0
|
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
|
||||||
conn := v.(*contact)
|
|
||||||
if conn.state == connections.CONNECTING {
|
|
||||||
connecting++
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
return connecting
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr *contactRetry) Start() {
|
func (cr *contactRetry) Start() {
|
||||||
if !cr.running {
|
if !cr.running {
|
||||||
go cr.run()
|
go cr.run()
|
||||||
|
@ -167,173 +53,48 @@ func (cr *contactRetry) Start() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) Id() PluginID {
|
|
||||||
return CONNECTIONRETRY
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr *contactRetry) run() {
|
func (cr *contactRetry) run() {
|
||||||
cr.running = true
|
cr.running = true
|
||||||
cr.bus.Subscribe(event.PeerStateChange, cr.queue)
|
cr.bus.Subscribe(event.PeerStateChange, cr.queue)
|
||||||
cr.bus.Subscribe(event.ACNStatus, cr.queue)
|
cr.bus.Subscribe(event.ACNStatus, cr.queue)
|
||||||
cr.bus.Subscribe(event.ServerStateChange, cr.queue)
|
cr.bus.Subscribe(event.ServerStateChange, cr.queue)
|
||||||
cr.bus.Subscribe(event.QueuePeerRequest, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.QueueJoinServer, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.DisconnectPeerRequest, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.DisconnectServerRequest, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.ProtocolEngineShutdown, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.ProtocolEngineCreated, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.DeleteContact, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.UpdateConversationAuthorization, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.PurgeRetries, cr.queue)
|
|
||||||
cr.bus.Subscribe(event.ResumeRetries, cr.queue)
|
|
||||||
for {
|
for {
|
||||||
// Only attempt connection if both the ACN and the Protocol Engines are Online...
|
if time.Since(cr.lastCheck) > tickTime {
|
||||||
log.Debugf("restartFlow checking state")
|
cr.retryDisconnected()
|
||||||
if cr.ACNUp && cr.protocolEngine && !cr.stallRetries {
|
|
||||||
log.Debugf("restartFlow time to queue!!")
|
|
||||||
cr.requeueReady()
|
|
||||||
connectingCount := cr.connectingCount()
|
|
||||||
|
|
||||||
// do priority connections first...
|
|
||||||
for connectingCount < cr.maxTorCircuitsPending() && len(cr.priorityQueue.queue) > 0 {
|
|
||||||
contact := cr.priorityQueue.dequeue()
|
|
||||||
if contact == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// could have received incoming connection while in queue, make sure still disconnected before trying
|
|
||||||
if contact.state == connections.DISCONNECTED {
|
|
||||||
cr.publishConnectionRequest(contact)
|
|
||||||
connectingCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for connectingCount < cr.maxTorCircuitsPending() && len(cr.pendingQueue.queue) > 0 {
|
|
||||||
contact := cr.pendingQueue.dequeue()
|
|
||||||
if contact == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// could have received incoming connection while in queue, make sure still disconnected before trying
|
|
||||||
if contact.state == connections.DISCONNECTED {
|
|
||||||
cr.publishConnectionRequest(contact)
|
|
||||||
connectingCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cr.lastCheck = time.Now()
|
cr.lastCheck = time.Now()
|
||||||
}
|
}
|
||||||
// regardless of if we're up, run manual force deconnectiong of timed out connections
|
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
|
||||||
p := v.(*contact)
|
|
||||||
if p.state == connections.CONNECTING && time.Since(p.lastAttempt) > time.Duration(circuitTimeoutSecs)*time.Second*2 {
|
|
||||||
// we have been "connecting" for twice the circuttimeout so it's failed, we just didn't learn about it, manually disconnect
|
|
||||||
cr.handleEvent(p.id, connections.DISCONNECTED, p.ctype)
|
|
||||||
log.Errorf("had to manually set peer %v of profile %v to DISCONNECTED due to assumed circuit timeout (%v) seconds", p.id, cr.onion, circuitTimeoutSecs*2)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case e := <-cr.queue.OutChan():
|
case e := <-cr.queue.OutChan():
|
||||||
switch e.EventType {
|
switch e.EventType {
|
||||||
case event.PurgeRetries:
|
|
||||||
// Purge All Authorized Peers
|
|
||||||
cr.authorizedPeers.Range(func(key interface{}, value interface{}) bool {
|
|
||||||
cr.authorizedPeers.Delete(key)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
// Purge All Connection States
|
|
||||||
cr.connections.Range(func(key interface{}, value interface{}) bool {
|
|
||||||
cr.connections.Delete(key)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
case event.ResumeRetries:
|
|
||||||
log.Infof("resuming retries...")
|
|
||||||
cr.stallRetries = false
|
|
||||||
case event.DisconnectPeerRequest:
|
|
||||||
peer := e.Data[event.RemotePeer]
|
|
||||||
cr.authorizedPeers.Delete(peer)
|
|
||||||
case event.DisconnectServerRequest:
|
|
||||||
peer := e.Data[event.GroupServer]
|
|
||||||
cr.authorizedPeers.Delete(peer)
|
|
||||||
case event.DeleteContact:
|
|
||||||
// this case covers both servers and peers (servers are peers, and go through the
|
|
||||||
// same delete conversation flow)
|
|
||||||
peer := e.Data[event.RemotePeer]
|
|
||||||
cr.authorizedPeers.Delete(peer)
|
|
||||||
case event.UpdateConversationAuthorization:
|
|
||||||
// if we update the conversation authorization then we need to check if
|
|
||||||
// we need to remove blocked conversations from the regular flow.
|
|
||||||
peer := e.Data[event.RemotePeer]
|
|
||||||
blocked := e.Data[event.Blocked]
|
|
||||||
if blocked == "true" {
|
|
||||||
cr.authorizedPeers.Delete(peer)
|
|
||||||
}
|
|
||||||
case event.PeerStateChange:
|
case event.PeerStateChange:
|
||||||
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
||||||
peer := e.Data[event.RemotePeer]
|
peer := e.Data[event.RemotePeer]
|
||||||
// only handle state change events from pre-authorized peers;
|
cr.handleEvent(peer, state, peerConn)
|
||||||
if _, exists := cr.authorizedPeers.Load(peer); exists {
|
|
||||||
cr.handleEvent(peer, state, peerConn)
|
|
||||||
}
|
|
||||||
case event.ServerStateChange:
|
case event.ServerStateChange:
|
||||||
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
|
||||||
server := e.Data[event.GroupServer]
|
server := e.Data[event.GroupServer]
|
||||||
// only handle state change events from pre-authorized servers;
|
cr.handleEvent(server, state, serverConn)
|
||||||
if _, exists := cr.authorizedPeers.Load(server); exists {
|
|
||||||
cr.handleEvent(server, state, serverConn)
|
|
||||||
}
|
|
||||||
case event.QueueJoinServer:
|
|
||||||
fallthrough
|
|
||||||
case event.QueuePeerRequest:
|
|
||||||
lastSeen, err := time.Parse(time.RFC3339Nano, e.Data[event.LastSeen])
|
|
||||||
if err != nil {
|
|
||||||
lastSeen = event.CwtchEpoch
|
|
||||||
}
|
|
||||||
|
|
||||||
id := ""
|
|
||||||
if peer, exists := e.Data[event.RemotePeer]; exists {
|
|
||||||
id = peer
|
|
||||||
cr.addConnection(peer, connections.DISCONNECTED, peerConn, lastSeen)
|
|
||||||
} else if server, exists := e.Data[event.GroupServer]; exists {
|
|
||||||
id = server
|
|
||||||
cr.addConnection(server, connections.DISCONNECTED, serverConn, lastSeen)
|
|
||||||
}
|
|
||||||
// this was an authorized event, and so we store this peer.
|
|
||||||
log.Debugf("authorizing id: %v", id)
|
|
||||||
cr.authorizedPeers.Store(id, true)
|
|
||||||
if c, ok := cr.connections.Load(id); ok {
|
|
||||||
contact := c.(*contact)
|
|
||||||
if contact.state == connections.DISCONNECTED {
|
|
||||||
// prioritize connections made in the last week
|
|
||||||
if time.Since(contact.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
|
|
||||||
cr.priorityQueue.insert(contact)
|
|
||||||
} else {
|
|
||||||
cr.pendingQueue.insert(contact)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case event.ProtocolEngineShutdown:
|
|
||||||
cr.ACNUp = false
|
|
||||||
cr.protocolEngine = false
|
|
||||||
cr.stallRetries = true
|
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
|
||||||
p := v.(*contact)
|
|
||||||
if p.state == connections.AUTHENTICATED || p.state == connections.SYNCED {
|
|
||||||
p.lastSeen = time.Now()
|
|
||||||
}
|
|
||||||
p.state = connections.DISCONNECTED
|
|
||||||
p.failedCount = 0
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
case event.ProtocolEngineCreated:
|
|
||||||
cr.protocolEngine = true
|
|
||||||
cr.processStatus()
|
|
||||||
|
|
||||||
case event.ACNStatus:
|
case event.ACNStatus:
|
||||||
progData := e.Data[event.Progress]
|
prog := e.Data[event.Progress]
|
||||||
if prog, err := strconv.Atoi(progData); err == nil {
|
if prog == "100" && !cr.networkUp {
|
||||||
cr.acnProgress = prog
|
cr.networkUp = true
|
||||||
cr.processStatus()
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
|
p := v.(*contact)
|
||||||
|
p.ticks = 0
|
||||||
|
p.backoff = 1
|
||||||
|
if p.ctype == peerConn {
|
||||||
|
cr.bus.Publish(event.NewEvent(event.RetryPeerRequest, map[event.Field]string{event.RemotePeer: p.id}))
|
||||||
|
}
|
||||||
|
if p.ctype == serverConn {
|
||||||
|
cr.bus.Publish(event.NewEvent(event.RetryServerRequest, map[event.Field]string{event.GroupServer: p.id}))
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
} else if prog != "100" {
|
||||||
|
cr.networkUp = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -347,173 +108,54 @@ func (cr *contactRetry) run() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) processStatus() {
|
func (cr *contactRetry) retryDisconnected() {
|
||||||
if !cr.protocolEngine {
|
|
||||||
cr.ACNUp = false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cr.acnProgress == 100 && !cr.ACNUp {
|
|
||||||
// ACN is up...at this point we need to completely reset our state
|
|
||||||
// as there is no guarantee that the tor daemon shares our state anymore...
|
|
||||||
cr.ACNUp = true
|
|
||||||
cr.ACNUpTime = time.Now()
|
|
||||||
|
|
||||||
// reset all of the queues...
|
|
||||||
cr.priorityQueue = newConnectionQueue()
|
|
||||||
cr.pendingQueue = newConnectionQueue()
|
|
||||||
|
|
||||||
// Loop through connections. Reset state, and requeue...
|
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
|
||||||
p := v.(*contact)
|
|
||||||
|
|
||||||
// only reload connections if they are on the authorized peers list
|
|
||||||
if _, exists := cr.authorizedPeers.Load(p.id); exists {
|
|
||||||
p.queued = true
|
|
||||||
// prioritize connections made recently...
|
|
||||||
log.Debugf("adding %v to queue", p.id)
|
|
||||||
if time.Since(p.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
|
|
||||||
cr.priorityQueue.insert(p)
|
|
||||||
} else {
|
|
||||||
cr.pendingQueue.insert(p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
|
|
||||||
} else if cr.acnProgress != 100 {
|
|
||||||
cr.ACNUp = false
|
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
|
||||||
p := v.(*contact)
|
|
||||||
p.failedCount = 0
|
|
||||||
p.queued = false
|
|
||||||
p.state = connections.DISCONNECTED
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr *contactRetry) requeueReady() {
|
|
||||||
if !cr.ACNUp {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var retryable []*contact
|
|
||||||
|
|
||||||
throughPutPerMin := int((float64(cr.maxTorCircuitsPending()) / float64(circuitTimeoutSecs)) * 60.0)
|
|
||||||
queueCount := cr.priorityQueue.len() + cr.pendingQueue.len()
|
|
||||||
// adjustedBaseTimeout = basetimeoust * (queuedItemsCount / throughPutPerMin)
|
|
||||||
// when less items are queued than through put it'll lower adjustedBaseTimeOut, but that'll be reset in the next block
|
|
||||||
// when more items are queued it will increase the timeout, to a max of MaxBaseTimeoutSec (enforced in the next block)
|
|
||||||
adjustedBaseTimeout := circuitTimeoutSecs * (queueCount / throughPutPerMin)
|
|
||||||
|
|
||||||
// circuitTimeoutSecs (120s) < adjustedBaseTimeout < MaxBaseTimeoutSec (300s)
|
|
||||||
if adjustedBaseTimeout < circuitTimeoutSecs {
|
|
||||||
adjustedBaseTimeout = circuitTimeoutSecs
|
|
||||||
} else if adjustedBaseTimeout > MaxBaseTimeoutSec {
|
|
||||||
adjustedBaseTimeout = MaxBaseTimeoutSec
|
|
||||||
}
|
|
||||||
|
|
||||||
cr.connections.Range(func(k, v interface{}) bool {
|
cr.connections.Range(func(k, v interface{}) bool {
|
||||||
p := v.(*contact)
|
p := v.(*contact)
|
||||||
|
|
||||||
// Don't retry anyone who isn't on the authorized peers list
|
if p.state == connections.DISCONNECTED {
|
||||||
if _, exists := cr.authorizedPeers.Load(p.id); exists {
|
p.ticks++
|
||||||
if p.state == connections.DISCONNECTED && !p.queued {
|
if p.ticks >= p.backoff {
|
||||||
timeout := time.Duration((math.Pow(2, float64(p.failedCount)))*float64(adjustedBaseTimeout /*baseTimeoutSec*/)) * time.Second
|
p.ticks = 0
|
||||||
if time.Since(p.lastAttempt) > timeout {
|
if cr.networkUp {
|
||||||
retryable = append(retryable, p)
|
if p.ctype == peerConn {
|
||||||
|
cr.bus.Publish(event.NewEvent(event.RetryPeerRequest, map[event.Field]string{event.RemotePeer: p.id}))
|
||||||
|
}
|
||||||
|
if p.ctype == serverConn {
|
||||||
|
cr.bus.Publish(event.NewEvent(event.RetryServerRequest, map[event.Field]string{event.GroupServer: p.id}))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
for _, contact := range retryable {
|
|
||||||
if time.Since(contact.lastSeen).Hours() < PriorityQueueTimeSinceQualifierHours {
|
|
||||||
cr.priorityQueue.insert(contact)
|
|
||||||
} else {
|
|
||||||
cr.pendingQueue.insert(contact)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr *contactRetry) publishConnectionRequest(contact *contact) {
|
|
||||||
log.Debugf("RestartFlow Publish Connection Request listener %v", contact)
|
|
||||||
if contact.ctype == peerConn {
|
|
||||||
cr.bus.Publish(event.NewEvent(event.PeerRequest, map[event.Field]string{event.RemotePeer: contact.id}))
|
|
||||||
}
|
|
||||||
if contact.ctype == serverConn {
|
|
||||||
cr.bus.Publish(event.NewEvent(event.RetryServerRequest, map[event.Field]string{event.GroupServer: contact.id}))
|
|
||||||
}
|
|
||||||
contact.state = connections.CONNECTING // Hacky but needed so we don't over flood waiting for PeerStateChange from engine
|
|
||||||
contact.lastAttempt = time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr *contactRetry) addConnection(id string, state connections.ConnectionState, ctype connectionType, lastSeen time.Time) {
|
|
||||||
// don't handle contact retries for ourselves
|
|
||||||
if id == cr.onion {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, exists := cr.connections.Load(id); !exists {
|
|
||||||
p := &contact{id: id, state: state, failedCount: 0, lastAttempt: event.CwtchEpoch, ctype: ctype, lastSeen: lastSeen, queued: false}
|
|
||||||
cr.connections.Store(id, p)
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
// we have rerequested this connnection, probably via an explicit ask, update it's state
|
|
||||||
if c, ok := cr.connections.Load(id); ok {
|
|
||||||
contact := c.(*contact)
|
|
||||||
contact.state = state
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) handleEvent(id string, state connections.ConnectionState, ctype connectionType) {
|
func (cr *contactRetry) handleEvent(id string, state connections.ConnectionState, ctype connectionType) {
|
||||||
log.Debugf("cr.handleEvent state to %v on id %v", connections.ConnectionStateName[state], id)
|
|
||||||
|
|
||||||
// don't handle contact retries for ourselves
|
|
||||||
if id == cr.onion {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// reject events that contain invalid hostnames...we cannot connect to them
|
|
||||||
// and they could result in spurious connection attempts...
|
|
||||||
if !tor.IsValidHostname(id) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, exists := cr.connections.Load(id); !exists {
|
if _, exists := cr.connections.Load(id); !exists {
|
||||||
// We have an event for something we don't know about...
|
p := &contact{id: id, state: connections.DISCONNECTED, backoff: 0, ticks: 0, ctype: ctype}
|
||||||
// The only reason this should happen is if a *new* Peer/Server connection has changed.
|
cr.connections.Store(id, p)
|
||||||
// Let's set the timeout to Now() to indicate that this is a fresh connection, and so should likely be prioritized.
|
|
||||||
cr.addConnection(id, state, ctype, time.Now())
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
pinf, _ := cr.connections.Load(id)
|
pinf, _ := cr.connections.Load(id)
|
||||||
p := pinf.(*contact)
|
p := pinf.(*contact)
|
||||||
log.Debugf(" managing state change for %v %v to %v by self %v", id, connections.ConnectionStateName[p.state], connections.ConnectionStateName[state], cr.onion)
|
|
||||||
if state == connections.DISCONNECTED || state == connections.FAILED || state == connections.KILLED {
|
if state == connections.DISCONNECTED || state == connections.FAILED || state == connections.KILLED {
|
||||||
if p.state == connections.SYNCED || p.state == connections.AUTHENTICATED {
|
|
||||||
p.lastSeen = time.Now()
|
|
||||||
} else {
|
|
||||||
p.failedCount += 1
|
|
||||||
}
|
|
||||||
p.state = connections.DISCONNECTED
|
p.state = connections.DISCONNECTED
|
||||||
p.lastAttempt = time.Now()
|
if p.backoff == 0 {
|
||||||
if p.failedCount > maxFailedBackoff {
|
p.backoff = 1
|
||||||
p.failedCount = maxFailedBackoff
|
} else if p.backoff < maxBakoff {
|
||||||
|
p.backoff *= 2
|
||||||
}
|
}
|
||||||
|
p.ticks = 0
|
||||||
} else if state == connections.CONNECTING || state == connections.CONNECTED {
|
} else if state == connections.CONNECTING || state == connections.CONNECTED {
|
||||||
p.state = state
|
p.state = state
|
||||||
} else if state == connections.AUTHENTICATED || state == connections.SYNCED {
|
} else if state == connections.AUTHENTICATED {
|
||||||
p.state = state
|
p.state = state
|
||||||
p.lastSeen = time.Now()
|
p.backoff = 0
|
||||||
p.failedCount = 0
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *contactRetry) Shutdown() {
|
func (cr *contactRetry) Shutdown() {
|
||||||
cr.breakChan <- true
|
cr.breakChan <- true
|
||||||
cr.queue.Shutdown()
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,128 +0,0 @@
|
||||||
package plugins
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestContactRetryQueue simulates some basic connection queueing
|
|
||||||
// NOTE: This whole test is a race condition, and does flag go's detector
|
|
||||||
// We are invasively checking the internal state of the retry plugin and accessing pointers from another
|
|
||||||
// thread.
|
|
||||||
// We could build an entire thread safe monitoring functonality, but that would dramatically expand the scope of this test.
|
|
||||||
|
|
||||||
func TestContactRetryQueue(t *testing.T) {
|
|
||||||
log.SetLevel(log.LevelDebug)
|
|
||||||
bus := event.NewEventManager()
|
|
||||||
cr := NewConnectionRetry(bus, "").(*contactRetry)
|
|
||||||
cr.ACNUp = true // fake an ACN connection...
|
|
||||||
cr.protocolEngine = true // fake protocol engine
|
|
||||||
cr.stallRetries = false // fake not being in offline mode...
|
|
||||||
go cr.run()
|
|
||||||
|
|
||||||
testOnion := "2wgvbza2mbuc72a4u6r6k4hc2blcvrmk4q26bfvlwbqxv2yq5k52fcqd"
|
|
||||||
|
|
||||||
t.Logf("contact plugin up and running..sending peer connection...")
|
|
||||||
// Assert that there is a peer connection identified as "test"
|
|
||||||
bus.Publish(event.NewEvent(event.QueuePeerRequest, map[event.Field]string{event.RemotePeer: testOnion, event.LastSeen: "test"}))
|
|
||||||
|
|
||||||
// Wait until the test actually exists, and is queued
|
|
||||||
// This is the worst part of this test setup. Ideally we would sleep, or some other yielding, but
|
|
||||||
// go test scheduling doesn't like that and even sleeping long periods won't cause the event thread to make
|
|
||||||
// progress...
|
|
||||||
setup := false
|
|
||||||
for !setup {
|
|
||||||
if _, exists := cr.connections.Load(testOnion); exists {
|
|
||||||
if _, exists := cr.authorizedPeers.Load(testOnion); exists {
|
|
||||||
t.Logf("authorized")
|
|
||||||
setup = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We should very quickly become connecting...
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
pinf, _ := cr.connections.Load(testOnion)
|
|
||||||
if pinf.(*contact).state != 1 {
|
|
||||||
t.Fatalf("test connection should be in connecting after update, actually: %v", pinf.(*contact).state)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Asset that "test" is authenticated
|
|
||||||
cr.handleEvent(testOnion, connections.AUTHENTICATED, peerConn)
|
|
||||||
|
|
||||||
// Assert that "test has a valid state"
|
|
||||||
pinf, _ = cr.connections.Load(testOnion)
|
|
||||||
if pinf.(*contact).state != 3 {
|
|
||||||
t.Fatalf("test connection should be in authenticated after update, actually: %v", pinf.(*contact).state)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Publish an unrelated event to trigger the Plugin to go through a queuing cycle
|
|
||||||
// If we didn't do this we would have to wait 30 seconds for a check-in
|
|
||||||
bus.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{event.RemotePeer: "test2", event.ConnectionState: "Disconnected"}))
|
|
||||||
bus.Publish(event.NewEvent(event.QueuePeerRequest, map[event.Field]string{event.RemotePeer: testOnion, event.LastSeen: time.Now().Format(time.RFC3339Nano)}))
|
|
||||||
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
pinf, _ = cr.connections.Load(testOnion)
|
|
||||||
if pinf.(*contact).state != 1 {
|
|
||||||
t.Fatalf("test connection should be in connecting after update, actually: %v", pinf.(*contact).state)
|
|
||||||
}
|
|
||||||
|
|
||||||
cr.Shutdown()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Takes around 4 min unless you adjust the consts for tickTimeSec and circuitTimeoutSecs
|
|
||||||
/*
|
|
||||||
func TestRetryEmission(t *testing.T) {
|
|
||||||
log.SetLevel(log.LevelDebug)
|
|
||||||
log.Infof("*** Starting TestRetryEmission! ***")
|
|
||||||
bus := event.NewEventManager()
|
|
||||||
|
|
||||||
testQueue := event.NewQueue()
|
|
||||||
bus.Subscribe(event.PeerRequest, testQueue)
|
|
||||||
|
|
||||||
cr := NewConnectionRetry(bus, "").(*contactRetry)
|
|
||||||
cr.Start()
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
|
|
||||||
bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, "100"))
|
|
||||||
bus.Publish(event.NewEventList(event.ProtocolEngineCreated))
|
|
||||||
|
|
||||||
pub, _, _ := ed25519.GenerateKey(rand.Reader)
|
|
||||||
peerAddr := tor.GetTorV3Hostname(pub)
|
|
||||||
|
|
||||||
bus.Publish(event.NewEventList(event.QueuePeerRequest, event.RemotePeer, peerAddr, event.LastSeen, time.Now().Format(time.RFC3339Nano)))
|
|
||||||
|
|
||||||
log.Infof("Fetching 1st event")
|
|
||||||
ev := testQueue.Next()
|
|
||||||
if ev.EventType != event.PeerRequest {
|
|
||||||
t.Errorf("1st event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
|
|
||||||
}
|
|
||||||
log.Infof("1st event: %v", ev)
|
|
||||||
|
|
||||||
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.DISCONNECTED]))
|
|
||||||
|
|
||||||
log.Infof("fetching 2nd event")
|
|
||||||
ev = testQueue.Next()
|
|
||||||
log.Infof("2nd event: %v", ev)
|
|
||||||
if ev.EventType != event.PeerRequest {
|
|
||||||
t.Errorf("2nd event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
|
|
||||||
}
|
|
||||||
|
|
||||||
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.CONNECTED]))
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
bus.Publish(event.NewEventList(event.PeerStateChange, event.RemotePeer, peerAddr, event.ConnectionState, connections.ConnectionStateName[connections.DISCONNECTED]))
|
|
||||||
|
|
||||||
log.Infof("fetching 3rd event")
|
|
||||||
ev = testQueue.Next()
|
|
||||||
log.Infof("3nd event: %v", ev)
|
|
||||||
if ev.EventType != event.PeerRequest {
|
|
||||||
t.Errorf("3nd event emitted was %v, expected %v", ev.EventType, event.PeerRequest)
|
|
||||||
}
|
|
||||||
|
|
||||||
cr.Shutdown()
|
|
||||||
}
|
|
||||||
*/
|
|
|
@ -1,49 +0,0 @@
|
||||||
package plugins
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const heartbeatTickTime = 60 * time.Second
|
|
||||||
|
|
||||||
type heartbeat struct {
|
|
||||||
bus event.Manager
|
|
||||||
queue event.Queue
|
|
||||||
breakChan chan bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hb *heartbeat) Start() {
|
|
||||||
go hb.run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hb *heartbeat) Id() PluginID {
|
|
||||||
return HEARTBEAT
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hb *heartbeat) Shutdown() {
|
|
||||||
hb.breakChan <- true
|
|
||||||
hb.queue.Shutdown()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hb *heartbeat) run() {
|
|
||||||
log.Debugf("running heartbeat trigger plugin")
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-time.After(heartbeatTickTime):
|
|
||||||
// no fuss, just trigger the beat.
|
|
||||||
hb.bus.Publish(event.NewEvent(event.Heartbeat, map[event.Field]string{}))
|
|
||||||
continue
|
|
||||||
case <-hb.breakChan:
|
|
||||||
log.Debugf("shutting down heartbeat plugin")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHeartbeat returns a Plugin that when started will trigger heartbeat checks on a regular interval
|
|
||||||
func NewHeartbeat(bus event.Manager) Plugin {
|
|
||||||
cr := &heartbeat{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
|
||||||
return cr
|
|
||||||
}
|
|
|
@ -3,7 +3,7 @@ package plugins
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
"cwtch.im/cwtch/utils"
|
"fmt"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -16,23 +16,21 @@ const NetworkCheckError = "Error"
|
||||||
// NetworkCheckSuccess is a status for when the NetworkCheck Plugin has had a successful message from a peer, indicating it is online right now
|
// NetworkCheckSuccess is a status for when the NetworkCheck Plugin has had a successful message from a peer, indicating it is online right now
|
||||||
const NetworkCheckSuccess = "Success"
|
const NetworkCheckSuccess = "Success"
|
||||||
|
|
||||||
const NetworkCheckPeriod = time.Minute
|
|
||||||
|
|
||||||
// networkCheck is a convenience plugin for testing high level availability of onion services
|
// networkCheck is a convenience plugin for testing high level availability of onion services
|
||||||
type networkCheck struct {
|
type networkCheck struct {
|
||||||
bus event.Manager
|
bus event.Manager
|
||||||
queue event.Queue
|
queue event.Queue
|
||||||
onion string
|
acn connectivity.ACN
|
||||||
acn connectivity.ACN
|
onionsToCheck sync.Map // onion:string => true:bool
|
||||||
breakChan chan bool
|
breakChan chan bool
|
||||||
running bool
|
running bool
|
||||||
offline bool
|
offline bool
|
||||||
offlineLock sync.Mutex
|
offlineLock sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNetworkCheck returns a Plugin that when started will attempt various network tests
|
// NewNetworkCheck returns a Plugin that when started will attempt various network tests
|
||||||
func NewNetworkCheck(onion string, bus event.Manager, acn connectivity.ACN) Plugin {
|
func NewNetworkCheck(bus event.Manager, acn connectivity.ACN) Plugin {
|
||||||
nc := &networkCheck{onion: onion, bus: bus, acn: acn, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
nc := &networkCheck{bus: bus, acn: acn, queue: event.NewQueue(), breakChan: make(chan bool, 1)}
|
||||||
return nc
|
return nc
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -40,10 +38,6 @@ func (nc *networkCheck) Start() {
|
||||||
go nc.run()
|
go nc.run()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (nc *networkCheck) Id() PluginID {
|
|
||||||
return NETWORKCHECK
|
|
||||||
}
|
|
||||||
|
|
||||||
func (nc *networkCheck) run() {
|
func (nc *networkCheck) run() {
|
||||||
nc.running = true
|
nc.running = true
|
||||||
nc.offline = true
|
nc.offline = true
|
||||||
|
@ -55,7 +49,7 @@ func (nc *networkCheck) run() {
|
||||||
nc.bus.Subscribe(event.ServerStateChange, nc.queue)
|
nc.bus.Subscribe(event.ServerStateChange, nc.queue)
|
||||||
nc.bus.Subscribe(event.NewGetValMessageFromPeer, nc.queue)
|
nc.bus.Subscribe(event.NewGetValMessageFromPeer, nc.queue)
|
||||||
nc.bus.Subscribe(event.NewRetValMessageFromPeer, nc.queue)
|
nc.bus.Subscribe(event.NewRetValMessageFromPeer, nc.queue)
|
||||||
var lastMessageReceived = time.Now()
|
var lastMessageReceived time.Time
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-nc.breakChan:
|
case <-nc.breakChan:
|
||||||
|
@ -67,13 +61,12 @@ func (nc *networkCheck) run() {
|
||||||
// and then we will wait a minute and check the connection for the first time (the onion should be up)
|
// and then we will wait a minute and check the connection for the first time (the onion should be up)
|
||||||
// under normal operating circumstances
|
// under normal operating circumstances
|
||||||
case event.ProtocolEngineStartListen:
|
case event.ProtocolEngineStartListen:
|
||||||
if nc.onion == (e.Data[event.Onion]) {
|
if _, exists := nc.onionsToCheck.Load(e.Data[event.Onion]); !exists {
|
||||||
log.Debugf("initiating connection check for %v", e.Data[event.Onion])
|
log.Debugf("initiating connection check for %v", e.Data[event.Onion])
|
||||||
|
nc.onionsToCheck.Store(e.Data[event.Onion], true)
|
||||||
if time.Since(lastMessageReceived) > time.Minute {
|
if time.Since(lastMessageReceived) > time.Minute {
|
||||||
nc.selfTest()
|
nc.selfTest()
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
log.Errorf("network check plugin received an event for a different profile than it was started with. Internal wiring is probably wrong.")
|
|
||||||
}
|
}
|
||||||
case event.PeerStateChange:
|
case event.PeerStateChange:
|
||||||
fallthrough
|
fallthrough
|
||||||
|
@ -103,11 +96,10 @@ func (nc *networkCheck) run() {
|
||||||
}
|
}
|
||||||
nc.offlineLock.Unlock()
|
nc.offlineLock.Unlock()
|
||||||
}
|
}
|
||||||
case <-time.After(NetworkCheckPeriod):
|
case <-time.After(tickTime):
|
||||||
// if we haven't received an action in the last minute...kick off a set of testing
|
// if we haven't received an action in the last minute...kick off a set of testing
|
||||||
if time.Since(lastMessageReceived) > time.Minute {
|
if time.Since(lastMessageReceived) > time.Minute {
|
||||||
nc.selfTest()
|
nc.selfTest()
|
||||||
lastMessageReceived = time.Now()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -122,22 +114,26 @@ func (nc *networkCheck) Shutdown() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (nc *networkCheck) selfTest() {
|
func (nc *networkCheck) selfTest() {
|
||||||
go nc.checkConnection(nc.onion)
|
nc.onionsToCheck.Range(func(key, val interface{}) bool {
|
||||||
|
go nc.checkConnection(key.(string))
|
||||||
|
return true
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//
|
||||||
func (nc *networkCheck) checkConnection(onion string) {
|
func (nc *networkCheck) checkConnection(onion string) {
|
||||||
progress, _ := nc.acn.GetBootstrapStatus()
|
prog, _ := nc.acn.GetBootstrapStatus()
|
||||||
if progress != 100 {
|
if prog != 100 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// we want to definitively time these actions out faster than tor will, because these onions should definitely be
|
// we want to definitively time these actions out faster than tor will, because these onions should definitely be
|
||||||
// online
|
// online
|
||||||
ClientTimeout := utils.TimeoutPolicy(time.Second * 60)
|
ClientTimeout := TimeoutPolicy(time.Second * 60)
|
||||||
err := ClientTimeout.ExecuteAction(func() error {
|
err := ClientTimeout.ExecuteAction(func() error {
|
||||||
conn, _, err := nc.acn.Open(onion)
|
conn, _, err := nc.acn.Open(onion)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
_ = conn.Close()
|
conn.Close()
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
@ -154,3 +150,26 @@ func (nc *networkCheck) checkConnection(onion string) {
|
||||||
nc.offline = false
|
nc.offline = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO we might want to reuse this, but for now it is only used by this plugin so it can live here
|
||||||
|
|
||||||
|
// TimeoutPolicy is an interface for enforcing common timeout patterns
|
||||||
|
type TimeoutPolicy time.Duration
|
||||||
|
|
||||||
|
// ExecuteAction runs a function and returns an error if it hasn't returned
|
||||||
|
// by the time specified by TimeoutPolicy
|
||||||
|
func (tp *TimeoutPolicy) ExecuteAction(action func() error) error {
|
||||||
|
|
||||||
|
c := make(chan error)
|
||||||
|
go func() {
|
||||||
|
c <- action()
|
||||||
|
}()
|
||||||
|
|
||||||
|
tick := time.NewTicker(time.Duration(*tp))
|
||||||
|
select {
|
||||||
|
case <-tick.C:
|
||||||
|
return fmt.Errorf("ActionTimedOutError")
|
||||||
|
case err := <-c:
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -2,7 +2,6 @@ package plugins
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"fmt"
|
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -13,29 +12,22 @@ type PluginID int
|
||||||
const (
|
const (
|
||||||
CONNECTIONRETRY PluginID = iota
|
CONNECTIONRETRY PluginID = iota
|
||||||
NETWORKCHECK
|
NETWORKCHECK
|
||||||
ANTISPAM
|
|
||||||
HEARTBEAT
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Plugin is the interface for a plugin
|
// Plugin is the interface for a plugin
|
||||||
type Plugin interface {
|
type Plugin interface {
|
||||||
Start()
|
Start()
|
||||||
Shutdown()
|
Shutdown()
|
||||||
Id() PluginID
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get is a plugin factory for the requested plugin
|
// Get is a plugin factory for the requested plugin
|
||||||
func Get(id PluginID, bus event.Manager, acn connectivity.ACN, onion string) (Plugin, error) {
|
func Get(id PluginID, bus event.Manager, acn connectivity.ACN, onion string) Plugin {
|
||||||
switch id {
|
switch id {
|
||||||
case CONNECTIONRETRY:
|
case CONNECTIONRETRY:
|
||||||
return NewConnectionRetry(bus, onion), nil
|
return NewConnectionRetry(bus, onion)
|
||||||
case NETWORKCHECK:
|
case NETWORKCHECK:
|
||||||
return NewNetworkCheck(onion, bus, acn), nil
|
return NewNetworkCheck(bus, acn)
|
||||||
case ANTISPAM:
|
|
||||||
return NewAntiSpam(bus), nil
|
|
||||||
case HEARTBEAT:
|
|
||||||
return NewHeartbeat(bus), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("plugin not defined %v", id)
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
package app
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
app2 "cwtch.im/cwtch/app"
|
||||||
"cwtch.im/cwtch/model/attr"
|
"cwtch.im/cwtch/model/attr"
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"cwtch.im/cwtch/peer"
|
"cwtch.im/cwtch/peer"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -11,14 +11,11 @@ import (
|
||||||
// Proper use of an App is to call CreatePeer and then process the NewPeer event
|
// Proper use of an App is to call CreatePeer and then process the NewPeer event
|
||||||
// however for small utility use, this function which polls the app until the peer is created
|
// however for small utility use, this function which polls the app until the peer is created
|
||||||
// may fill that usecase better
|
// may fill that usecase better
|
||||||
func WaitGetPeer(app Application, name string) peer.CwtchPeer {
|
func WaitGetPeer(app app2.Application, name string) peer.CwtchPeer {
|
||||||
for {
|
for {
|
||||||
for _, handle := range app.ListProfiles() {
|
for id := range app.ListPeers() {
|
||||||
peer := app.GetPeer(handle)
|
peer := app.GetPeer(id)
|
||||||
if peer == nil {
|
localName, _ := peer.GetAttribute(attr.GetLocalScope("name"))
|
||||||
continue
|
|
||||||
}
|
|
||||||
localName, _ := peer.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
|
||||||
if localName == name {
|
if localName == name {
|
||||||
return peer
|
return peer
|
||||||
}
|
}
|
|
@ -0,0 +1,57 @@
|
||||||
|
package bridge
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type goChanBridge struct {
|
||||||
|
in chan event.IPCMessage
|
||||||
|
out chan event.IPCMessage
|
||||||
|
closedChan chan bool
|
||||||
|
closed bool
|
||||||
|
lock sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeGoChanBridge returns a simple testing IPCBridge made from inprocess go channels
|
||||||
|
func MakeGoChanBridge() (b1, b2 event.IPCBridge) {
|
||||||
|
chan1 := make(chan event.IPCMessage)
|
||||||
|
chan2 := make(chan event.IPCMessage)
|
||||||
|
closed := make(chan bool)
|
||||||
|
|
||||||
|
a := &goChanBridge{in: chan1, out: chan2, closedChan: closed, closed: false}
|
||||||
|
b := &goChanBridge{in: chan2, out: chan1, closedChan: closed, closed: false}
|
||||||
|
|
||||||
|
go monitor(a, b)
|
||||||
|
|
||||||
|
return a, b
|
||||||
|
}
|
||||||
|
|
||||||
|
func monitor(a, b *goChanBridge) {
|
||||||
|
<-a.closedChan
|
||||||
|
a.closed = true
|
||||||
|
b.closed = true
|
||||||
|
a.closedChan <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *goChanBridge) Read() (*event.IPCMessage, bool) {
|
||||||
|
message, ok := <-pb.in
|
||||||
|
return &message, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *goChanBridge) Write(message *event.IPCMessage) {
|
||||||
|
pb.lock.Lock()
|
||||||
|
defer pb.lock.Unlock()
|
||||||
|
if !pb.closed {
|
||||||
|
pb.out <- *message
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *goChanBridge) Shutdown() {
|
||||||
|
if !pb.closed {
|
||||||
|
close(pb.in)
|
||||||
|
close(pb.out)
|
||||||
|
pb.closedChan <- true
|
||||||
|
<-pb.closedChan
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,72 @@
|
||||||
|
package bridge
|
||||||
|
|
||||||
|
/* Todo: When go generics ships, refactor this and event.infiniteChannel into one */
|
||||||
|
|
||||||
|
// InfiniteChannel implements the Channel interface with an infinite buffer between the input and the output.
|
||||||
|
type InfiniteChannel struct {
|
||||||
|
input, output chan interface{}
|
||||||
|
length chan int
|
||||||
|
buffer *InfiniteQueue
|
||||||
|
}
|
||||||
|
|
||||||
|
func newInfiniteChannel() *InfiniteChannel {
|
||||||
|
ch := &InfiniteChannel{
|
||||||
|
input: make(chan interface{}),
|
||||||
|
output: make(chan interface{}),
|
||||||
|
length: make(chan int),
|
||||||
|
buffer: newInfiniteQueue(),
|
||||||
|
}
|
||||||
|
go ch.infiniteBuffer()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// In returns the input channel
|
||||||
|
func (ch *InfiniteChannel) In() chan<- interface{} {
|
||||||
|
return ch.input
|
||||||
|
}
|
||||||
|
|
||||||
|
// Out returns the output channel
|
||||||
|
func (ch *InfiniteChannel) Out() <-chan interface{} {
|
||||||
|
return ch.output
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the length of items in queue
|
||||||
|
func (ch *InfiniteChannel) Len() int {
|
||||||
|
return <-ch.length
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the InfiniteChanel
|
||||||
|
func (ch *InfiniteChannel) Close() {
|
||||||
|
close(ch.input)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ch *InfiniteChannel) infiniteBuffer() {
|
||||||
|
var input, output chan interface{}
|
||||||
|
var next interface{}
|
||||||
|
input = ch.input
|
||||||
|
|
||||||
|
for input != nil || output != nil {
|
||||||
|
select {
|
||||||
|
case elem, open := <-input:
|
||||||
|
if open {
|
||||||
|
ch.buffer.Add(elem)
|
||||||
|
} else {
|
||||||
|
input = nil
|
||||||
|
}
|
||||||
|
case output <- next:
|
||||||
|
ch.buffer.Remove()
|
||||||
|
case ch.length <- ch.buffer.Length():
|
||||||
|
}
|
||||||
|
|
||||||
|
if ch.buffer.Length() > 0 {
|
||||||
|
output = ch.output
|
||||||
|
next = ch.buffer.Peek()
|
||||||
|
} else {
|
||||||
|
output = nil
|
||||||
|
next = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
close(ch.output)
|
||||||
|
close(ch.length)
|
||||||
|
}
|
|
@ -0,0 +1,105 @@
|
||||||
|
package bridge
|
||||||
|
|
||||||
|
/* Todo: When go generics ships, refactor this and event.infinitQueue channel into one */
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
|
||||||
|
Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
|
||||||
|
substantial memory and time benefits, and fewer GC pauses.
|
||||||
|
|
||||||
|
The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// minQueueLen is smallest capacity that queue may have.
|
||||||
|
// Must be power of 2 for bitwise modulus: x % n == x & (n - 1).
|
||||||
|
const minQueueLen = 16
|
||||||
|
|
||||||
|
// InfiniteQueue represents a single instance of the queue data structure.
|
||||||
|
type InfiniteQueue struct {
|
||||||
|
buf []interface{}
|
||||||
|
head, tail, count int
|
||||||
|
}
|
||||||
|
|
||||||
|
// New constructs and returns a new Queue.
|
||||||
|
func newInfiniteQueue() *InfiniteQueue {
|
||||||
|
return &InfiniteQueue{
|
||||||
|
buf: make([]interface{}, minQueueLen),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Length returns the number of elements currently stored in the queue.
|
||||||
|
func (q *InfiniteQueue) Length() int {
|
||||||
|
return q.count
|
||||||
|
}
|
||||||
|
|
||||||
|
// resizes the queue to fit exactly twice its current contents
|
||||||
|
// this can result in shrinking if the queue is less than half-full
|
||||||
|
func (q *InfiniteQueue) resize() {
|
||||||
|
newBuf := make([]interface{}, q.count<<1)
|
||||||
|
|
||||||
|
if q.tail > q.head {
|
||||||
|
copy(newBuf, q.buf[q.head:q.tail])
|
||||||
|
} else {
|
||||||
|
n := copy(newBuf, q.buf[q.head:])
|
||||||
|
copy(newBuf[n:], q.buf[:q.tail])
|
||||||
|
}
|
||||||
|
|
||||||
|
q.head = 0
|
||||||
|
q.tail = q.count
|
||||||
|
q.buf = newBuf
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add puts an element on the end of the queue.
|
||||||
|
func (q *InfiniteQueue) Add(elem interface{}) {
|
||||||
|
if q.count == len(q.buf) {
|
||||||
|
q.resize()
|
||||||
|
}
|
||||||
|
|
||||||
|
q.buf[q.tail] = elem
|
||||||
|
// bitwise modulus
|
||||||
|
q.tail = (q.tail + 1) & (len(q.buf) - 1)
|
||||||
|
q.count++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peek returns the element at the head of the queue. This call panics
|
||||||
|
// if the queue is empty.
|
||||||
|
func (q *InfiniteQueue) Peek() interface{} {
|
||||||
|
if q.count <= 0 {
|
||||||
|
panic("queue: Peek() called on empty queue")
|
||||||
|
}
|
||||||
|
return q.buf[q.head]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the element at index i in the queue. If the index is
|
||||||
|
// invalid, the call will panic. This method accepts both positive and
|
||||||
|
// negative index values. Index 0 refers to the first element, and
|
||||||
|
// index -1 refers to the last.
|
||||||
|
func (q *InfiniteQueue) Get(i int) interface{} {
|
||||||
|
// If indexing backwards, convert to positive index.
|
||||||
|
if i < 0 {
|
||||||
|
i += q.count
|
||||||
|
}
|
||||||
|
if i < 0 || i >= q.count {
|
||||||
|
panic("queue: Get() called with index out of range")
|
||||||
|
}
|
||||||
|
// bitwise modulus
|
||||||
|
return q.buf[(q.head+i)&(len(q.buf)-1)]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes and returns the element from the front of the queue. If the
|
||||||
|
// queue is empty, the call will panic.
|
||||||
|
func (q *InfiniteQueue) Remove() interface{} {
|
||||||
|
if q.count <= 0 {
|
||||||
|
panic("queue: Remove() called on empty queue")
|
||||||
|
}
|
||||||
|
ret := q.buf[q.head]
|
||||||
|
q.buf[q.head] = nil
|
||||||
|
// bitwise modulus
|
||||||
|
q.head = (q.head + 1) & (len(q.buf) - 1)
|
||||||
|
q.count--
|
||||||
|
// Resize down if buffer 1/4 full.
|
||||||
|
if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
|
||||||
|
q.resize()
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
|
@ -0,0 +1,19 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package bridge
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewPipeBridgeClient(inFilename, outFilename string) event.IPCBridge {
|
||||||
|
log.Fatal("Not supported on windows")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPipeBridgeService returns a pipe backed IPCBridge for a service
|
||||||
|
func NewPipeBridgeService(inFilename, outFilename string) event.IPCBridge {
|
||||||
|
log.Fatal("Not supported on windows")
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,357 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package bridge
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/protocol/connections"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/json"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
/* pipeBridge creates a pair of named pipes
|
||||||
|
Needs a call to new client and service to fully successfully open
|
||||||
|
*/
|
||||||
|
|
||||||
|
const maxBufferSize = 1000
|
||||||
|
|
||||||
|
const serviceName = "service"
|
||||||
|
const clientName = "client"
|
||||||
|
|
||||||
|
const syn = "SYN"
|
||||||
|
const synack = "SYNACK"
|
||||||
|
const ack = "ACK"
|
||||||
|
|
||||||
|
type pipeBridge struct {
|
||||||
|
infile, outfile string
|
||||||
|
in, out *os.File
|
||||||
|
read chan event.IPCMessage
|
||||||
|
write *InfiniteChannel
|
||||||
|
closedChan chan bool
|
||||||
|
state connections.ConnectionState
|
||||||
|
lock sync.Mutex
|
||||||
|
threeShake func() bool
|
||||||
|
|
||||||
|
// For logging / debugging purposes
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPipeBridge(inFilename, outFilename string) *pipeBridge {
|
||||||
|
syscall.Mkfifo(inFilename, 0600)
|
||||||
|
syscall.Mkfifo(outFilename, 0600)
|
||||||
|
pb := &pipeBridge{infile: inFilename, outfile: outFilename, state: connections.DISCONNECTED}
|
||||||
|
pb.read = make(chan event.IPCMessage, maxBufferSize)
|
||||||
|
pb.write = newInfiniteChannel() //make(chan event.IPCMessage, maxBufferSize)
|
||||||
|
return pb
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPipeBridgeClient returns a pipe backed IPCBridge for a client
|
||||||
|
func NewPipeBridgeClient(inFilename, outFilename string) event.IPCBridge {
|
||||||
|
log.Debugf("Making new PipeBridge Client...\n")
|
||||||
|
pb := newPipeBridge(inFilename, outFilename)
|
||||||
|
pb.name = clientName
|
||||||
|
pb.threeShake = pb.threeShakeClient
|
||||||
|
go pb.connectionManager()
|
||||||
|
|
||||||
|
return pb
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPipeBridgeService returns a pipe backed IPCBridge for a service
|
||||||
|
func NewPipeBridgeService(inFilename, outFilename string) event.IPCBridge {
|
||||||
|
log.Debugf("Making new PipeBridge Service...\n")
|
||||||
|
pb := newPipeBridge(inFilename, outFilename)
|
||||||
|
pb.name = serviceName
|
||||||
|
pb.threeShake = pb.threeShakeService
|
||||||
|
|
||||||
|
go pb.connectionManager()
|
||||||
|
|
||||||
|
log.Debugf("Successfully created new PipeBridge Service!\n")
|
||||||
|
return pb
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) setState(state connections.ConnectionState) {
|
||||||
|
pb.lock.Lock()
|
||||||
|
defer pb.lock.Unlock()
|
||||||
|
|
||||||
|
pb.state = state
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) getState() connections.ConnectionState {
|
||||||
|
pb.lock.Lock()
|
||||||
|
defer pb.lock.Unlock()
|
||||||
|
|
||||||
|
return pb.state
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) connectionManager() {
|
||||||
|
for pb.getState() != connections.KILLED {
|
||||||
|
log.Debugf("clientConnManager loop start init\n")
|
||||||
|
pb.setState(connections.CONNECTING)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
log.Debugf("%v open file infile\n", pb.name)
|
||||||
|
pb.in, err = os.OpenFile(pb.infile, os.O_RDWR, 0600)
|
||||||
|
if err != nil {
|
||||||
|
pb.setState(connections.DISCONNECTED)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("%v open file outfile\n", pb.name)
|
||||||
|
pb.out, err = os.OpenFile(pb.outfile, os.O_RDWR, 0600)
|
||||||
|
if err != nil {
|
||||||
|
pb.setState(connections.DISCONNECTED)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Successfully connected PipeBridge %v!\n", pb.name)
|
||||||
|
|
||||||
|
pb.handleConns()
|
||||||
|
}
|
||||||
|
log.Debugf("exiting %v ConnectionManager\n", pb.name)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// threeShake performs a 3way handshake sync up
|
||||||
|
func (pb *pipeBridge) threeShakeService() bool {
|
||||||
|
synacked := false
|
||||||
|
|
||||||
|
for {
|
||||||
|
resp, err := pb.readString()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(resp) == syn {
|
||||||
|
if !synacked {
|
||||||
|
err = pb.writeString([]byte(synack))
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
synacked = true
|
||||||
|
}
|
||||||
|
} else if string(resp) == ack {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) synLoop(stop chan bool) {
|
||||||
|
delay := time.Duration(0)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-time.After(delay):
|
||||||
|
err := pb.writeString([]byte(syn))
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
delay = time.Second
|
||||||
|
case <-stop:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) threeShakeClient() bool {
|
||||||
|
stop := make(chan bool)
|
||||||
|
go pb.synLoop(stop)
|
||||||
|
for {
|
||||||
|
resp, err := pb.readString()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(resp) == synack {
|
||||||
|
stop <- true
|
||||||
|
err := pb.writeString([]byte(ack))
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) handleConns() {
|
||||||
|
|
||||||
|
if !pb.threeShake() {
|
||||||
|
pb.setState(connections.FAILED)
|
||||||
|
pb.closeReset()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
pb.setState(connections.AUTHENTICATED)
|
||||||
|
|
||||||
|
pb.closedChan = make(chan bool, 5)
|
||||||
|
|
||||||
|
log.Debugf("handleConns authed, %v 2xgo\n", pb.name)
|
||||||
|
|
||||||
|
go pb.handleRead()
|
||||||
|
go pb.handleWrite()
|
||||||
|
|
||||||
|
<-pb.closedChan
|
||||||
|
log.Debugf("handleConns <-closedChan (%v)\n", pb.name)
|
||||||
|
if pb.getState() != connections.KILLED {
|
||||||
|
pb.setState(connections.FAILED)
|
||||||
|
}
|
||||||
|
pb.closeReset()
|
||||||
|
log.Debugf("handleConns done for %v, exit\n", pb.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) closeReset() {
|
||||||
|
pb.in.Close()
|
||||||
|
pb.out.Close()
|
||||||
|
close(pb.read)
|
||||||
|
pb.write.Close()
|
||||||
|
|
||||||
|
if pb.getState() != connections.KILLED {
|
||||||
|
pb.read = make(chan event.IPCMessage, maxBufferSize)
|
||||||
|
pb.write = newInfiniteChannel()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) handleWrite() {
|
||||||
|
log.Debugf("handleWrite() %v\n", pb.name)
|
||||||
|
defer log.Debugf("exiting handleWrite() %v\n", pb.name)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case messageInf := <-pb.write.output:
|
||||||
|
if messageInf == nil {
|
||||||
|
pb.closedChan <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
message := messageInf.(event.IPCMessage)
|
||||||
|
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||||
|
log.Debugf("handleWrite <- message: %v %v ...\n", message.Dest, message.Message.EventType)
|
||||||
|
} else {
|
||||||
|
log.Debugf("handleWrite <- message: %v\n", message)
|
||||||
|
}
|
||||||
|
if pb.getState() == connections.AUTHENTICATED {
|
||||||
|
encMessage := &event.IPCMessage{Dest: message.Dest, Message: event.Event{EventType: message.Message.EventType, EventID: message.Message.EventID, Data: make(map[event.Field]string)}}
|
||||||
|
for k, v := range message.Message.Data {
|
||||||
|
encMessage.Message.Data[k] = base64.StdEncoding.EncodeToString([]byte(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
messageJSON, _ := json.Marshal(encMessage)
|
||||||
|
err := pb.writeString(messageJSON)
|
||||||
|
if err != nil {
|
||||||
|
pb.closedChan <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) handleRead() {
|
||||||
|
log.Debugf("handleRead() %v\n", pb.name)
|
||||||
|
defer log.Debugf("exiting handleRead() %v", pb.name)
|
||||||
|
|
||||||
|
for {
|
||||||
|
log.Debugf("Waiting to handleRead()...\n")
|
||||||
|
|
||||||
|
buffer, err := pb.readString()
|
||||||
|
if err != nil {
|
||||||
|
pb.closedChan <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var message event.IPCMessage
|
||||||
|
err = json.Unmarshal(buffer, &message)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Read error: '%v', value: '%v'", err, buffer)
|
||||||
|
pb.closedChan <- true
|
||||||
|
return // probably new connection trying to initialize
|
||||||
|
}
|
||||||
|
for k, v := range message.Message.Data {
|
||||||
|
val, _ := base64.StdEncoding.DecodeString(v)
|
||||||
|
message.Message.Data[k] = string(val)
|
||||||
|
}
|
||||||
|
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||||
|
log.Debugf("handleRead read<-: %v %v ...\n", message.Dest, message.Message.EventType)
|
||||||
|
} else {
|
||||||
|
log.Debugf("handleRead read<-: %v\n", message)
|
||||||
|
}
|
||||||
|
pb.read <- message
|
||||||
|
log.Debugf("handleRead wrote\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) Read() (*event.IPCMessage, bool) {
|
||||||
|
log.Debugf("Read() %v...\n", pb.name)
|
||||||
|
var ok = false
|
||||||
|
var message event.IPCMessage
|
||||||
|
for !ok && pb.getState() != connections.KILLED {
|
||||||
|
message, ok = <-pb.read
|
||||||
|
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||||
|
log.Debugf("Read %v: %v %v ...\n", pb.name, message.Dest, message.Message.EventType)
|
||||||
|
} else {
|
||||||
|
log.Debugf("Read %v: %v\n", pb.name, message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &message, pb.getState() != connections.KILLED
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) Write(message *event.IPCMessage) {
|
||||||
|
if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup {
|
||||||
|
log.Debugf("Write %v: %v %v ...\n", pb.name, message.Dest, message.Message.EventType)
|
||||||
|
} else {
|
||||||
|
log.Debugf("Write %v: %v\n", pb.name, message)
|
||||||
|
}
|
||||||
|
pb.write.input <- *message
|
||||||
|
log.Debugf("Wrote\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) Shutdown() {
|
||||||
|
log.Debugf("pb.Shutdown() for %v currently in state: %v\n", pb.name, connections.ConnectionStateName[pb.getState()])
|
||||||
|
pb.state = connections.KILLED
|
||||||
|
pb.closedChan <- true
|
||||||
|
log.Debugf("Done Shutdown for %v\n", pb.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) writeString(message []byte) error {
|
||||||
|
size := make([]byte, 2)
|
||||||
|
binary.LittleEndian.PutUint16(size, uint16(len(message)))
|
||||||
|
pb.out.Write(size)
|
||||||
|
|
||||||
|
for pos := 0; pos < len(message); {
|
||||||
|
n, err := pb.out.Write(message[pos:])
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Writing out on pipeBridge: %v\n", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pos += n
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pb *pipeBridge) readString() ([]byte, error) {
|
||||||
|
var n int
|
||||||
|
size := make([]byte, 2)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
n, err = pb.in.Read(size)
|
||||||
|
if err != nil || n != 2 {
|
||||||
|
log.Errorf("Could not read len int from stream: %v\n", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
n = int(binary.LittleEndian.Uint16(size))
|
||||||
|
pos := 0
|
||||||
|
buffer := make([]byte, n)
|
||||||
|
for n > 0 {
|
||||||
|
m, err := pb.in.Read(buffer[pos:])
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Reading into buffer from pipe: %v\n", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
n -= m
|
||||||
|
pos += m
|
||||||
|
}
|
||||||
|
return buffer, nil
|
||||||
|
}
|
|
@ -0,0 +1,131 @@
|
||||||
|
package bridge
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
clientPipe = "./client"
|
||||||
|
servicePipe = "./service"
|
||||||
|
)
|
||||||
|
|
||||||
|
func clientHelper(t *testing.T, in, out string, messageOrig *event.IPCMessage, done chan bool) {
|
||||||
|
client := NewPipeBridgeClient(in, out)
|
||||||
|
|
||||||
|
messageAfter, ok := client.Read()
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("Reading from client IPCBridge failed")
|
||||||
|
done <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if messageOrig.Dest != messageAfter.Dest {
|
||||||
|
t.Errorf("Dest's value differs expected: %v actaul: %v", messageOrig.Dest, messageAfter.Dest)
|
||||||
|
}
|
||||||
|
|
||||||
|
if messageOrig.Message.EventType != messageAfter.Message.EventType {
|
||||||
|
t.Errorf("EventTypes's value differs expected: %v actaul: %v", messageOrig.Message.EventType, messageAfter.Message.EventType)
|
||||||
|
}
|
||||||
|
|
||||||
|
if messageOrig.Message.Data[event.Identity] != messageAfter.Message.Data[event.Identity] {
|
||||||
|
t.Errorf("Data[Identity]'s value differs expected: %v actaul: %v", messageOrig.Message.Data[event.Identity], messageAfter.Message.Data[event.Identity])
|
||||||
|
}
|
||||||
|
|
||||||
|
done <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func serviceHelper(t *testing.T, in, out string, messageOrig *event.IPCMessage, done chan bool) {
|
||||||
|
service := NewPipeBridgeService(in, out)
|
||||||
|
|
||||||
|
service.Write(messageOrig)
|
||||||
|
|
||||||
|
done <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPipeBridge(t *testing.T) {
|
||||||
|
os.Remove(servicePipe)
|
||||||
|
os.Remove(clientPipe)
|
||||||
|
|
||||||
|
messageOrig := &event.IPCMessage{Dest: "ABC", Message: event.NewEventList(event.NewPeer, event.Identity, "It is I")}
|
||||||
|
serviceDone := make(chan bool)
|
||||||
|
clientDone := make(chan bool)
|
||||||
|
|
||||||
|
go clientHelper(t, clientPipe, servicePipe, messageOrig, clientDone)
|
||||||
|
go serviceHelper(t, servicePipe, clientPipe, messageOrig, serviceDone)
|
||||||
|
|
||||||
|
<-serviceDone
|
||||||
|
<-clientDone
|
||||||
|
}
|
||||||
|
|
||||||
|
func restartingClient(t *testing.T, in, out string, done chan bool) {
|
||||||
|
client := NewPipeBridgeClient(in, out)
|
||||||
|
|
||||||
|
message1 := &event.IPCMessage{Dest: "ABC", Message: event.NewEventList(event.NewPeer)}
|
||||||
|
log.Infoln("client writing message 1")
|
||||||
|
client.Write(message1)
|
||||||
|
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
log.Infoln("client shutdown")
|
||||||
|
client.Shutdown()
|
||||||
|
|
||||||
|
log.Infoln("client new client")
|
||||||
|
client = NewPipeBridgeClient(in, out)
|
||||||
|
message2 := &event.IPCMessage{Dest: "ABC", Message: event.NewEventList(event.DeleteContact)}
|
||||||
|
log.Infoln("client2 write message2")
|
||||||
|
client.Write(message2)
|
||||||
|
|
||||||
|
done <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func stableService(t *testing.T, in, out string, done chan bool) {
|
||||||
|
service := NewPipeBridgeService(in, out)
|
||||||
|
|
||||||
|
log.Infoln("service wait read 1")
|
||||||
|
message1, ok := service.Read()
|
||||||
|
log.Infof("service read 1 %v ok:%v\n", message1, ok)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("Reading from client IPCBridge 1st time failed")
|
||||||
|
done <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if message1.Message.EventType != event.NewPeer {
|
||||||
|
t.Errorf("Wrong message received, expected NewPeer\n")
|
||||||
|
done <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infoln("service wait read 2")
|
||||||
|
message2, ok := service.Read()
|
||||||
|
log.Infof("service read 2 got %v ok:%v\n", message2, ok)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("Reading from client IPCBridge 2nd time failed")
|
||||||
|
done <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if message2.Message.EventType != event.DeleteContact {
|
||||||
|
t.Errorf("Wrong message received, expected DeleteContact, got %v\n", message2)
|
||||||
|
done <- true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
done <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReconnect(t *testing.T) {
|
||||||
|
log.Infoln("TestReconnect")
|
||||||
|
os.Remove(servicePipe)
|
||||||
|
os.Remove(clientPipe)
|
||||||
|
|
||||||
|
serviceDone := make(chan bool)
|
||||||
|
clientDone := make(chan bool)
|
||||||
|
|
||||||
|
go restartingClient(t, clientPipe, servicePipe, clientDone)
|
||||||
|
go stableService(t, servicePipe, clientPipe, serviceDone)
|
||||||
|
|
||||||
|
<-serviceDone
|
||||||
|
<-clientDone
|
||||||
|
}
|
213
event/common.go
213
event/common.go
|
@ -1,9 +1,5 @@
|
||||||
package event
|
package event
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
var CwtchEpoch = time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)
|
|
||||||
|
|
||||||
// Type captures the definition of many common Cwtch application events
|
// Type captures the definition of many common Cwtch application events
|
||||||
type Type string
|
type Type string
|
||||||
|
|
||||||
|
@ -17,22 +13,11 @@ const (
|
||||||
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
|
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
|
||||||
PeerRequest = Type("PeerRequest")
|
PeerRequest = Type("PeerRequest")
|
||||||
|
|
||||||
// QueuePeerRequest
|
// RetryPeerRequest
|
||||||
// When peer has too many peers to try and wants to ease off Tor throttling, use this to notify ContactRetry plugin to schedule a peer for later try
|
// Identical to PeerRequest, but allows Engine to make decisions regarding blocked peers
|
||||||
// LastSeen: last seen time of the contact
|
// attributes:
|
||||||
// And one of
|
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"
|
||||||
// RemotePeer
|
RetryPeerRequest = Type("RetryPeerRequest")
|
||||||
// GroupServer
|
|
||||||
QueuePeerRequest = Type("QueuePeerRequest")
|
|
||||||
|
|
||||||
// Disconnect*Request
|
|
||||||
// Close active connections and prevent new connections
|
|
||||||
DisconnectPeerRequest = Type("DisconnectPeerRequest")
|
|
||||||
DisconnectServerRequest = Type("DisconnectServerRequest")
|
|
||||||
|
|
||||||
// Events to Manage Retry Contacts
|
|
||||||
PurgeRetries = Type("PurgeRetries")
|
|
||||||
ResumeRetries = Type("ResumeRetries")
|
|
||||||
|
|
||||||
// RetryServerRequest
|
// RetryServerRequest
|
||||||
// Asks CwtchPeer to retry a server connection...
|
// Asks CwtchPeer to retry a server connection...
|
||||||
|
@ -40,24 +25,19 @@ const (
|
||||||
RetryServerRequest = Type("RetryServerRequest")
|
RetryServerRequest = Type("RetryServerRequest")
|
||||||
|
|
||||||
// RemotePeer
|
// RemotePeer
|
||||||
// ConversationID
|
// Authorization(model.peer.Auth_...)
|
||||||
// Accepted
|
SetPeerAuthorization = Type("UpdatePeerAuthorization")
|
||||||
// Blocked
|
|
||||||
UpdateConversationAuthorization = Type("UpdateConversationAuthorization")
|
|
||||||
|
|
||||||
// Turn on/off blocking of unknown peers (if peers aren't in the contact list then they will be autoblocked
|
// Turn on/off blocking of unknown peers (if peers aren't in the contact list then they will be autoblocked
|
||||||
BlockUnknownPeers = Type("BlockUnknownPeers")
|
BlockUnknownPeers = Type("BlockUnknownPeers")
|
||||||
AllowUnknownPeers = Type("AllowUnknownPeers")
|
AllowUnknownPeers = Type("AllowUnknownPeers")
|
||||||
|
|
||||||
// GroupServer
|
// GroupServer
|
||||||
QueueJoinServer = Type("QueueJoinServer")
|
JoinServer = Type("JoinServer")
|
||||||
JoinServer = Type("JoinServer")
|
|
||||||
|
|
||||||
// attributes GroupServer - the onion of the server to leave
|
// attributes GroupServer - the onion of the server to leave
|
||||||
LeaveServer = Type("LeaveServer")
|
LeaveServer = Type("LeaveServer")
|
||||||
|
|
||||||
ProtocolEngineCreated = Type("ProtocolEngineCreated")
|
|
||||||
ProtocolEngineShutdown = Type("ProtocolEngineShutdown")
|
|
||||||
ProtocolEngineStartListen = Type("ProtocolEngineStartListen")
|
ProtocolEngineStartListen = Type("ProtocolEngineStartListen")
|
||||||
ProtocolEngineStopped = Type("ProtocolEngineStopped")
|
ProtocolEngineStopped = Type("ProtocolEngineStopped")
|
||||||
|
|
||||||
|
@ -75,6 +55,10 @@ const (
|
||||||
// GroupID: groupID (allows them to fetch from the peer)
|
// GroupID: groupID (allows them to fetch from the peer)
|
||||||
NewGroup = Type("NewGroup")
|
NewGroup = Type("NewGroup")
|
||||||
|
|
||||||
|
// GroupID
|
||||||
|
AcceptGroupInvite = Type("AcceptGroupInvite")
|
||||||
|
RejectGroupInvite = Type("RejectGroupInvite")
|
||||||
|
|
||||||
SendMessageToGroup = Type("SendMessagetoGroup")
|
SendMessageToGroup = Type("SendMessagetoGroup")
|
||||||
|
|
||||||
//Ciphertext, Signature:
|
//Ciphertext, Signature:
|
||||||
|
@ -93,9 +77,8 @@ const (
|
||||||
// Error: string describing the error
|
// Error: string describing the error
|
||||||
SendMessageToGroupError = Type("SendMessageToGroupError")
|
SendMessageToGroupError = Type("SendMessageToGroupError")
|
||||||
|
|
||||||
SendMessageToPeer = Type("SendMessageToPeer")
|
SendMessageToPeer = Type("SendMessageToPeer")
|
||||||
NewMessageFromPeer = Type("NewMessageFromPeer")
|
NewMessageFromPeer = Type("NewMessageFromPeer")
|
||||||
NewMessageFromPeerEngine = Type("NewMessageFromPeerEngine")
|
|
||||||
|
|
||||||
// RemotePeer, scope, path
|
// RemotePeer, scope, path
|
||||||
NewGetValMessageFromPeer = Type("NewGetValMessageFromPeer")
|
NewGetValMessageFromPeer = Type("NewGetValMessageFromPeer")
|
||||||
|
@ -127,6 +110,12 @@ const (
|
||||||
// RemotePeer: The peer associated with the acknowledgement
|
// RemotePeer: The peer associated with the acknowledgement
|
||||||
IndexedFailure = Type("IndexedFailure")
|
IndexedFailure = Type("IndexedFailure")
|
||||||
|
|
||||||
|
// UpdateMessageFlags will change the flags associated with a given message.
|
||||||
|
// Handle
|
||||||
|
// Message Index
|
||||||
|
// Flags
|
||||||
|
UpdateMessageFlags = Type("UpdateMessageFlags")
|
||||||
|
|
||||||
// attributes:
|
// attributes:
|
||||||
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"]
|
// RemotePeer: [eg "chpr7qm6op5vfcg2pi4vllco3h6aa7exexc4rqwnlupqhoogx2zgd6qd"]
|
||||||
// Error: string describing the error
|
// Error: string describing the error
|
||||||
|
@ -137,11 +126,18 @@ const (
|
||||||
// a peer contact has been added
|
// a peer contact has been added
|
||||||
// attributes:
|
// attributes:
|
||||||
// RemotePeer [eg ""]
|
// RemotePeer [eg ""]
|
||||||
ContactCreated = Type("ContactCreated")
|
// Authorization
|
||||||
|
PeerCreated = Type("PeerCreated")
|
||||||
|
|
||||||
// Password, NewPassword
|
// Password, NewPassword
|
||||||
ChangePassword = Type("ChangePassword")
|
ChangePassword = Type("ChangePassword")
|
||||||
|
|
||||||
|
// Error(err), EventID
|
||||||
|
ChangePasswordError = Type("ChangePasswordError")
|
||||||
|
|
||||||
|
// EventID
|
||||||
|
ChangePasswordSuccess = Type("ChangePasswordSuccess")
|
||||||
|
|
||||||
// a group has been successfully added or newly created
|
// a group has been successfully added or newly created
|
||||||
// attributes:
|
// attributes:
|
||||||
// Data [serialized *model.Group]
|
// Data [serialized *model.Group]
|
||||||
|
@ -150,6 +146,34 @@ const (
|
||||||
// RemotePeer
|
// RemotePeer
|
||||||
DeleteContact = Type("DeleteContact")
|
DeleteContact = Type("DeleteContact")
|
||||||
|
|
||||||
|
// GroupID
|
||||||
|
DeleteGroup = Type("DeleteGroup")
|
||||||
|
|
||||||
|
// change the .Name attribute of a profile (careful - this is not a custom attribute. it is used in the underlying protocol during handshakes!)
|
||||||
|
// attributes:
|
||||||
|
// ProfileName [eg "erinn"]
|
||||||
|
SetProfileName = Type("SetProfileName")
|
||||||
|
|
||||||
|
// request to store a profile-wide attribute (good for e.g. per-profile settings like theme prefs)
|
||||||
|
// attributes:
|
||||||
|
// Key [eg "fontcolor"]
|
||||||
|
// Data [eg "red"]
|
||||||
|
SetAttribute = Type("SetAttribute")
|
||||||
|
|
||||||
|
// request to store a per-contact attribute (e.g. display names for a peer)
|
||||||
|
// attributes:
|
||||||
|
// RemotePeer [eg ""]
|
||||||
|
// Key [eg "nick"]
|
||||||
|
// Data [eg "erinn"]
|
||||||
|
SetPeerAttribute = Type("SetPeerAttribute")
|
||||||
|
|
||||||
|
// request to store a per-cwtch-group attribute (e.g. display name for a group)
|
||||||
|
// attributes:
|
||||||
|
// GroupID [eg ""]
|
||||||
|
// Key [eg "nick"]
|
||||||
|
// Data [eg "open privacy board"]
|
||||||
|
SetGroupAttribute = Type("SetGroupAttribute")
|
||||||
|
|
||||||
// PeerStateChange servers as a new incoming connection message as well, and can/is consumed by frontends to alert of new p2p connections
|
// PeerStateChange servers as a new incoming connection message as well, and can/is consumed by frontends to alert of new p2p connections
|
||||||
// RemotePeer
|
// RemotePeer
|
||||||
// ConnectionState
|
// ConnectionState
|
||||||
|
@ -161,14 +185,29 @@ const (
|
||||||
|
|
||||||
/***** Application client / service messages *****/
|
/***** Application client / service messages *****/
|
||||||
|
|
||||||
|
// ProfileName, Password, Data(tag)
|
||||||
|
CreatePeer = Type("CreatePeer")
|
||||||
|
|
||||||
// app: Identity(onion), Created(bool)
|
// app: Identity(onion), Created(bool)
|
||||||
// service -> client: Identity(localId), Password, [Status(new/default=blank || from reload='running')], Created(bool)
|
// service -> client: Identity(localId), Password, [Status(new/default=blank || from reload='running')], Created(bool)
|
||||||
NewPeer = Type("NewPeer")
|
NewPeer = Type("NewPeer")
|
||||||
|
|
||||||
// Identity(onion)
|
// Identity(onion)
|
||||||
DeletePeer = Type("DeletePeer")
|
DeletePeer = Type("DeletePeer")
|
||||||
// Identity(onion)
|
|
||||||
PeerDeleted = Type("PeerDeleted")
|
// Identity(onion), Data(pluginID)
|
||||||
|
AddPeerPlugin = Type("AddPeerPlugin")
|
||||||
|
|
||||||
|
// Password
|
||||||
|
LoadProfiles = Type("LoadProfiles")
|
||||||
|
|
||||||
|
// Client has reloaded, triggers NewPeer s then ReloadDone
|
||||||
|
ReloadClient = Type("ReloadClient")
|
||||||
|
|
||||||
|
ReloadDone = Type("ReloadDone")
|
||||||
|
|
||||||
|
// Identity - Ask service to resend all connection states
|
||||||
|
ReloadPeer = Type("ReloadPeer")
|
||||||
|
|
||||||
// Identity(onion)
|
// Identity(onion)
|
||||||
ShutdownPeer = Type("ShutdownPeer")
|
ShutdownPeer = Type("ShutdownPeer")
|
||||||
|
@ -182,12 +221,12 @@ const (
|
||||||
// Error(err)
|
// Error(err)
|
||||||
AppError = Type("AppError")
|
AppError = Type("AppError")
|
||||||
|
|
||||||
|
GetACNStatus = Type("GetACNStatus")
|
||||||
|
GetACNVersion = Type("GetACNVersion")
|
||||||
|
|
||||||
// Progress, Status
|
// Progress, Status
|
||||||
ACNStatus = Type("ACNStatus")
|
ACNStatus = Type("ACNStatus")
|
||||||
|
|
||||||
// ID, Key, Data
|
|
||||||
ACNInfo = Type("ACNInfo")
|
|
||||||
|
|
||||||
// Data
|
// Data
|
||||||
ACNVersion = Type("ACNVersion")
|
ACNVersion = Type("ACNVersion")
|
||||||
|
|
||||||
|
@ -197,39 +236,13 @@ const (
|
||||||
// Onion: the local onion we attempt to check
|
// Onion: the local onion we attempt to check
|
||||||
NetworkStatus = Type("NetworkError")
|
NetworkStatus = Type("NetworkError")
|
||||||
|
|
||||||
|
// Notify the UI that a Server has been added
|
||||||
|
// Onion = Server Onion
|
||||||
|
ServerCreated = Type("ServerAdded")
|
||||||
|
|
||||||
// For debugging. Allows test to emit a Syn and get a response Ack(eventID) when the subsystem is done processing a queue
|
// For debugging. Allows test to emit a Syn and get a response Ack(eventID) when the subsystem is done processing a queue
|
||||||
Syn = Type("Syn")
|
Syn = Type("Syn")
|
||||||
Ack = Type("Ack")
|
Ack = Type("Ack")
|
||||||
|
|
||||||
// File Handling Events
|
|
||||||
StopFileShare = Type("StopFileShare")
|
|
||||||
StopAllFileShares = Type("StopAllFileShares")
|
|
||||||
ShareManifest = Type("ShareManifest")
|
|
||||||
ManifestSizeReceived = Type("ManifestSizeReceived")
|
|
||||||
ManifestError = Type("ManifestError")
|
|
||||||
ManifestReceived = Type("ManifestReceived")
|
|
||||||
ManifestSaved = Type("ManifestSaved")
|
|
||||||
FileDownloadProgressUpdate = Type("FileDownloadProgressUpdate")
|
|
||||||
FileDownloaded = Type("FileDownloaded")
|
|
||||||
FileVerificationFailed = Type("FileVerificationFailed")
|
|
||||||
|
|
||||||
// Profile Attribute Event
|
|
||||||
UpdatedProfileAttribute = Type("UpdatedProfileAttribute")
|
|
||||||
// Conversation Attribute Update...
|
|
||||||
UpdatedConversationAttribute = Type("UpdatedConversationAttribute")
|
|
||||||
StartingStorageMiragtion = Type("StartingStorageMigration")
|
|
||||||
DoneStorageMigration = Type("DoneStorageMigration")
|
|
||||||
|
|
||||||
TokenManagerInfo = Type("TokenManagerInfo")
|
|
||||||
TriggerAntispamCheck = Type("TriggerAntispamCheck")
|
|
||||||
MakeAntispamPayment = Type("MakeAntispamPayment")
|
|
||||||
|
|
||||||
// Heartbeat is used to trigger actions that need to happen every so often...
|
|
||||||
Heartbeat = Type("Heartbeat")
|
|
||||||
|
|
||||||
// Conversation Search
|
|
||||||
SearchResult = Type("SearchResult")
|
|
||||||
SearchCancelled = Type("SearchCancelled")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Field defines common event attributes
|
// Field defines common event attributes
|
||||||
|
@ -239,28 +252,22 @@ type Field string
|
||||||
const (
|
const (
|
||||||
|
|
||||||
// A peers local onion address
|
// A peers local onion address
|
||||||
Onion = Field("Onion")
|
Onion = Field("Onion")
|
||||||
ProfileOnion = Field("ProfileOnion")
|
|
||||||
|
|
||||||
RemotePeer = Field("RemotePeer")
|
RemotePeer = Field("RemotePeer")
|
||||||
LastSeen = Field("LastSeen")
|
|
||||||
Ciphertext = Field("Ciphertext")
|
Ciphertext = Field("Ciphertext")
|
||||||
Signature = Field("Signature")
|
Signature = Field("Signature")
|
||||||
CachedTokens = Field("CachedTokens")
|
|
||||||
PreviousSignature = Field("PreviousSignature")
|
PreviousSignature = Field("PreviousSignature")
|
||||||
TimestampSent = Field("TimestampSent")
|
TimestampSent = Field("TimestampSent")
|
||||||
TimestampReceived = Field("TimestampReceived")
|
TimestampReceived = Field("TimestampReceived")
|
||||||
|
|
||||||
Identity = Field("Identity")
|
Identity = Field("Identity")
|
||||||
|
|
||||||
ConversationID = Field("ConversationID")
|
|
||||||
GroupID = Field("GroupID")
|
GroupID = Field("GroupID")
|
||||||
GroupServer = Field("GroupServer")
|
GroupServer = Field("GroupServer")
|
||||||
GroupName = Field("GroupName")
|
|
||||||
ServerTokenY = Field("ServerTokenY")
|
ServerTokenY = Field("ServerTokenY")
|
||||||
ServerTokenOnion = Field("ServerTokenOnion")
|
ServerTokenOnion = Field("ServerTokenOnion")
|
||||||
GroupInvite = Field("GroupInvite")
|
GroupInvite = Field("GroupInvite")
|
||||||
ServerTokenCount = Field("ServerTokenCount")
|
|
||||||
|
|
||||||
ProfileName = Field("ProfileName")
|
ProfileName = Field("ProfileName")
|
||||||
Password = Field("Password")
|
Password = Field("Password")
|
||||||
|
@ -285,8 +292,6 @@ const (
|
||||||
EventID = Field("EventID")
|
EventID = Field("EventID")
|
||||||
EventContext = Field("EventContext")
|
EventContext = Field("EventContext")
|
||||||
Index = Field("Index")
|
Index = Field("Index")
|
||||||
RowIndex = Field("RowIndex")
|
|
||||||
ContentHash = Field("ContentHash")
|
|
||||||
|
|
||||||
// Handle denotes a contact handle of any type.
|
// Handle denotes a contact handle of any type.
|
||||||
Handle = Field("Handle")
|
Handle = Field("Handle")
|
||||||
|
@ -294,8 +299,7 @@ const (
|
||||||
// Flags denotes a set of message flags
|
// Flags denotes a set of message flags
|
||||||
Flags = Field("Flags")
|
Flags = Field("Flags")
|
||||||
|
|
||||||
Accepted = Field("Accepted")
|
Authorization = Field("Authorization")
|
||||||
Blocked = Field("Blocked")
|
|
||||||
|
|
||||||
KeyBundle = Field("KeyBundle")
|
KeyBundle = Field("KeyBundle")
|
||||||
|
|
||||||
|
@ -303,58 +307,41 @@ const (
|
||||||
Imported = Field("Imported")
|
Imported = Field("Imported")
|
||||||
|
|
||||||
Source = Field("Source")
|
Source = Field("Source")
|
||||||
|
|
||||||
FileKey = Field("FileKey")
|
|
||||||
FileSizeInChunks = Field("FileSizeInChunks")
|
|
||||||
ManifestSize = Field("ManifestSize")
|
|
||||||
SerializedManifest = Field("SerializedManifest")
|
|
||||||
TempFile = Field("TempFile")
|
|
||||||
FilePath = Field("FilePath")
|
|
||||||
FileDownloadFinished = Field("FileDownloadFinished")
|
|
||||||
NameSuggestion = Field("NameSuggestion")
|
|
||||||
|
|
||||||
SearchID = Field("SearchID")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Defining Common errors
|
// Defining Common errors
|
||||||
const (
|
const (
|
||||||
AppErrLoaded0 = "Loaded 0 profiles"
|
AppErrLoaded0 = "Loaded 0 profiles"
|
||||||
PasswordMatchError = "Password did not match"
|
)
|
||||||
|
|
||||||
|
// Values to be suplied in event.NewPeer for Status
|
||||||
|
const (
|
||||||
|
StorageRunning = "running"
|
||||||
|
StorageNew = "new"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Defining Protocol Contexts
|
// Defining Protocol Contexts
|
||||||
const (
|
const (
|
||||||
ContextAck = "im.cwtch.acknowledgement"
|
ContextAck = "im.cwtch.acknowledgement"
|
||||||
ContextInvite = "im.cwtch.invite"
|
ContextInvite = "im.cwtch.invite"
|
||||||
ContextRaw = "im.cwtch.raw"
|
ContextRaw = "im.cwtch.raw"
|
||||||
ContextGetVal = "im.cwtch.getVal"
|
ContextGetVal = "im.cwtch.getVal"
|
||||||
ContextVersion = "im.cwtch.version"
|
ContextRetVal = "im.cwtch.retVal"
|
||||||
ContextRetVal = "im.cwtch.retVal"
|
|
||||||
ContextRequestManifest = "im.cwtch.file.request.manifest"
|
|
||||||
ContextSendManifest = "im.cwtch.file.send.manifest"
|
|
||||||
ContextRequestFile = "im.cwtch.file.request.chunk"
|
|
||||||
ContextSendFile = "im.cwtch.file.send.chunk"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Define Attribute Keys related to history preservation
|
// Define Default Attribute Keys
|
||||||
const (
|
const (
|
||||||
PreserveHistoryDefaultSettingKey = "SaveHistoryDefault" // profile level default
|
SaveHistoryKey = "SavePeerHistory"
|
||||||
SaveHistoryKey = "SavePeerHistory" // peer level setting
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Define Default Attribute Values
|
// Define Default Attribute Values
|
||||||
const (
|
const (
|
||||||
// Save History has 3 distinct states. By default we refer to the profile level
|
// Save History has 3 distinct states. By default we don't save history (DefaultDeleteHistory), if the peer confirms this
|
||||||
// attribute PreserveHistoryDefaultSettingKey ( default: false i.e. DefaultDeleteHistory),
|
// we change to DeleteHistoryConfirmed, if they confirm they want to save then this becomes SaveHistoryConfirmed
|
||||||
// For each contact, if the profile owner confirms deletion we change to DeleteHistoryConfirmed,
|
// We use this distinction between default and confirmed to drive UI
|
||||||
// if the profile owner confirms they want to save history then this becomes SaveHistoryConfirmed
|
DeleteHistoryDefault = "DefaultDeleteHistory"
|
||||||
// These settings are set at the UI level using Get/SetScopeZoneAttribute with scoped zone: local.profile.*
|
|
||||||
SaveHistoryConfirmed = "SaveHistory"
|
SaveHistoryConfirmed = "SaveHistory"
|
||||||
DeleteHistoryConfirmed = "DeleteHistoryConfirmed"
|
DeleteHistoryConfirmed = "DeleteHistoryConfirmed"
|
||||||
|
|
||||||
// NOTE: While this says "[DeleteHistory]Default", The actual behaviour will now depend on the
|
|
||||||
// global app/profile value of PreserveHistoryDefaultSettingKey
|
|
||||||
DeleteHistoryDefault = "DefaultDeleteHistory"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Bool strings
|
// Bool strings
|
||||||
|
|
|
@ -10,6 +10,12 @@ type queue struct {
|
||||||
closed bool
|
closed bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type simpleQueue struct {
|
||||||
|
eventChannel chan Event
|
||||||
|
lock sync.Mutex
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
// Queue is a wrapper around a channel for handling Events in a consistent way across subsystems.
|
// Queue is a wrapper around a channel for handling Events in a consistent way across subsystems.
|
||||||
// The expectation is that each subsystem in Cwtch will manage a given an event.Queue fed from
|
// The expectation is that each subsystem in Cwtch will manage a given an event.Queue fed from
|
||||||
// the event.Manager.
|
// the event.Manager.
|
||||||
|
@ -27,6 +33,49 @@ func NewQueue() Queue {
|
||||||
return queue
|
return queue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewSimpleQueue initializes an event.Queue of the given buffer size.
|
||||||
|
func NewSimpleQueue(buffer int) Queue {
|
||||||
|
queue := new(simpleQueue)
|
||||||
|
queue.eventChannel = make(chan Event, buffer)
|
||||||
|
return queue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sq *simpleQueue) inChan() chan<- Event {
|
||||||
|
return sq.eventChannel
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sq *simpleQueue) OutChan() <-chan Event {
|
||||||
|
return sq.eventChannel
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backlog returns the length of the queue backlog
|
||||||
|
func (sq *simpleQueue) Len() int {
|
||||||
|
return len(sq.eventChannel)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns the next available event from the front of the queue
|
||||||
|
func (sq *simpleQueue) Next() Event {
|
||||||
|
event := <-sq.eventChannel
|
||||||
|
return event
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown closes our eventChannel
|
||||||
|
func (sq *simpleQueue) Shutdown() {
|
||||||
|
sq.lock.Lock()
|
||||||
|
sq.closed = true
|
||||||
|
close(sq.eventChannel)
|
||||||
|
sq.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown closes our eventChannel
|
||||||
|
func (sq *simpleQueue) Publish(event Event) {
|
||||||
|
sq.lock.Lock()
|
||||||
|
if !sq.closed {
|
||||||
|
sq.inChan() <- event
|
||||||
|
}
|
||||||
|
sq.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
func (iq *queue) inChan() chan<- Event {
|
func (iq *queue) inChan() chan<- Event {
|
||||||
return iq.infChan.In()
|
return iq.infChan.In()
|
||||||
}
|
}
|
||||||
|
@ -35,7 +84,7 @@ func (iq *queue) OutChan() <-chan Event {
|
||||||
return iq.infChan.Out()
|
return iq.infChan.Out()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Next returns the next available event from the front of the queue
|
// Out returns the next available event from the front of the queue
|
||||||
func (iq *queue) Next() Event {
|
func (iq *queue) Next() Event {
|
||||||
event := <-iq.infChan.Out()
|
event := <-iq.infChan.Out()
|
||||||
return event
|
return event
|
||||||
|
@ -48,10 +97,8 @@ func (iq *queue) Len() int {
|
||||||
// Shutdown closes our eventChannel
|
// Shutdown closes our eventChannel
|
||||||
func (iq *queue) Shutdown() {
|
func (iq *queue) Shutdown() {
|
||||||
iq.lock.Lock()
|
iq.lock.Lock()
|
||||||
if !iq.closed {
|
iq.closed = true
|
||||||
iq.closed = true
|
iq.infChan.Close()
|
||||||
iq.infChan.Close()
|
|
||||||
}
|
|
||||||
iq.lock.Unlock()
|
iq.lock.Unlock()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,7 +22,7 @@ type Event struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRandNumber is a helper function which returns a random integer, this is
|
// GetRandNumber is a helper function which returns a random integer, this is
|
||||||
// currently mostly used to generate message IDs
|
// currently mostly used to generate messageids
|
||||||
func GetRandNumber() *big.Int {
|
func GetRandNumber() *big.Int {
|
||||||
num, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
|
num, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
|
||||||
// If we can't generate random numbers then panicking is probably
|
// If we can't generate random numbers then panicking is probably
|
||||||
|
@ -46,8 +46,6 @@ func NewEventList(eventType Type, args ...interface{}) Event {
|
||||||
val, vok := args[i+1].(string)
|
val, vok := args[i+1].(string)
|
||||||
if kok && vok {
|
if kok && vok {
|
||||||
data[key] = val
|
data[key] = val
|
||||||
} else {
|
|
||||||
log.Errorf("attempted to send a field that could not be parsed to a string: %v %v", args[i], args[i+1])
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return Event{EventType: eventType, EventID: GetRandNumber().String(), Data: data}
|
return Event{EventType: eventType, EventID: GetRandNumber().String(), Data: data}
|
||||||
|
@ -58,16 +56,17 @@ type manager struct {
|
||||||
subscribers map[Type][]Queue
|
subscribers map[Type][]Queue
|
||||||
events chan []byte
|
events chan []byte
|
||||||
mapMutex sync.Mutex
|
mapMutex sync.Mutex
|
||||||
chanMutex sync.Mutex
|
|
||||||
internal chan bool
|
internal chan bool
|
||||||
closed bool
|
closed bool
|
||||||
trace bool
|
trace bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Manager is an interface for an event bus
|
// Manager is an interface for an event bus
|
||||||
|
// FIXME this interface lends itself to race conditions around channels
|
||||||
type Manager interface {
|
type Manager interface {
|
||||||
Subscribe(Type, Queue)
|
Subscribe(Type, Queue)
|
||||||
Publish(Event)
|
Publish(Event)
|
||||||
|
PublishLocal(Event)
|
||||||
Shutdown()
|
Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -95,18 +94,11 @@ func (em *manager) initialize() {
|
||||||
func (em *manager) Subscribe(eventType Type, queue Queue) {
|
func (em *manager) Subscribe(eventType Type, queue Queue) {
|
||||||
em.mapMutex.Lock()
|
em.mapMutex.Lock()
|
||||||
defer em.mapMutex.Unlock()
|
defer em.mapMutex.Unlock()
|
||||||
for _, sub := range em.subscribers[eventType] {
|
|
||||||
if sub == queue {
|
|
||||||
return // don't add the same queue for the same event twice...
|
|
||||||
}
|
|
||||||
}
|
|
||||||
em.subscribers[eventType] = append(em.subscribers[eventType], queue)
|
em.subscribers[eventType] = append(em.subscribers[eventType], queue)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Publish takes an Event and sends it to the internal eventBus where it is distributed to all Subscribers
|
// Publish takes an Event and sends it to the internal eventBus where it is distributed to all Subscribers
|
||||||
func (em *manager) Publish(event Event) {
|
func (em *manager) Publish(event Event) {
|
||||||
em.chanMutex.Lock()
|
|
||||||
defer em.chanMutex.Unlock()
|
|
||||||
if event.EventType != "" && !em.closed {
|
if event.EventType != "" && !em.closed {
|
||||||
|
|
||||||
// Debug Events for Tracing, locked behind an environment variable
|
// Debug Events for Tracing, locked behind an environment variable
|
||||||
|
@ -131,12 +123,17 @@ func (em *manager) Publish(event Event) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Publish an event only locally, not going over an IPC bridge if there is one
|
||||||
|
func (em *manager) PublishLocal(event Event) {
|
||||||
|
em.Publish(event)
|
||||||
|
}
|
||||||
|
|
||||||
// eventBus is an internal function that is used to distribute events to all subscribers
|
// eventBus is an internal function that is used to distribute events to all subscribers
|
||||||
func (em *manager) eventBus() {
|
func (em *manager) eventBus() {
|
||||||
for {
|
for {
|
||||||
eventJSON := <-em.events
|
eventJSON := <-em.events
|
||||||
|
|
||||||
// In the case on an empty event. Tear down the Queue
|
// In the case on an empty event. Teardown the Queue
|
||||||
if len(eventJSON) == 0 {
|
if len(eventJSON) == 0 {
|
||||||
log.Errorf("Received zero length event")
|
log.Errorf("Received zero length event")
|
||||||
break
|
break
|
||||||
|
@ -158,10 +155,7 @@ func (em *manager) eventBus() {
|
||||||
for _, subscriber := range subscribers {
|
for _, subscriber := range subscribers {
|
||||||
// Deep Copy for Each Subscriber
|
// Deep Copy for Each Subscriber
|
||||||
var eventCopy Event
|
var eventCopy Event
|
||||||
err = json.Unmarshal(eventJSON, &eventCopy)
|
json.Unmarshal(eventJSON, &eventCopy)
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error unmarshalling event: %v ", err)
|
|
||||||
}
|
|
||||||
subscriber.Publish(eventCopy)
|
subscriber.Publish(eventCopy)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -173,9 +167,7 @@ func (em *manager) eventBus() {
|
||||||
// Shutdown triggers, and waits for, the internal eventBus goroutine to finish
|
// Shutdown triggers, and waits for, the internal eventBus goroutine to finish
|
||||||
func (em *manager) Shutdown() {
|
func (em *manager) Shutdown() {
|
||||||
em.events <- []byte{}
|
em.events <- []byte{}
|
||||||
em.chanMutex.Lock()
|
|
||||||
em.closed = true
|
em.closed = true
|
||||||
em.chanMutex.Unlock()
|
|
||||||
// wait for eventBus to finish
|
// wait for eventBus to finish
|
||||||
<-em.internal
|
<-em.internal
|
||||||
close(em.events)
|
close(em.events)
|
||||||
|
|
|
@ -2,6 +2,7 @@ package event
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -11,11 +12,12 @@ func TestEventManager(t *testing.T) {
|
||||||
eventManager := NewEventManager()
|
eventManager := NewEventManager()
|
||||||
|
|
||||||
// We need to make this buffer at least 1, otherwise we will log an error!
|
// We need to make this buffer at least 1, otherwise we will log an error!
|
||||||
simpleQueue := NewQueue()
|
testChan := make(chan Event, 1)
|
||||||
|
simpleQueue := &simpleQueue{testChan, sync.Mutex{}, false}
|
||||||
eventManager.Subscribe("TEST", simpleQueue)
|
eventManager.Subscribe("TEST", simpleQueue)
|
||||||
eventManager.Publish(Event{EventType: "TEST", Data: map[Field]string{"Value": "Hello World"}})
|
eventManager.Publish(Event{EventType: "TEST", Data: map[Field]string{"Value": "Hello World"}})
|
||||||
|
|
||||||
event := simpleQueue.Next()
|
event := <-testChan
|
||||||
if event.EventType == "TEST" && event.Data["Value"] == "Hello World" {
|
if event.EventType == "TEST" && event.Data["Value"] == "Hello World" {
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
@ -25,6 +27,17 @@ func TestEventManager(t *testing.T) {
|
||||||
eventManager.Shutdown()
|
eventManager.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Most basic Manager Test, Initialize, Subscribe, Publish, Receive
|
||||||
|
func TestEventManagerOverflow(t *testing.T) {
|
||||||
|
eventManager := NewEventManager()
|
||||||
|
|
||||||
|
// Explicitly setting this to 0 log an error!
|
||||||
|
testChan := make(chan Event)
|
||||||
|
simpleQueue := &simpleQueue{testChan, sync.Mutex{}, false}
|
||||||
|
eventManager.Subscribe("TEST", simpleQueue)
|
||||||
|
eventManager.Publish(Event{EventType: "TEST"})
|
||||||
|
}
|
||||||
|
|
||||||
func TestEventManagerMultiple(t *testing.T) {
|
func TestEventManagerMultiple(t *testing.T) {
|
||||||
log.SetLevel(log.LevelDebug)
|
log.SetLevel(log.LevelDebug)
|
||||||
eventManager := NewEventManager()
|
eventManager := NewEventManager()
|
||||||
|
@ -43,7 +56,7 @@ func TestEventManagerMultiple(t *testing.T) {
|
||||||
eventManager.Publish(Event{EventType: "GroupEvent", Data: map[Field]string{"Value": "Hello World Group"}})
|
eventManager.Publish(Event{EventType: "GroupEvent", Data: map[Field]string{"Value": "Hello World Group"}})
|
||||||
eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
|
eventManager.Publish(Event{EventType: "PeerEvent", Data: map[Field]string{"Value": "Hello World Peer"}})
|
||||||
eventManager.Publish(Event{EventType: "ErrorEvent", Data: map[Field]string{"Value": "Hello World Error"}})
|
eventManager.Publish(Event{EventType: "ErrorEvent", Data: map[Field]string{"Value": "Hello World Error"}})
|
||||||
eventManager.Publish(Event{EventType: "NobodyIsSubscribedToThisEvent", Data: map[Field]string{"Value": "No one should see this!"}})
|
eventManager.Publish(Event{EventType: "NobodyIsSubscribedToThisEvent", Data: map[Field]string{"Value": "Noone should see this!"}})
|
||||||
|
|
||||||
assertLength := func(len int, expected int, label string) {
|
assertLength := func(len int, expected int, label string) {
|
||||||
if len != expected {
|
if len != expected {
|
||||||
|
|
|
@ -0,0 +1,38 @@
|
||||||
|
package event
|
||||||
|
|
||||||
|
type ipcManager struct {
|
||||||
|
manager Manager
|
||||||
|
|
||||||
|
onion string
|
||||||
|
ipcBridge IPCBridge
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIPCEventManager returns an EvenetManager that also pipes events over and supplied IPCBridge
|
||||||
|
func NewIPCEventManager(bridge IPCBridge, onion string) Manager {
|
||||||
|
em := &ipcManager{onion: onion, ipcBridge: bridge, manager: NewEventManager()}
|
||||||
|
return em
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPCEventManagerFrom returns an IPCEventManger from the supplied manager and IPCBridge
|
||||||
|
func IPCEventManagerFrom(bridge IPCBridge, onion string, manager Manager) Manager {
|
||||||
|
em := &ipcManager{onion: onion, ipcBridge: bridge, manager: manager}
|
||||||
|
return em
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ipcm *ipcManager) Publish(ev Event) {
|
||||||
|
ipcm.manager.Publish(ev)
|
||||||
|
message := &IPCMessage{Dest: ipcm.onion, Message: ev}
|
||||||
|
ipcm.ipcBridge.Write(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ipcm *ipcManager) PublishLocal(ev Event) {
|
||||||
|
ipcm.manager.Publish(ev)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ipcm *ipcManager) Subscribe(eventType Type, queue Queue) {
|
||||||
|
ipcm.manager.Subscribe(eventType, queue)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ipcm *ipcManager) Shutdown() {
|
||||||
|
ipcm.manager.Shutdown()
|
||||||
|
}
|
|
@ -1,4 +1,3 @@
|
||||||
// nolint:nilaway - the infiniteBuffer function causes issues with static analysis because it is very unidomatic.
|
|
||||||
package event
|
package event
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -20,7 +19,7 @@ func newInfiniteChannel() *infiniteChannel {
|
||||||
input: make(chan Event),
|
input: make(chan Event),
|
||||||
output: make(chan Event),
|
output: make(chan Event),
|
||||||
length: make(chan int),
|
length: make(chan int),
|
||||||
buffer: newInfiniteQueue(),
|
buffer: newInfinitQueue(),
|
||||||
}
|
}
|
||||||
go ch.infiniteBuffer()
|
go ch.infiniteBuffer()
|
||||||
return ch
|
return ch
|
||||||
|
|
|
@ -24,7 +24,7 @@ type infiniteQueue struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// New constructs and returns a new Queue.
|
// New constructs and returns a new Queue.
|
||||||
func newInfiniteQueue() *infiniteQueue {
|
func newInfinitQueue() *infiniteQueue {
|
||||||
return &infiniteQueue{
|
return &infiniteQueue{
|
||||||
buf: make([]Event, minQueueLen),
|
buf: make([]Event, minQueueLen),
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
package event
|
||||||
|
|
||||||
|
// IPCMessage is a wrapper for a regular eventMessage with a destination (onion|AppDest) so the other side of the bridge can route appropriately
|
||||||
|
type IPCMessage struct {
|
||||||
|
Dest string
|
||||||
|
Message Event
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPCBridge is an interface to a IPC construct used to communicate IPCMessages
|
||||||
|
type IPCBridge interface {
|
||||||
|
Read() (*IPCMessage, bool)
|
||||||
|
Write(message *IPCMessage)
|
||||||
|
Shutdown()
|
||||||
|
}
|
|
@ -1,128 +0,0 @@
|
||||||
package extensions
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"cwtch.im/cwtch/peer"
|
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
|
||||||
"cwtch.im/cwtch/settings"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProfileValueExtension implements custom Profile Names over Cwtch
|
|
||||||
type ProfileValueExtension struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pne ProfileValueExtension) NotifySettingsUpdate(_ settings.GlobalSettings) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pne ProfileValueExtension) EventsToRegister() []event.Type {
|
|
||||||
return []event.Type{event.PeerStateChange, event.Heartbeat}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pne ProfileValueExtension) ExperimentsToRegister() []string {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pne ProfileValueExtension) requestProfileInfo(profile peer.CwtchPeer, ci *model.Conversation) {
|
|
||||||
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.Name)
|
|
||||||
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileStatus)
|
|
||||||
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute1)
|
|
||||||
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute2)
|
|
||||||
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.ProfileAttribute3)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pne ProfileValueExtension) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
|
||||||
switch ev.EventType {
|
|
||||||
case event.Heartbeat:
|
|
||||||
// once every heartbeat, loop through conversations and, if they are online, request an update to any long info..
|
|
||||||
conversations, err := profile.FetchConversations()
|
|
||||||
if err == nil {
|
|
||||||
for _, ci := range conversations {
|
|
||||||
if profile.GetPeerState(ci.Handle) == connections.AUTHENTICATED {
|
|
||||||
pne.requestProfileInfo(profile, ci)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case event.PeerStateChange:
|
|
||||||
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
|
||||||
if err == nil {
|
|
||||||
// if we have re-authenticated with thie peer then request their profile image...
|
|
||||||
if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
|
|
||||||
// Request some profile information...
|
|
||||||
pne.requestProfileInfo(profile, ci)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnContactReceiveValue for ProfileValueExtension handles saving specific Public Profile Values like Profile Name
|
|
||||||
func (pne ProfileValueExtension) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, szp attr.ScopedZonedPath, value string, exists bool) {
|
|
||||||
// Allow public profile parameters to be added as contact specific attributes...
|
|
||||||
scope, zone, _ := szp.GetScopeZonePath()
|
|
||||||
if exists && scope.IsPublic() && zone == attr.ProfileZone {
|
|
||||||
|
|
||||||
// Check the current value of the attribute
|
|
||||||
currentValue, err := profile.GetConversationAttribute(conversation.ID, szp)
|
|
||||||
if err == nil && currentValue == value {
|
|
||||||
// Value exists and the value is the same, short-circuit
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save the new Attribute
|
|
||||||
err = profile.SetConversationAttribute(conversation.ID, szp, value)
|
|
||||||
if err != nil {
|
|
||||||
// Something else wen't wrong.. short-circuit
|
|
||||||
log.Errorf("error setting conversation attribute %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finally publish an update for listeners to react to.
|
|
||||||
scope, zone, zpath := szp.GetScopeZonePath()
|
|
||||||
profile.PublishEvent(event.NewEvent(event.UpdatedConversationAttribute, map[event.Field]string{
|
|
||||||
event.Scope: string(scope),
|
|
||||||
event.Path: string(zone.ConstructZonedPath(zpath)),
|
|
||||||
event.Data: value,
|
|
||||||
event.RemotePeer: conversation.Handle,
|
|
||||||
event.ConversationID: strconv.Itoa(conversation.ID),
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnContactRequestValue for ProfileValueExtension handles returning Public Profile Values
|
|
||||||
func (pne ProfileValueExtension) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, szp attr.ScopedZonedPath) {
|
|
||||||
scope, zone, zpath := szp.GetScopeZonePath()
|
|
||||||
log.Debugf("Looking up public | conversation scope/zone %v", szp.ToString())
|
|
||||||
if scope.IsPublic() || scope.IsConversation() {
|
|
||||||
val, exists := profile.GetScopedZonedAttribute(scope, zone, zpath)
|
|
||||||
|
|
||||||
// NOTE: Temporary Override because UI currently wipes names if it can't find them...
|
|
||||||
if !exists && zone == attr.UnknownZone && zpath == constants.Name {
|
|
||||||
val, exists = profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: Cwtch 1.15+ requires that profiles be able to restrict file downloading to specific contacts. As such we need an ACL check here
|
|
||||||
// on the fileshareing zone.
|
|
||||||
// TODO: Split this functionality into FilesharingFunctionality, and restrict this function to only considering Profile zoned attributes?
|
|
||||||
if zone == attr.FilesharingZone {
|
|
||||||
if !conversation.GetPeerAC().ShareFiles {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Construct a Response
|
|
||||||
resp := event.NewEvent(event.SendRetValMessageToPeer, map[event.Field]string{event.ConversationID: strconv.Itoa(conversation.ID), event.RemotePeer: conversation.Handle, event.Exists: strconv.FormatBool(exists)})
|
|
||||||
resp.EventID = eventID
|
|
||||||
if exists {
|
|
||||||
resp.Data[event.Data] = val
|
|
||||||
} else {
|
|
||||||
resp.Data[event.Data] = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Responding with SendRetValMessageToPeer exists:%v data: %v\n", exists, val)
|
|
||||||
profile.PublishEvent(resp)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,66 +0,0 @@
|
||||||
package extensions

import (
	"strconv"

	"cwtch.im/cwtch/event"
	"cwtch.im/cwtch/model"
	"cwtch.im/cwtch/model/attr"
	"cwtch.im/cwtch/model/constants"
	"cwtch.im/cwtch/peer"
	"cwtch.im/cwtch/protocol/connections"
	"cwtch.im/cwtch/settings"
	"git.openprivacy.ca/openprivacy/log"
)

// SendWhenOnlineExtension implements automatic sending of pending (unacknowledged)
// messages when a peer comes back online.
// Some Considerations:
//   - There are race conditions inherent in this approach e.g. a peer could go offline just after receiving a message and never sending an ack
//   - In that case the next time we connect we will send a duplicate message.
//   - Currently we do not include metadata like sent time in raw peer protocols (however Overlay does now have support for that information)
type SendWhenOnlineExtension struct {
}

// NotifySettingsUpdate is a no-op: this extension does not depend on global settings.
func (soe SendWhenOnlineExtension) NotifySettingsUpdate(_ settings.GlobalSettings) {
}

// EventsToRegister subscribes this extension to peer connection-state changes only.
func (soe SendWhenOnlineExtension) EventsToRegister() []event.Type {
	return []event.Type{event.PeerStateChange}
}

// ExperimentsToRegister returns nil: this extension is not gated behind an experiment.
func (soe SendWhenOnlineExtension) ExperimentsToRegister() []string {
	return nil
}

// OnEvent resends unacknowledged messages to a conversation peer once that peer
// reaches the AUTHENTICATED connection state.
func (soe SendWhenOnlineExtension) OnEvent(ev event.Event, profile peer.CwtchPeer) {
	switch ev.EventType {
	case event.PeerStateChange:
		ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
		if err == nil {
			// if we have re-authenticated with this peer then resend any messages that
			// were queued while they were offline...
			if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
				// Check the last 100 messages, if any of them are pending, then send them now...
				messsages, _ := profile.GetMostRecentMessages(ci.ID, 0, 0, uint(100))
				for _, message := range messsages {
					if message.Attr[constants.AttrAck] == constants.False {
						body := message.Body
						ev := event.NewEvent(event.SendMessageToPeer, map[event.Field]string{event.ConversationID: strconv.Itoa(ci.ID), event.RemotePeer: ci.Handle, event.Data: body})
						ev.EventID = message.Signature // we need this ensure that we correctly ack this in the db when it comes back
						// TODO: The EventBus is becoming very noisy...we may want to consider a one-way shortcut to Engine i.e. profile.Engine.SendMessageToPeer
						log.Debugf("resending message that was sent when peer was offline")
						profile.PublishEvent(ev)
					}
				}
			}
		}
	}
}

// OnContactReceiveValue is a nop for SendWhenOnlineExtension
func (soe SendWhenOnlineExtension) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, szp attr.ScopedZonedPath, value string, exists bool) {
}

// OnContactRequestValue is a nop for SendWhenOnlineExtension
func (soe SendWhenOnlineExtension) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, szp attr.ScopedZonedPath) {
}
|
|
|
@ -1,591 +0,0 @@
|
||||||
package filesharing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/settings"
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"os"
|
|
||||||
path "path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"cwtch.im/cwtch/peer"
|
|
||||||
"cwtch.im/cwtch/protocol/files"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Functionality groups some common UI triggered functions for contacts...
type Functionality struct {
}

// NotifySettingsUpdate is a no-op: filesharing reads no global settings here.
func (f *Functionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
}

// EventsToRegister subscribes filesharing to engine startup, manifest arrival and
// download completion events.
func (f *Functionality) EventsToRegister() []event.Type {
	return []event.Type{event.ProtocolEngineCreated, event.ManifestReceived, event.FileDownloaded}
}

// ExperimentsToRegister gates this functionality behind the file sharing experiment.
func (f *Functionality) ExperimentsToRegister() []string {
	return []string{constants.FileSharingExperiment}
}
|
|
||||||
|
|
||||||
// OnEvent handles File Sharing Hooks like Manifest Received and FileDownloaded.
//   - ProtocolEngineCreated: reshare all previously active file offers.
//   - ManifestReceived: validate the incoming manifest against locally stored
//     download metadata (.manifest/.path/.limit attributes) and, if acceptable,
//     save it and publish ManifestSaved for the engine.
//   - FileDownloaded: mark the download complete in local attributes.
func (f *Functionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
	if profile.IsFeatureEnabled(constants.FileSharingExperiment) {
		switch ev.EventType {
		case event.ProtocolEngineCreated:
			f.ReShareFiles(profile)
		case event.ManifestReceived:
			log.Debugf("Manifest Received Event!: %v", ev)
			handle := ev.Data[event.Handle]
			fileKey := ev.Data[event.FileKey]
			serializedManifest := ev.Data[event.SerializedManifest]

			// Both .manifest and .path must have been stored by a prior DownloadFile call;
			// otherwise this manifest was unsolicited and is ignored.
			manifestFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.manifest", fileKey))
			if exists {
				downloadFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.path", fileKey))
				if exists {
					log.Debugf("downloading manifest to %v, file to %v", manifestFilePath, downloadFilePath)
					var manifest files.Manifest
					err := json.Unmarshal([]byte(serializedManifest), &manifest)

					if err == nil {
						// We only need to check the file size here, as manifest is sent to engine and the file created
						// will be bound to the size advertised in manifest.
						fileSizeLimitValue, fileSizeLimitExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%v.limit", fileKey))
						if fileSizeLimitExists {
							fileSizeLimit, err := strconv.ParseUint(fileSizeLimitValue, 10, 64)
							if err == nil {
								if manifest.FileSizeInBytes >= fileSizeLimit {
									log.Debugf("could not download file, size %v greater than limit %v", manifest.FileSizeInBytes, fileSizeLimitValue)
								} else {
									// Keep the advertised name as the Title and redirect the
									// manifest's FileName at the local download destination.
									manifest.Title = manifest.FileName
									manifest.FileName = downloadFilePath
									log.Debugf("saving manifest")
									err = manifest.Save(manifestFilePath)
									if err != nil {
										log.Errorf("could not save manifest: %v", err)
									} else {
										tempFile := ""
										if runtime.GOOS == "android" {
											// on android the download is staged next to the manifest;
											// derive that staging path by stripping the ".manifest" suffix
											tempFile = manifestFilePath[0 : len(manifestFilePath)-len(".manifest")]
											log.Debugf("derived android temp path: %v", tempFile)
										}
										profile.PublishEvent(event.NewEvent(event.ManifestSaved, map[event.Field]string{
											event.FileKey:            fileKey,
											event.Handle:             handle,
											event.SerializedManifest: string(manifest.Serialize()),
											event.TempFile:           tempFile,
											event.NameSuggestion:     manifest.Title,
										}))
									}
								}
							} else {
								log.Errorf("error saving manifest: file size limit is incorrect: %v", err)
							}
						} else {
							log.Errorf("error saving manifest: could not find file size limit info")
						}
					} else {
						log.Errorf("error saving manifest: %v", err)
					}
				} else {
					log.Errorf("found manifest path but not download path for %v", fileKey)
				}
			} else {
				log.Errorf("no download path found for manifest: %v", fileKey)
			}
		case event.FileDownloaded:
			fileKey := ev.Data[event.FileKey]
			profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey), "true")
		}
	} else {
		log.Errorf("profile called filesharing experiment OnContactReceiveValue even though file sharing was not enabled. This is likely a programming error.")
	}
}
|
|
||||||
|
|
||||||
// OnContactRequestValue is a nop for the filesharing Functionality: peers fetch
// shared manifests via the attribute lookup protocol, no extra handling is needed here.
func (f *Functionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
	// nop
}
|
|
||||||
|
|
||||||
func (f *Functionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
|
|
||||||
// Profile should not call us if FileSharing is disabled
|
|
||||||
if profile.IsFeatureEnabled(constants.FileSharingExperiment) {
|
|
||||||
scope, zone, zpath := path.GetScopeZonePath()
|
|
||||||
log.Debugf("file sharing contact receive value")
|
|
||||||
if exists && scope.IsConversation() && zone == attr.FilesharingZone && strings.HasSuffix(zpath, ".manifest.size") {
|
|
||||||
fileKey := strings.Replace(zpath, ".manifest.size", "", 1)
|
|
||||||
size, err := strconv.Atoi(value)
|
|
||||||
// if size is valid and below the maximum size for a manifest
|
|
||||||
// this is to prevent malicious sharers from using large amounts of memory when distributing
|
|
||||||
// a manifest as we reconstruct this in-memory
|
|
||||||
if err == nil && size < files.MaxManifestSize {
|
|
||||||
profile.PublishEvent(event.NewEvent(event.ManifestSizeReceived, map[event.Field]string{event.FileKey: fileKey, event.ManifestSize: value, event.Handle: conversation.Handle}))
|
|
||||||
} else {
|
|
||||||
profile.PublishEvent(event.NewEvent(event.ManifestError, map[event.Field]string{event.FileKey: fileKey, event.Handle: conversation.Handle}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
log.Errorf("profile called filesharing experiment OnContactReceiveValue even though file sharing was not enabled. This is likely a programming error.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FunctionalityGate returns filesharing functionality - gates now happen on function calls.
|
|
||||||
func FunctionalityGate() *Functionality {
|
|
||||||
return new(Functionality)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PreviewFunctionalityGate returns filesharing if image previews are enabled
|
|
||||||
func PreviewFunctionalityGate(experimentMap map[string]bool) (*Functionality, error) {
|
|
||||||
if experimentMap[constants.FileSharingExperiment] && experimentMap[constants.ImagePreviewsExperiment] {
|
|
||||||
return new(Functionality), nil
|
|
||||||
}
|
|
||||||
return nil, errors.New("image previews are not enabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
// OverlayMessage presents the canonical format of the File Sharing functionality Overlay Message
|
|
||||||
// This is the format that the UI will parse to display the message
|
|
||||||
type OverlayMessage struct {
|
|
||||||
Name string `json:"f"`
|
|
||||||
Hash string `json:"h"`
|
|
||||||
Nonce string `json:"n"`
|
|
||||||
Size uint64 `json:"s"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileKey is the unique reference to a file offer
|
|
||||||
func (om *OverlayMessage) FileKey() string {
|
|
||||||
return fmt.Sprintf("%s.%s", om.Hash, om.Nonce)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldAutoDL checks file size and file name. *DOES NOT* check user settings or contact state
|
|
||||||
func (om *OverlayMessage) ShouldAutoDL() bool {
|
|
||||||
if om.Size > constants.ImagePreviewMaxSizeInBytes {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
lname := strings.ToLower(om.Name)
|
|
||||||
for _, s := range constants.AutoDLFileExts {
|
|
||||||
if strings.HasSuffix(lname, s) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyOrResumeDownloadDefaultLimit verifies or resumes a download using the default
// maximum size (MaxManifestSize * DefaultChunkSize).
func (f *Functionality) VerifyOrResumeDownloadDefaultLimit(profile peer.CwtchPeer, conversation int, fileKey string) error {
	return f.VerifyOrResumeDownload(profile, conversation, fileKey, files.MaxManifestSize*files.DefaultChunkSize)
}

// VerifyOrResumeDownload re-checks a previously requested download. If the file on disk
// already matches its saved manifest a FileDownloaded event is published and nil is
// returned; otherwise the download is restarted via DownloadFile. An error is returned
// when the locally stored .manifest/.path metadata is missing or unreadable.
func (f *Functionality) VerifyOrResumeDownload(profile peer.CwtchPeer, conversation int, fileKey string, size uint64) error {
	if manifestFilePath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", fileKey)); exists {
		if downloadfilepath, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", fileKey)); exists {
			manifest, err := files.LoadManifest(manifestFilePath)
			if err == nil {
				// Assert the filename...this is technically not necessary, but is here for completeness
				manifest.FileName = downloadfilepath
				if manifest.VerifyFile() == nil {
					// Send a FileDownloaded Event. Usually when VerifyOrResumeDownload is triggered it's because some UI is awaiting the results of a
					// Download.
					profile.PublishEvent(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: fileKey, event.FilePath: downloadfilepath, event.TempFile: downloadfilepath}))
					// File is verified and there is nothing else to do...
					return nil
				} else {
					// Kick off another Download...
					return f.DownloadFile(profile, conversation, downloadfilepath, manifestFilePath, fileKey, size)
				}
			}
		}
	}
	return errors.New("file download metadata does not exist, or is corrupted")
}
|
|
||||||
|
|
||||||
// CheckDownloadStatus re-publishes the current state of a download for the UI:
// FileDownloaded when the local "<fileKey>.complete" flag is set to true, otherwise a
// FileDownloadProgressUpdate with unknown (-1) progress. Always returns nil.
func (f *Functionality) CheckDownloadStatus(profile peer.CwtchPeer, fileKey string) error {
	path, _ := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", fileKey))
	if value, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey)); exists && value == event.True {
		profile.PublishEvent(event.NewEvent(event.FileDownloaded, map[event.Field]string{
			event.ProfileOnion: profile.GetOnion(),
			event.FileKey:      fileKey,
			event.FilePath:     path,
			event.TempFile:     "",
		}))
	} else {
		// NOTE(review): this branch is also taken when .complete exists but is not "true"
		log.Debugf("CheckDownloadStatus found .path but not .complete")
		// -1 for progress and size signals "unknown" to the UI
		profile.PublishEvent(event.NewEvent(event.FileDownloadProgressUpdate, map[event.Field]string{
			event.ProfileOnion:     profile.GetOnion(),
			event.FileKey:          fileKey,
			event.Progress:         "-1",
			event.FileSizeInChunks: "-1",
			event.FilePath:         path,
		}))
	}
	return nil // cannot fail
}
|
|
||||||
|
|
||||||
// EnhancedShareFile shares the file at sharefilepath. A conversationID of -1 marks the
// file as the profile's custom profile image; any other id sends the file-sharing
// overlay message to that conversation and returns the serialized sent message.
// Returns "" on any failure.
func (f *Functionality) EnhancedShareFile(profile peer.CwtchPeer, conversationID int, sharefilepath string) string {
	fileKey, overlay, err := f.ShareFile(sharefilepath, profile)
	if err != nil {
		log.Errorf("error sharing file: %v", err)
	} else if conversationID == -1 {
		// FIXME: At some point we might want to allow arbitrary public files, but for now this API will assume
		// there is only one, and it is the custom profile image...
		profile.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey, fileKey)
	} else {
		// Set a new attribute so we can associate this download with this conversation...
		profile.SetConversationAttribute(conversationID, attr.ConversationScope.ConstructScopedZonedPath(attr.FilesharingZone.ConstructZonedPath(fileKey)), "")
		id, err := profile.SendMessage(conversationID, overlay)
		if err == nil {
			return profile.EnhancedGetMessageById(conversationID, id)
		}
	}
	return ""
}
|
|
||||||
|
|
||||||
// DownloadFileDefaultLimit given a profile, a conversation handle and a file sharing key, start off a download process
// to downloadFilePath with a default filesize limit
func (f *Functionality) DownloadFileDefaultLimit(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string) error {
	return f.DownloadFile(profile, conversationID, downloadFilePath, manifestFilePath, key, files.MaxManifestSize*files.DefaultChunkSize)
}

// DownloadFile given a profile, a conversation handle and a file sharing key, start off a download process
// to downloadFilePath. It records the manifest path, download path and size limit as
// local attributes, then requests the manifest size from the conversation peer; the
// actual transfer continues asynchronously via ManifestReceived handling in OnEvent.
func (f *Functionality) DownloadFile(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string, limit uint64) error {

	// assert that we are allowed to download the file
	if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
		return errors.New("filesharing functionality is not enabled")
	}

	// Don't download files if the download or manifest path is not set
	if downloadFilePath == "" || manifestFilePath == "" {
		return errors.New("download path or manifest path is empty")
	}

	// Don't download files if the download file directory does not exist
	// Unless we are on Android where the kernel wishes to keep us ignorant of the
	// actual path and/or existence of the file. We handle this case further down
	// the line when the manifest is received and protocol engine and the Android layer
	// negotiate a temporary local file -> final file copy. We don't want to worry
	// about that here...
	if runtime.GOOS != "android" {
		if _, err := os.Stat(path.Dir(downloadFilePath)); os.IsNotExist(err) {
			return errors.New("download directory does not exist")
		}

		// Don't download files if the manifest file directory does not exist
		if _, err := os.Stat(path.Dir(manifestFilePath)); os.IsNotExist(err) {
			return errors.New("manifest directory does not exist")
		}
	}
	// Store local.filesharing.filekey.manifest as the location of the manifest
	profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), manifestFilePath)

	// Store local.filesharing.filekey.path as the location of the download
	profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", key), downloadFilePath)

	// Store local.filesharing.filekey.limit as the max file size of the download
	profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.limit", key), strconv.FormatUint(limit, 10))

	// Get the value of conversation.filesharing.filekey.manifest.size from `handle`
	profile.SendScopedZonedGetValToContact(conversationID, attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key))

	return nil
}
|
|
||||||
|
|
||||||
// startFileShare is a private method used to finalize a file share and publish it to the protocol engine for processing.
|
|
||||||
// if force is set to true, this function will ignore timestamp checks...
|
|
||||||
func (f *Functionality) startFileShare(profile peer.CwtchPeer, filekey string, manifest string, force bool) error {
|
|
||||||
tsStr, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey))
|
|
||||||
if exists && !force {
|
|
||||||
ts, err := strconv.ParseInt(tsStr, 10, 64)
|
|
||||||
if err != nil || ts < time.Now().Unix()-2592000 {
|
|
||||||
log.Errorf("ignoring request to download a file offered more than 30 days ago")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// set the filekey status to active
|
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", filekey), constants.True)
|
|
||||||
// reset the timestamp...
|
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey), strconv.FormatInt(time.Now().Unix(), 10))
|
|
||||||
// share the manifest
|
|
||||||
profile.PublishEvent(event.NewEvent(event.ShareManifest, map[event.Field]string{event.FileKey: filekey, event.SerializedManifest: manifest}))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RestartFileShare takes in an existing filekey and, assuming the manifest exists, restarts sharing of the manifest
// by default this function always forces a file share, even if the file has timed out.
func (f *Functionality) RestartFileShare(profile peer.CwtchPeer, filekey string) error {
	return f.restartFileShareAdvanced(profile, filekey, true)
}

// restartFileShareAdvanced takes in an existing filekey and, assuming the manifest exists, restarts sharing of the manifest in addition
// to a set of parameters. When force is false the share may be skipped for offers older
// than 30 days (see startFileShare).
func (f *Functionality) restartFileShareAdvanced(profile peer.CwtchPeer, filekey string, force bool) error {

	// assert that we are allowed to restart filesharing
	if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
		return errors.New("filesharing functionality is not enabled")
	}

	// check that a manifest exists
	manifest, manifestExists := profile.GetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", filekey))
	if manifestExists {
		// everything is in order, so reshare this file with the engine
		log.Debugf("restarting file share: %v", filekey)
		return f.startFileShare(profile, filekey, manifest, force)
	}
	return fmt.Errorf("manifest does not exist for filekey: %v", filekey)
}
|
|
||||||
|
|
||||||
// ReShareFiles given a profile we iterate through all existing fileshares and re-share them
// if the time limit has not expired
func (f *Functionality) ReShareFiles(profile peer.CwtchPeer) error {

	// assert that we are allowed to restart filesharing
	if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
		return errors.New("filesharing functionality is not enabled")
	}

	keys, err := profile.GetScopedZonedAttributeKeys(attr.LocalScope, attr.FilesharingZone)
	if err != nil {
		return err
	}

	for _, key := range keys {
		// only look at timestamp keys
		// this is an arbitrary choice

		if strings.HasSuffix(key, ".ts") {
			// strip the scope and zone prefixes to recover "<hash>.<nonce>.ts"
			_, zonedpath := attr.ParseScope(key)
			_, keypath := attr.ParseZone(zonedpath)
			keyparts := strings.Split(keypath, ".")

			// assert that the key is well-formed
			if len(keyparts) == 3 && keyparts[2] == "ts" {
				// fetch the timestamp key
				filekey := strings.Join(keyparts[:2], ".")
				sharedFile, err := f.GetFileShareInfo(profile, filekey)

				// If we haven't explicitly stopped sharing the file then attempt a reshare
				if err == nil && sharedFile.Active {
					// this reshare can fail because we don't force sharing of files older than 30 days...
					err := f.restartFileShareAdvanced(profile, filekey, false)
					if err != nil {
						log.Debugf("could not reshare file: %v", err)
					}
				} else {
					log.Debugf("could not get fileshare info %v", err)
				}
			}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// GetFileShareInfo returns information related to a known fileshare.
// An error is returned if the data is incomplete (any of the .ts/.path/.active
// attributes is missing, or the timestamp cannot be parsed).
func (f *Functionality) GetFileShareInfo(profile peer.CwtchPeer, filekey string) (*SharedFile, error) {
	timestampString, tsExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", filekey))
	pathString, pathExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", filekey))
	activeString, activeExists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", filekey))
	if tsExists && pathExists && activeExists {
		timestamp, err := strconv.Atoi(timestampString)
		if err == nil {

			dateShared := time.Unix(int64(timestamp), 0)
			// shares lapse after 30 days
			expired := time.Since(dateShared) >= time.Hour*24*30

			return &SharedFile{
				FileKey:    filekey,
				Path:       pathString,
				DateShared: dateShared,
				// a share is only Active when it is both unexpired and flagged active
				Active:  !expired && activeString == constants.True,
				Expired: expired,
			}, nil
		}
	}
	return nil, fmt.Errorf("nonexistant or malformed fileshare %v", filekey)
}
|
|
||||||
|
|
||||||
// ShareFile given a profile and a conversation handle, sets up a file sharing process to share the file
// at filepath. Returns the derived file key ("<roothash>.<nonce>"), the serialized
// overlay message wrapper to send to peers, and any error from manifest creation or
// share start-up.
func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer) (string, string, error) {

	// assert that we are allowed to share files
	if !profile.IsFeatureEnabled(constants.FileSharingExperiment) {
		return "", "", errors.New("filesharing functionality is not enabled")
	}

	manifest, err := files.CreateManifest(filepath)
	if err != nil {
		return "", "", err
	}

	// a fresh random nonce makes each share of the same file a distinct offer
	var nonce [24]byte
	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
		log.Errorf("Cannot read from random: %v\n", err)
		return "", "", err
	}

	message := OverlayMessage{
		Name:  path.Base(manifest.FileName),
		Hash:  hex.EncodeToString(manifest.RootHash),
		Nonce: hex.EncodeToString(nonce[:]),
		Size:  manifest.FileSizeInBytes,
	}

	data, _ := json.Marshal(message)

	wrapper := model.MessageWrapper{
		Overlay: model.OverlayFileSharing,
		Data:    string(data),
	}

	wrapperJSON, _ := json.Marshal(wrapper)
	key := fmt.Sprintf("%x.%x", manifest.RootHash, nonce)
	serializedManifest, _ := json.Marshal(manifest)

	// Store the size of the manifest (in chunks) as part of the public scope so contacts who we share the file with
	// can fetch the manifest as if it were a file.
	// manifest.FileName gets redacted in filesharing_subsystem (to remove the system-specific file hierarchy),
	// but we need to *store* the full path because the sender also uses it to locate the file
	lenDiff := len(filepath) - len(path.Base(filepath))

	// the sender needs to know the location of the file so they can display it in a preview...
	// This eventually becomes a message attribute, but we don't have access to the message identifier until
	// the message gets sent.
	// In the worst case, this can be obtained using CheckDownloadStatus (though in practice this lookup will be
	// rare because the UI will almost always initiate the construction of a preview a file directly after sending it).
	profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", key), filepath)

	// Store the timestamp, manifest and manifest size for later.
	profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", key), strconv.FormatInt(time.Now().Unix(), 10))
	profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), string(serializedManifest))
	// NOTE(review): the advertised size subtracts lenDiff to account for the path
	// redaction described above — confirm this stays in sync with filesharing_subsystem
	profile.SetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key), strconv.Itoa(int(math.Ceil(float64(len(serializedManifest)-lenDiff)/float64(files.DefaultChunkSize)))))

	err = f.startFileShare(profile, key, string(serializedManifest), false)

	return key, string(wrapperJSON), err
}
|
|
||||||
|
|
||||||
// SharedFile encapsulates information about a shared file
// including the file key, file path, the original share date and the
// current sharing status
type SharedFile struct {
	// FileKey is the roothash.nonce identifier derived for this file share
	FileKey string

	// Path is the OS specific location of the file
	Path string

	// DateShared is the original datetime the file was shared
	DateShared time.Time

	// Active is true if the file is currently being shared, false otherwise
	Active bool

	// Expired is true if the file is not eligible to be shared (because e.g. it has been too long since the file was originally shared,
	// or the file no longer exists).
	Expired bool
}
|
|
||||||
|
|
||||||
func (f *Functionality) EnhancedGetSharedFiles(profile peer.CwtchPeer, conversationID int) string {
|
|
||||||
data, err := json.Marshal(f.GetSharedFiles(profile, conversationID))
|
|
||||||
if err == nil {
|
|
||||||
return string(data)
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSharedFiles returns all file shares associated with a given conversation
|
|
||||||
func (f *Functionality) GetSharedFiles(profile peer.CwtchPeer, conversationID int) []SharedFile {
|
|
||||||
var sharedFiles []SharedFile
|
|
||||||
ci, err := profile.GetConversationInfo(conversationID)
|
|
||||||
if err == nil {
|
|
||||||
for k := range ci.Attributes {
|
|
||||||
// when we share a file with a conversation we set a single attribute conversation.filesharing.<filekey>
|
|
||||||
if strings.HasPrefix(k, "conversation.filesharing") {
|
|
||||||
parts := strings.SplitN(k, ".", 3)
|
|
||||||
if len(parts) == 3 {
|
|
||||||
key := parts[2]
|
|
||||||
sharedFile, err := f.GetFileShareInfo(profile, key)
|
|
||||||
if err == nil {
|
|
||||||
sharedFiles = append(sharedFiles, *sharedFile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sharedFiles
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateDownloadPath creates a file path that doesn't currently exist on the filesystem.
// basePath is the target directory, fileName the suggested (untrusted) name; when
// overwrite is false the name is suffixed with " (2)", " (3)", ... until neither the
// file nor its ".manifest" sibling exists.
func GenerateDownloadPath(basePath, fileName string, overwrite bool) (filePath, manifestPath string) {
	// sanitize the suggested *file name*; the original code ran this on the
	// still-empty filePath, so unsanitized names reached the filesystem
	re := regexp.MustCompile(`[^A-Za-z0-9._-]`)
	fileName = re.ReplaceAllString(fileName, "")
	// avoid hidden files on linux
	fileName = strings.TrimLeft(fileName, ".")
	// avoid empties
	if strings.TrimSpace(fileName) == "" {
		fileName = "untitled"
	}
	// if you like it, put a / on it
	if !strings.HasSuffix(basePath, string(os.PathSeparator)) {
		basePath = fmt.Sprintf("%s%s", basePath, string(os.PathSeparator))
	}
	filePath = fmt.Sprintf("%s%s", basePath, fileName)
	manifestPath = fmt.Sprintf("%s.manifest", filePath)

	// if file is named "file", iterate "file", "file (2)", "file (3)", ... until DNE
	// if file is named "file.ext", iterate "file.ext", "file (2).ext", "file (3).ext", ... until DNE
	parts := strings.Split(fileName, ".")
	fileNameBase := parts[0]
	fileNameExt := ""
	if len(parts) > 1 {
		fileNameBase = strings.Join(parts[0:len(parts)-1], ".")
		fileNameExt = fmt.Sprintf(".%s", parts[len(parts)-1])
	}

	if !overwrite {
		for i := 2; ; i++ {
			// use os.Stat for the existence probe: the original used os.Open,
			// leaking an open file handle on every existing candidate
			if _, err := os.Stat(filePath); os.IsNotExist(err) {
				if _, err := os.Stat(manifestPath); os.IsNotExist(err) {
					return
				}
			}
			filePath = fmt.Sprintf("%s%s (%d)%s", basePath, fileNameBase, i, fileNameExt)
			manifestPath = fmt.Sprintf("%s.manifest", filePath)
		}
	}
	return
}
|
|
||||||
|
|
||||||
// StopFileShare sends a message to the ProtocolEngine to cease sharing a particular file
|
|
||||||
func (f *Functionality) StopFileShare(profile peer.CwtchPeer, fileKey string) error {
|
|
||||||
// Note we do not do a permissions check here, as we are *always* permitted to stop sharing files.
|
|
||||||
// set the filekey status to inactive
|
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.active", fileKey), constants.False)
|
|
||||||
profile.PublishEvent(event.NewEvent(event.StopFileShare, map[event.Field]string{event.FileKey: fileKey}))
|
|
||||||
return nil // cannot fail
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopAllFileShares sends a message to the ProtocolEngine to cease sharing all files
|
|
||||||
func (f *Functionality) StopAllFileShares(profile peer.CwtchPeer) {
|
|
||||||
// Note we do not do a permissions check here, as we are *always* permitted to stop sharing files.
|
|
||||||
profile.PublishEvent(event.NewEvent(event.StopAllFileShares, map[event.Field]string{}))
|
|
||||||
}
|
|
|
@ -1,181 +0,0 @@
|
||||||
package filesharing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"cwtch.im/cwtch/peer"
|
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
|
||||||
"cwtch.im/cwtch/settings"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ImagePreviewsFunctionality struct {
|
|
||||||
downloadFolder string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImagePreviewsFunctionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
|
|
||||||
i.downloadFolder = settings.DownloadPath
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImagePreviewsFunctionality) EventsToRegister() []event.Type {
|
|
||||||
return []event.Type{event.ProtocolEngineCreated, event.NewMessageFromPeer, event.NewMessageFromGroup, event.PeerStateChange, event.Heartbeat}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImagePreviewsFunctionality) ExperimentsToRegister() []string {
|
|
||||||
return []string{constants.FileSharingExperiment, constants.ImagePreviewsExperiment}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImagePreviewsFunctionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
|
||||||
if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
|
|
||||||
switch ev.EventType {
|
|
||||||
case event.NewMessageFromPeer:
|
|
||||||
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
|
||||||
if err == nil {
|
|
||||||
if ci.GetPeerAC().RenderImages {
|
|
||||||
i.handleImagePreviews(profile, &ev, ci.ID, ci.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case event.NewMessageFromGroup:
|
|
||||||
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
|
||||||
if err == nil {
|
|
||||||
if ci.GetPeerAC().RenderImages {
|
|
||||||
i.handleImagePreviews(profile, &ev, ci.ID, ci.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case event.PeerStateChange:
|
|
||||||
ci, err := profile.FetchConversationInfo(ev.Data["RemotePeer"])
|
|
||||||
if err == nil {
|
|
||||||
// if we have re-authenticated with this peer then request their profile image...
|
|
||||||
if connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] == connections.AUTHENTICATED {
|
|
||||||
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case event.Heartbeat:
|
|
||||||
conversations, err := profile.FetchConversations()
|
|
||||||
if err == nil {
|
|
||||||
for _, ci := range conversations {
|
|
||||||
if profile.GetPeerState(ci.Handle) == connections.AUTHENTICATED {
|
|
||||||
// if we have enabled file shares for this contact, then send them our profile image
|
|
||||||
// NOTE: In the past, Cwtch treated "profile image" as a public file share. As such, anyone with the file key and who is able
|
|
||||||
// to authenticate with the profile (i.e. non-blocked peers) can download the file (if the global profile images experiment is enabled)
|
|
||||||
// To better allow for fine-grained permissions (and to support hybrid group permissions), we want to enable per-conversation file
|
|
||||||
// sharing permissions. As such, profile images are now only shared with contacts with that permission enabled.
|
|
||||||
// (i.e. all previous accepted contacts, new accepted contacts, and contacts who have this toggle set explictly)
|
|
||||||
if ci.GetPeerAC().ShareFiles {
|
|
||||||
profile.SendScopedZonedGetValToContact(ci.ID, attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case event.ProtocolEngineCreated:
|
|
||||||
// Now that the Peer Engine is Activated, Reshare Profile Images
|
|
||||||
key, exists := profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.CustomProfileImageKey)
|
|
||||||
if exists {
|
|
||||||
serializedManifest, _ := profile.GetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key))
|
|
||||||
// reset the share timestamp, currently file shares are hardcoded to expire after 30 days...
|
|
||||||
// we reset the profile image here so that it is always available.
|
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", key), strconv.FormatInt(time.Now().Unix(), 10))
|
|
||||||
log.Debugf("Custom Profile Image: %v %s", key, serializedManifest)
|
|
||||||
f := Functionality{}
|
|
||||||
f.RestartFileShare(profile, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImagePreviewsFunctionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImagePreviewsFunctionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
|
|
||||||
if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
|
|
||||||
_, zone, path := path.GetScopeZonePath()
|
|
||||||
if exists && zone == attr.ProfileZone && path == constants.CustomProfileImageKey {
|
|
||||||
// We only download from accepted conversations
|
|
||||||
if conversation.GetPeerAC().RenderImages {
|
|
||||||
fileKey := value
|
|
||||||
basepath := i.downloadFolder
|
|
||||||
fsf := FunctionalityGate()
|
|
||||||
// We always overwrite profile image files...
|
|
||||||
fp, mp := GenerateDownloadPath(basepath, fileKey, true)
|
|
||||||
|
|
||||||
// If we have marked this file as complete...
|
|
||||||
if value, exists := profile.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey)); exists && value == event.True {
|
|
||||||
if _, err := os.Stat(fp); err == nil {
|
|
||||||
// file is marked as completed downloaded and exists...
|
|
||||||
// Note: this will also resend the FileDownloaded event if successful...
|
|
||||||
if fsf.VerifyOrResumeDownload(profile, conversation.ID, fileKey, constants.ImagePreviewMaxSizeInBytes) == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Otherwise we fall through...
|
|
||||||
}
|
|
||||||
// Something went wrong...the file is marked as complete but either doesn't exist, or is corrupted such that we can't continue...
|
|
||||||
// So mark complete as false...
|
|
||||||
profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.complete", fileKey), event.False)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we have reached this point then we need to download the file again...
|
|
||||||
log.Debugf("Downloading Profile Image %v %v %v", fp, mp, fileKey)
|
|
||||||
fsf.DownloadFile(profile, conversation.ID, fp, mp, fileKey, constants.ImagePreviewMaxSizeInBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleImagePreviews checks settings and, if appropriate, auto-downloads any images
|
|
||||||
func (i *ImagePreviewsFunctionality) handleImagePreviews(profile peer.CwtchPeer, ev *event.Event, conversationID, senderID int) {
|
|
||||||
if profile.IsFeatureEnabled(constants.FileSharingExperiment) && profile.IsFeatureEnabled(constants.ImagePreviewsExperiment) {
|
|
||||||
ci, err := profile.GetConversationInfo(senderID)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("attempted to call handleImagePreviews with unknown conversation: %v", senderID)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ci.GetPeerAC().ShareFiles || !ci.GetPeerAC().RenderImages {
|
|
||||||
log.Infof("refusing to autodownload files from sender: %v. conversation AC does not permit image rendering", senderID)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Short-circuit failures
|
|
||||||
// Don't auto-download images if the download path does not exist.
|
|
||||||
if i.downloadFolder == "" {
|
|
||||||
log.Errorf("download folder %v is not set", i.downloadFolder)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't auto-download images if the download path does not exist.
|
|
||||||
if _, err := os.Stat(i.downloadFolder); os.IsNotExist(err) {
|
|
||||||
log.Errorf("download folder %v does not exist", i.downloadFolder)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If file sharing is enabled then reshare all active files...
|
|
||||||
fsf := FunctionalityGate()
|
|
||||||
|
|
||||||
// Now look at the image preview experiment
|
|
||||||
var cm model.MessageWrapper
|
|
||||||
err = json.Unmarshal([]byte(ev.Data[event.Data]), &cm)
|
|
||||||
if err == nil && cm.Overlay == model.OverlayFileSharing {
|
|
||||||
log.Debugf("Received File Sharing Message")
|
|
||||||
var fm OverlayMessage
|
|
||||||
err = json.Unmarshal([]byte(cm.Data), &fm)
|
|
||||||
if err == nil {
|
|
||||||
if fm.ShouldAutoDL() {
|
|
||||||
basepath := i.downloadFolder
|
|
||||||
fp, mp := GenerateDownloadPath(basepath, fm.Name, false)
|
|
||||||
log.Debugf("autodownloading file! %v %v %v", basepath, fp, i.downloadFolder)
|
|
||||||
ev.Data["Auto"] = constants.True
|
|
||||||
mID, _ := strconv.Atoi(ev.Data["Index"])
|
|
||||||
profile.UpdateMessageAttribute(conversationID, 0, mID, constants.AttrDownloaded, constants.True)
|
|
||||||
fsf.DownloadFile(profile, senderID, fp, mp, fm.FileKey(), constants.ImagePreviewMaxSizeInBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,150 +0,0 @@
|
||||||
package servers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"cwtch.im/cwtch/peer"
|
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
|
||||||
"cwtch.im/cwtch/settings"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ServerList is a json encoded list of servers
|
|
||||||
ServerList = event.Field("ServerList")
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UpdateServerInfo is an event containing a ProfileOnion and a ServerList
|
|
||||||
UpdateServerInfo = event.Type("UpdateServerInfo")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Functionality groups some common UI triggered functions for contacts...
|
|
||||||
type Functionality struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Functionality) NotifySettingsUpdate(settings settings.GlobalSettings) {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Functionality) EventsToRegister() []event.Type {
|
|
||||||
return []event.Type{event.QueueJoinServer}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Functionality) ExperimentsToRegister() []string {
|
|
||||||
return []string{constants.GroupsExperiment}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnEvent handles File Sharing Hooks like Manifest Received and FileDownloaded
|
|
||||||
func (f *Functionality) OnEvent(ev event.Event, profile peer.CwtchPeer) {
|
|
||||||
if profile.IsFeatureEnabled(constants.GroupsExperiment) {
|
|
||||||
switch ev.EventType {
|
|
||||||
// keep the UI in sync with the current backend server updates...
|
|
||||||
// queue join server gets triggered on load and on new servers so it's a nice
|
|
||||||
// low-noise event to hook into...
|
|
||||||
case event.QueueJoinServer:
|
|
||||||
f.PublishServerUpdate(profile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Functionality) OnContactRequestValue(profile peer.CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath) {
|
|
||||||
// nop
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Functionality) OnContactReceiveValue(profile peer.CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool) {
|
|
||||||
// nopt
|
|
||||||
}
|
|
||||||
|
|
||||||
// FunctionalityGate returns filesharing functionality - gates now happen on function calls.
|
|
||||||
func FunctionalityGate() *Functionality {
|
|
||||||
return new(Functionality)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerKey packages up key information...
|
|
||||||
// TODO: Can this be merged with KeyBundle?
|
|
||||||
type ServerKey struct {
|
|
||||||
Type string `json:"type"`
|
|
||||||
Key string `json:"key"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// SyncStatus packages up server sync information...
|
|
||||||
type SyncStatus struct {
|
|
||||||
StartTime string `json:"startTime"`
|
|
||||||
LastMessageTime string `json:"lastMessageTime"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Server encapsulates the information needed to represent a server...
|
|
||||||
type Server struct {
|
|
||||||
Onion string `json:"onion"`
|
|
||||||
Identifier int `json:"identifier"`
|
|
||||||
Status string `json:"status"`
|
|
||||||
Description string `json:"description"`
|
|
||||||
Keys []ServerKey `json:"keys"`
|
|
||||||
SyncProgress SyncStatus `json:"syncProgress"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishServerUpdate serializes the current list of group servers and publishes an event with this information
|
|
||||||
func (f *Functionality) PublishServerUpdate(profile peer.CwtchPeer) error {
|
|
||||||
serverListForOnion := f.GetServerInfoList(profile)
|
|
||||||
serversListBytes, err := json.Marshal(serverListForOnion)
|
|
||||||
profile.PublishEvent(event.NewEvent(UpdateServerInfo, map[event.Field]string{"ProfileOnion": profile.GetOnion(), ServerList: string(serversListBytes)}))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetServerInfoList compiles all the information the UI might need regarding all servers..
|
|
||||||
func (f *Functionality) GetServerInfoList(profile peer.CwtchPeer) []Server {
|
|
||||||
var servers []Server
|
|
||||||
for _, server := range profile.GetServers() {
|
|
||||||
server, err := f.GetServerInfo(profile, server)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("profile server list is corrupted: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
servers = append(servers, server)
|
|
||||||
}
|
|
||||||
return servers
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteServer purges a server and all related keys from a profile
|
|
||||||
func (f *Functionality) DeleteServerInfo(profile peer.CwtchPeer, serverOnion string) error {
|
|
||||||
// Servers are stores as special conversations
|
|
||||||
ci, err := profile.FetchConversationInfo(serverOnion)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Purge keys...
|
|
||||||
// NOTE: This will leave some groups in the state of being unable to connect to a particular
|
|
||||||
// server.
|
|
||||||
profile.DeleteConversation(ci.ID)
|
|
||||||
f.PublishServerUpdate(profile)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetServerInfo compiles all the information the UI might need regarding a particular server including any verified
|
|
||||||
// cryptographic keys
|
|
||||||
func (f *Functionality) GetServerInfo(profile peer.CwtchPeer, serverOnion string) (Server, error) {
|
|
||||||
serverInfo, err := profile.FetchConversationInfo(serverOnion)
|
|
||||||
if err != nil {
|
|
||||||
return Server{}, errors.New("server not found")
|
|
||||||
}
|
|
||||||
keyTypes := []model.KeyType{model.KeyTypeServerOnion, model.KeyTypeTokenOnion, model.KeyTypePrivacyPass}
|
|
||||||
var serverKeys []ServerKey
|
|
||||||
|
|
||||||
for _, keyType := range keyTypes {
|
|
||||||
if key, has := serverInfo.GetAttribute(attr.PublicScope, attr.ServerKeyZone, string(keyType)); has {
|
|
||||||
serverKeys = append(serverKeys, ServerKey{Type: string(keyType), Key: key})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
description, _ := serverInfo.GetAttribute(attr.LocalScope, attr.ServerZone, constants.Description)
|
|
||||||
startTimeStr := serverInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncPreLastMessageTime)).ToString()]
|
|
||||||
recentTimeStr := serverInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncMostRecentMessageTime)).ToString()]
|
|
||||||
syncStatus := SyncStatus{startTimeStr, recentTimeStr}
|
|
||||||
|
|
||||||
return Server{Onion: serverOnion, Identifier: serverInfo.ID, Status: connections.ConnectionStateName[profile.GetPeerState(serverInfo.Handle)], Keys: serverKeys, Description: description, SyncProgress: syncStatus}, nil
|
|
||||||
}
|
|
36
go.mod
36
go.mod
|
@ -1,29 +1,17 @@
|
||||||
module cwtch.im/cwtch
|
module cwtch.im/cwtch
|
||||||
|
|
||||||
go 1.20
|
go 1.14
|
||||||
|
|
||||||
require (
|
require (
|
||||||
git.openprivacy.ca/cwtch.im/tapir v0.6.0
|
git.openprivacy.ca/cwtch.im/tapir v0.4.3
|
||||||
git.openprivacy.ca/openprivacy/connectivity v1.11.0
|
git.openprivacy.ca/openprivacy/connectivity v1.4.4
|
||||||
git.openprivacy.ca/openprivacy/log v1.0.3
|
git.openprivacy.ca/openprivacy/log v1.0.2
|
||||||
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c
|
github.com/gtank/ristretto255 v0.1.2
|
||||||
github.com/mutecomm/go-sqlcipher/v4 v4.4.2
|
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
|
||||||
github.com/onsi/ginkgo/v2 v2.1.4
|
github.com/struCoder/pidusage v0.1.3
|
||||||
github.com/onsi/gomega v1.20.1
|
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee
|
||||||
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d
|
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
|
||||||
)
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
|
||||||
|
golang.org/x/tools v0.1.2 // indirect
|
||||||
require (
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
|
||||||
filippo.io/edwards25519 v1.0.0 // indirect
|
|
||||||
git.openprivacy.ca/openprivacy/bine v0.0.5 // indirect
|
|
||||||
github.com/google/go-cmp v0.5.8 // indirect
|
|
||||||
github.com/gtank/merlin v0.1.1 // indirect
|
|
||||||
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b // indirect
|
|
||||||
github.com/stretchr/testify v1.7.0 // indirect
|
|
||||||
go.etcd.io/bbolt v1.3.6 // indirect
|
|
||||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect
|
|
||||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 // indirect
|
|
||||||
golang.org/x/text v0.3.7 // indirect
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
)
|
)
|
||||||
|
|
128
go.sum
128
go.sum
|
@ -1,70 +1,110 @@
|
||||||
filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.1 h1:+d1dHyPvZ8JmdfFe/oXWJPardzflRIhcdILtkeArkW8=
|
||||||
filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.1/go.mod h1:q6RMI/TQvRN8SCtRY3GryOawMcB0uG6NjP6M77oSMx8=
|
||||||
git.openprivacy.ca/cwtch.im/tapir v0.6.0 h1:TtnKjxitkIDMM7Qn0n/u+mOHRLJzuQUYjYRu5n0/QFY=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.2 h1:thLWqqY1LkirWFcy9Tg6NgWeYbvo9xBm+s2XVnCIvpY=
|
||||||
git.openprivacy.ca/cwtch.im/tapir v0.6.0/go.mod h1:iQIq4y7N+DuP3CxyG66WNEC/d6vzh+wXvvOmelB+KoY=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.2/go.mod h1:q6RMI/TQvRN8SCtRY3GryOawMcB0uG6NjP6M77oSMx8=
|
||||||
git.openprivacy.ca/openprivacy/bine v0.0.5 h1:DJs5gqw3SkvLSgRDvroqJxZ7F+YsbxbBRg5t0rU5gYE=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.3 h1:Q7F8JijgOMMYSy3IdZl7+r6qkWckEWV1+EY7q6MAkVs=
|
||||||
git.openprivacy.ca/openprivacy/bine v0.0.5/go.mod h1:fwdeq6RO08WDkV0k7HfArsjRvurVULoUQmT//iaABZM=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.3/go.mod h1:ZMg9Jzh0n3Os2aSF4z+bx/n8WBCJBN7KCQESXperYts=
|
||||||
git.openprivacy.ca/openprivacy/connectivity v1.11.0 h1:roASjaFtQLu+HdH5fa2wx6F00NL3YsUTlmXBJh8aLZk=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.4 h1:g7yZkfz/vWr/t2tFXa/t0Ebr/w665uIKpxpCZ3lIPCo=
|
||||||
git.openprivacy.ca/openprivacy/connectivity v1.11.0/go.mod h1:OQO1+7OIz/jLxDrorEMzvZA6SEbpbDyLGpjoFqT3z1Y=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.4/go.mod h1:+Niy2AHhQC351ZTtfhC0uLjViCICyOxCJZsIlGKKNAU=
|
||||||
git.openprivacy.ca/openprivacy/log v1.0.3 h1:E/PMm4LY+Q9s3aDpfySfEDq/vYQontlvNj/scrPaga0=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.5 h1:AlqAhluY4ivznGoHh37Khyxy0u9IbtYskP93wgtmYx8=
|
||||||
git.openprivacy.ca/openprivacy/log v1.0.3/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
git.openprivacy.ca/cwtch.im/tapir v0.3.5/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.0 h1:clG8uORt0NKEhT4P+Dpw1pzyUuYzYBMevGqn2pciKk8=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.0/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.1 h1:9LMpQX41IzecNNlRc1FZKXHg6wlFss679tFsa3vzb3Y=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.1/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.2 h1:bxMWZnVJXX4dqqOFS7ELW4iFkVL4GS8wiRkjRv5rJe8=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.2/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.3 h1:sctSfUXHDIqaHfJPDl+5lHtmoEJolQiHTcHZGAe5Qc4=
|
||||||
|
git.openprivacy.ca/cwtch.im/tapir v0.4.3/go.mod h1:10qEaib5x021zgyZ/97JKWsEpedH5+Vfy2CvB2V+08E=
|
||||||
|
git.openprivacy.ca/openprivacy/bine v0.0.4 h1:CO7EkGyz+jegZ4ap8g5NWRuDHA/56KKvGySR6OBPW+c=
|
||||||
|
git.openprivacy.ca/openprivacy/bine v0.0.4/go.mod h1:13ZqhKyqakDsN/ZkQkIGNULsmLyqtXc46XBcnuXm/mU=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.0 h1:c7AANUCrlA4hIqXxIGDOWMtSe8CpDleD1877PShScbM=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.0/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.1 h1:zoM+j7PFj8mQeUCNiDNMe7Uq9dhcJDOhaZcSANfeDL4=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.1/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.2 h1:rQFIjWunLlRmXL5Efsv+7+1cA70T6Uza6RCy2PRm9zc=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.2/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.3 h1:i2Ad/U9FlL9dKr2bhRck7lJ8NoWyGtoEfUwoCyMT0fU=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.3/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.4 h1:11M3akVCyy/luuhMpZTM1r9Jayl7IHD944Bxsn2FDpU=
|
||||||
|
git.openprivacy.ca/openprivacy/connectivity v1.4.4/go.mod h1:JVRCIdL+lAG6ohBFWiKeC/MN42nnC0sfFszR9XG6vPQ=
|
||||||
|
git.openprivacy.ca/openprivacy/log v1.0.1/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
||||||
|
git.openprivacy.ca/openprivacy/log v1.0.2 h1:HLP4wsw4ljczFAelYnbObIs821z+jgMPCe8uODPnGQM=
|
||||||
|
git.openprivacy.ca/openprivacy/log v1.0.2/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
|
||||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
|
||||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
|
||||||
github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
|
github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
|
||||||
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
|
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
|
||||||
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c h1:gkfmnY4Rlt3VINCo4uKdpvngiibQyoENVj5Q88sxXhE=
|
github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc=
|
||||||
github.com/gtank/ristretto255 v0.1.3-0.20210930101514-6bb39798585c/go.mod h1:tDPFhGdt3hJWqtKwx57i9baiB1Cj0yAg22VOPUqm5vY=
|
github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o=
|
||||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
|
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0=
|
||||||
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
|
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
|
||||||
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b h1:QrHweqAtyJ9EwCaGHBu1fghwxIPiopAHV06JlXrMHjk=
|
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||||
github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b/go.mod h1:xxLb2ip6sSUts3g1irPVHyk/DGslwQsNOo9I7smJfNU=
|
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||||
github.com/mutecomm/go-sqlcipher/v4 v4.4.2 h1:eM10bFtI4UvibIsKr10/QT7Yfz+NADfjZYh0GKrXUNc=
|
|
||||||
github.com/mutecomm/go-sqlcipher/v4 v4.4.2/go.mod h1:mF2UmIpBnzFeBdu/ypTDb/LdbS0nk0dfSN1WUsWTjMA=
|
|
||||||
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
|
|
||||||
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
|
|
||||||
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
|
|
||||||
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
github.com/struCoder/pidusage v0.1.3 h1:pZcSa6asBE38TJtW0Nui6GeCjLTpaT/jAnNP7dUTLSQ=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/struCoder/pidusage v0.1.3/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI=
|
||||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=
|
||||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
|
||||||
|
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee h1:4yd7jl+vXjalO5ztz6Vc1VADv+S/80LGJmyl1ROJ2AI=
|
||||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d h1:3qF+Z8Hkrw9sOhrFHti9TlB1Hkac1x+DNRkv0XQiFjo=
|
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||||
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||||
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
|
||||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 h1:UiNENfZ8gDvpiWw7IpOMQ27spWmThO1RwwdQVbJahJM=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44 h1:Bli41pIlzTzf3KEY06n+xnzK/BESIg2ze4Pgfh/aI8c=
|
||||||
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
|
||||||
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
|
||||||
|
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
package attr
|
package attr
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -17,88 +16,45 @@ values stored in the LocalScope.
|
||||||
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Scope strongly types Scope strings
|
|
||||||
type Scope string
|
|
||||||
|
|
||||||
// ScopedZonedPath typed path with a scope and a zone
|
|
||||||
type ScopedZonedPath string
|
|
||||||
|
|
||||||
func (szp ScopedZonedPath) GetScopeZonePath() (Scope, Zone, string) {
|
|
||||||
scope, path := ParseScope(string(szp))
|
|
||||||
zone, zpath := ParseZone(path)
|
|
||||||
return scope, zone, zpath
|
|
||||||
}
|
|
||||||
|
|
||||||
// scopes for attributes
|
// scopes for attributes
|
||||||
const (
|
const (
|
||||||
// on a peer, local and peer supplied data
|
// on a peer, local and peer supplied data
|
||||||
LocalScope = Scope("local")
|
LocalScope = "local"
|
||||||
PeerScope = Scope("peer")
|
PeerScope = "peer"
|
||||||
ConversationScope = Scope("conversation")
|
|
||||||
|
|
||||||
// on a local profile, public data and private settings
|
// on a local profile, public data and private settings
|
||||||
PublicScope = Scope("public")
|
PublicScope = "public"
|
||||||
|
SettingsScope = "settings"
|
||||||
UnknownScope = Scope("unknown")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Separator for scope and the rest of path
|
// Separator for scope and the rest of path
|
||||||
const Separator = "."
|
const Separator = "."
|
||||||
|
|
||||||
// IntoScope converts a string to a Scope
|
// GetPublicScope takes a path and attaches the pubic scope to it
|
||||||
func IntoScope(scope string) Scope {
|
func GetPublicScope(path string) string {
|
||||||
switch scope {
|
return PublicScope + Separator + path
|
||||||
case "local":
|
}
|
||||||
return LocalScope
|
|
||||||
case "peer":
|
// GetSettingsScope takes a path and attaches the settings scope to it
|
||||||
return PeerScope
|
func GetSettingsScope(path string) string {
|
||||||
case "conversation":
|
return SettingsScope + Separator + path
|
||||||
return ConversationScope
|
}
|
||||||
case "public":
|
|
||||||
return PublicScope
|
// GetLocalScope takes a path and attaches the local scope to it
|
||||||
|
func GetLocalScope(path string) string {
|
||||||
|
return LocalScope + Separator + path
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPeerScope takes a path and attaches the peer scope to it
|
||||||
|
func GetPeerScope(path string) string {
|
||||||
|
return PeerScope + Separator + path
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetScopePath take a full path and returns the scope and the scope-less path
|
||||||
|
func GetScopePath(fullPath string) (string, string) {
|
||||||
|
parts := strings.SplitN(fullPath, Separator, 1)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return "", ""
|
||||||
}
|
}
|
||||||
return UnknownScope
|
return parts[0], parts[1]
|
||||||
}
|
|
||||||
|
|
||||||
// ConstructScopedZonedPath enforces a scope over a zoned path
|
|
||||||
func (scope Scope) ConstructScopedZonedPath(zonedPath ZonedPath) ScopedZonedPath {
|
|
||||||
return ScopedZonedPath(string(scope) + Separator + string(zonedPath))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToString converts a ScopedZonedPath to a string
|
|
||||||
func (szp ScopedZonedPath) ToString() string {
|
|
||||||
return string(szp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsLocal returns true if the scope is a local scope
|
|
||||||
func (scope Scope) IsLocal() bool {
|
|
||||||
return scope == LocalScope
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPeer returns true if the scope is a peer scope
|
|
||||||
func (scope Scope) IsPeer() bool {
|
|
||||||
return scope == PeerScope
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPublic returns true if the scope is a public scope
|
|
||||||
func (scope Scope) IsPublic() bool {
|
|
||||||
return scope == PublicScope
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsConversation returns true if the scope is a conversation scope
|
|
||||||
func (scope Scope) IsConversation() bool {
|
|
||||||
return scope == ConversationScope
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseScope takes in an untyped string and returns an explicit Scope along with the rest of the untyped path
|
|
||||||
func ParseScope(path string) (Scope, string) {
|
|
||||||
parts := strings.SplitN(path, Separator, 3)
|
|
||||||
|
|
||||||
log.Debugf("parsed scope: %v %v", parts, path)
|
|
||||||
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return UnknownScope, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return IntoScope(parts[0]), parts[1] + Separator + parts[2]
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,71 +0,0 @@
|
||||||
package attr
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Zone forces attributes to belong to a given subsystem e.g profile or filesharing
|
|
||||||
// Note: Zone is different from Scope which deals with public visibility of a given attribute
|
|
||||||
type Zone string
|
|
||||||
|
|
||||||
// ZonedPath explicitly types paths that contain a zone for strongly typed APIs
|
|
||||||
type ZonedPath string
|
|
||||||
|
|
||||||
const (
|
|
||||||
|
|
||||||
// ProfileZone for attributes related to profile details like name and profile image
|
|
||||||
ProfileZone = Zone("profile")
|
|
||||||
|
|
||||||
// LegacyGroupZone for attributes related to legacy group experiment
|
|
||||||
LegacyGroupZone = Zone("legacygroup")
|
|
||||||
|
|
||||||
// FilesharingZone for attributes related to file sharing
|
|
||||||
FilesharingZone = Zone("filesharing")
|
|
||||||
|
|
||||||
// ServerKeyZone for attributes related to Server Keys
|
|
||||||
ServerKeyZone = Zone("serverkey")
|
|
||||||
|
|
||||||
// ServerZone is for attributes related to the server
|
|
||||||
ServerZone = Zone("server")
|
|
||||||
|
|
||||||
// UnknownZone is a catch all useful for error handling
|
|
||||||
UnknownZone = Zone("unknown")
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConstructZonedPath takes a path and attaches a zone to it.
|
|
||||||
// Note that this returns a ZonedPath which isn't directly usable, it must be given to ConstructScopedZonedPath
|
|
||||||
// in order to be realized into an actual attribute path.
|
|
||||||
func (zone Zone) ConstructZonedPath(path string) ZonedPath {
|
|
||||||
return ZonedPath(string(zone) + Separator + path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (zp ZonedPath) ToString() string {
|
|
||||||
return string(zp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseZone takes in an untyped string and returns an explicit Zone along with the rest of the untyped path
|
|
||||||
func ParseZone(path string) (Zone, string) {
|
|
||||||
parts := strings.SplitN(path, Separator, 2)
|
|
||||||
|
|
||||||
log.Debugf("parsed zone: %v %v", parts, path)
|
|
||||||
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return UnknownZone, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
switch Zone(parts[0]) {
|
|
||||||
case ProfileZone:
|
|
||||||
return ProfileZone, parts[1]
|
|
||||||
case LegacyGroupZone:
|
|
||||||
return LegacyGroupZone, parts[1]
|
|
||||||
case FilesharingZone:
|
|
||||||
return FilesharingZone, parts[1]
|
|
||||||
case ServerKeyZone:
|
|
||||||
return ServerKeyZone, parts[1]
|
|
||||||
case ServerZone:
|
|
||||||
return ServerZone, parts[1]
|
|
||||||
default:
|
|
||||||
return UnknownZone, parts[1]
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,74 +0,0 @@
|
||||||
package constants
|
|
||||||
|
|
||||||
// Name refers to a Profile Name
|
|
||||||
const Name = "name"
|
|
||||||
|
|
||||||
// Onion refers the Onion address of the profile
|
|
||||||
const Onion = "onion"
|
|
||||||
|
|
||||||
// Tag describes the type of a profile e.g. default password / encrypted etc.
|
|
||||||
const Tag = "tag"
|
|
||||||
|
|
||||||
// ProfileTypeV1DefaultPassword is a tag describing a profile protected with the default password.
|
|
||||||
const ProfileTypeV1DefaultPassword = "v1-defaultPassword"
|
|
||||||
|
|
||||||
// ProfileTypeV1Password is a tag describing a profile encrypted derived from a user-provided password.
|
|
||||||
const ProfileTypeV1Password = "v1-userPassword"
|
|
||||||
|
|
||||||
// GroupID is the ID of a group
|
|
||||||
const GroupID = "groupid"
|
|
||||||
|
|
||||||
// GroupServer identifies the Server the legacy group is hosted on
|
|
||||||
const GroupServer = "groupserver"
|
|
||||||
|
|
||||||
// GroupKey is the name of the group key attribute...
|
|
||||||
const GroupKey = "groupkey"
|
|
||||||
|
|
||||||
// True - true
|
|
||||||
const True = "true"
|
|
||||||
|
|
||||||
// False - false
|
|
||||||
const False = "false"
|
|
||||||
|
|
||||||
// AttrAuthor - conversation attribute for author of the message - referenced by pub key rather than conversation id because of groups.
|
|
||||||
const AttrAuthor = "author"
|
|
||||||
|
|
||||||
// AttrAck - conversation attribute for acknowledgement status
|
|
||||||
const AttrAck = "ack"
|
|
||||||
|
|
||||||
// AttrErr - conversation attribute for errored status
|
|
||||||
const AttrErr = "error"
|
|
||||||
|
|
||||||
// AttrSentTimestamp - conversation attribute for the time the message was (nominally) sent
|
|
||||||
const AttrSentTimestamp = "sent"
|
|
||||||
|
|
||||||
// Legacy MessageFlags
|
|
||||||
|
|
||||||
// AttrRejected - conversation attribute for storing rejected prompts (for invites)
|
|
||||||
const AttrRejected = "rejected-invite"
|
|
||||||
|
|
||||||
// AttrDownloaded - conversation attribute for storing downloaded prompts (for file downloads)
|
|
||||||
const AttrDownloaded = "file-downloaded"
|
|
||||||
|
|
||||||
const CustomProfileImageKey = "custom-profile-image"
|
|
||||||
|
|
||||||
const SyncPreLastMessageTime = "SyncPreLastMessageTime"
|
|
||||||
const SyncMostRecentMessageTime = "SyncMostRecentMessageTime"
|
|
||||||
|
|
||||||
const AttrLastConnectionTime = "last-connection-time"
|
|
||||||
const PeerAutostart = "autostart"
|
|
||||||
const PeerAppearOffline = "appear-offline"
|
|
||||||
const Archived = "archived"
|
|
||||||
|
|
||||||
const ProfileStatus = "profile-status"
|
|
||||||
const ProfileAttribute1 = "profile-attribute-1"
|
|
||||||
const ProfileAttribute2 = "profile-attribute-2"
|
|
||||||
const ProfileAttribute3 = "profile-attribute-3"
|
|
||||||
|
|
||||||
// Description is used on server contacts,
|
|
||||||
const Description = "description"
|
|
||||||
|
|
||||||
// Used to store the status of acl migrations
|
|
||||||
const ACLVersion = "acl-version"
|
|
||||||
const ACLVersionOne = "acl-v1"
|
|
||||||
const ACLVersionTwo = "acl-v2"
|
|
|
@ -1,13 +0,0 @@
|
||||||
package constants
|
|
||||||
|
|
||||||
// ServerPrefix precedes a server import statement
|
|
||||||
const ServerPrefix = "server:"
|
|
||||||
|
|
||||||
// TofuBundlePrefix precedes a server and a group import statement
|
|
||||||
const TofuBundlePrefix = "tofubundle:"
|
|
||||||
|
|
||||||
// GroupPrefix precedes a group import statement
|
|
||||||
const GroupPrefix = "torv3"
|
|
||||||
|
|
||||||
// ImportBundlePrefix is an error api constant for import bundle error messages
|
|
||||||
const ImportBundlePrefix = "importBundle"
|
|
|
@ -1,7 +0,0 @@
|
||||||
package constants
|
|
||||||
|
|
||||||
// InvalidPasswordError is returned when an incorrect password is provided to a function that requires the current active password
|
|
||||||
const InvalidPasswordError = "invalid_password_error"
|
|
||||||
|
|
||||||
// PasswordsDoNotMatchError is returned when two passwords do not match
|
|
||||||
const PasswordsDoNotMatchError = "passwords_do_not_match"
|
|
|
@ -1,21 +0,0 @@
|
||||||
package constants
|
|
||||||
|
|
||||||
const GroupsExperiment = "tapir-groups-experiment"
|
|
||||||
|
|
||||||
// FileSharingExperiment Allows file sharing
|
|
||||||
const FileSharingExperiment = "filesharing"
|
|
||||||
|
|
||||||
// ImagePreviewsExperiment Causes images (up to ImagePreviewMaxSizeInBytes, from accepted contacts) to auto-dl and preview
|
|
||||||
// requires FileSharingExperiment to be enabled
|
|
||||||
const ImagePreviewsExperiment = "filesharing-images"
|
|
||||||
|
|
||||||
// ImagePreviewMaxSizeInBytes Files up to this size will be autodownloaded using ImagePreviewsExperiment
|
|
||||||
const ImagePreviewMaxSizeInBytes = 20971520
|
|
||||||
|
|
||||||
const MessageFormattingExperiment = "message-formatting"
|
|
||||||
|
|
||||||
// AutoDLFileExts Files with these extensions will be autodownloaded using ImagePreviewsExperiment
|
|
||||||
var AutoDLFileExts = [...]string{".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp"}
|
|
||||||
|
|
||||||
// BlodeuweddExperiment enables the Blodeuwedd Assistant
|
|
||||||
const BlodeuweddExperiment = "blodeuwedd"
|
|
|
@ -1,156 +0,0 @@
|
||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AccessControl is a type determining client assigned authorization to a peer
|
|
||||||
// for a given conversation
|
|
||||||
type AccessControl struct {
|
|
||||||
Blocked bool // Any attempts from this handle to connect are blocked overrides all other settings
|
|
||||||
|
|
||||||
// Basic Conversation Rights
|
|
||||||
Read bool // Allows a handle to access the conversation
|
|
||||||
Append bool // Allows a handle to append new messages to the conversation
|
|
||||||
|
|
||||||
AutoConnect bool // Profile should automatically try to connect with peer
|
|
||||||
ExchangeAttributes bool // Profile should automatically exchange attributes like Name, Profile Image, etc.
|
|
||||||
|
|
||||||
// Extension Related Permissions
|
|
||||||
ShareFiles bool // Allows a handle to share files to a conversation
|
|
||||||
RenderImages bool // Indicates that certain filetypes should be autodownloaded and rendered when shared by this contact
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultP2PAccessControl defaults to a semi-trusted peer with no access to special extensions.
|
|
||||||
func DefaultP2PAccessControl() AccessControl {
|
|
||||||
return AccessControl{Read: true, Append: true, ExchangeAttributes: true, Blocked: false,
|
|
||||||
AutoConnect: true, ShareFiles: false, RenderImages: false}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccessControlList represents an access control list for a conversation. Mapping handles to conversation
|
|
||||||
// functions
|
|
||||||
type AccessControlList map[string]AccessControl
|
|
||||||
|
|
||||||
// Serialize transforms the ACL into json.
|
|
||||||
func (acl *AccessControlList) Serialize() []byte {
|
|
||||||
data, _ := json.Marshal(acl)
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeserializeAccessControlList takes in JSON and returns an AccessControlList
|
|
||||||
func DeserializeAccessControlList(data []byte) (AccessControlList, error) {
|
|
||||||
var acl AccessControlList
|
|
||||||
err := json.Unmarshal(data, &acl)
|
|
||||||
return acl, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attributes a type-driven encapsulation of an Attribute map.
|
|
||||||
type Attributes map[string]string
|
|
||||||
|
|
||||||
// Serialize transforms an Attributes map into a JSON struct
|
|
||||||
func (a *Attributes) Serialize() []byte {
|
|
||||||
data, _ := json.Marshal(a)
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeserializeAttributes converts a JSON struct into an Attributes map
|
|
||||||
func DeserializeAttributes(data []byte) Attributes {
|
|
||||||
attributes := make(Attributes)
|
|
||||||
err := json.Unmarshal(data, &attributes)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("error deserializing attributes (this is likely a programming error): %v", err)
|
|
||||||
return make(Attributes)
|
|
||||||
}
|
|
||||||
return attributes
|
|
||||||
}
|
|
||||||
|
|
||||||
// Conversation encapsulates high-level information about a conversation, including the
|
|
||||||
// handle, any set attributes, the access control list associated with the message tree and the
|
|
||||||
// accepted status of the conversation (whether the user has consented into the conversation).
|
|
||||||
type Conversation struct {
|
|
||||||
ID int
|
|
||||||
Handle string
|
|
||||||
Attributes Attributes
|
|
||||||
ACL AccessControlList
|
|
||||||
|
|
||||||
// Deprecated, please use ACL for permissions related functions
|
|
||||||
Accepted bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAttribute is a helper function that fetches a conversation attribute by scope, zone and key
|
|
||||||
func (ci *Conversation) GetAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool) {
|
|
||||||
if value, exists := ci.Attributes[scope.ConstructScopedZonedPath(zone.ConstructZonedPath(key)).ToString()]; exists {
|
|
||||||
return value, true
|
|
||||||
}
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPeerAC returns a suitable Access Control object for a the given peer conversation
|
|
||||||
// If this is called for a group conversation, this method will error and return a safe default AC.
|
|
||||||
func (ci *Conversation) GetPeerAC() AccessControl {
|
|
||||||
if acl, exists := ci.ACL[ci.Handle]; exists {
|
|
||||||
return acl
|
|
||||||
}
|
|
||||||
log.Errorf("attempted to access a Peer Access Control object from %v but peer ACL is undefined. This is likely a programming error", ci.Handle)
|
|
||||||
return DefaultP2PAccessControl()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsCwtchPeer is a helper attribute that identifies whether a conversation is a cwtch peer
|
|
||||||
func (ci *Conversation) IsCwtchPeer() bool {
|
|
||||||
return tor.IsValidHostname(ci.Handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsGroup is a helper attribute that identifies whether a conversation is a legacy group
|
|
||||||
func (ci *Conversation) IsGroup() bool {
|
|
||||||
if _, exists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)).ToString()]; exists {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsServer is a helper attribute that identifies whether a conversation is with a server
|
|
||||||
func (ci *Conversation) IsServer() bool {
|
|
||||||
if _, exists := ci.Attributes[attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(string(BundleType))).ToString()]; exists {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerSyncProgress is only valid during a server being in the AUTHENTICATED state and therefor in the syncing process
|
|
||||||
// it returns a double (0-1) representing the estimated progress of the syncing
|
|
||||||
func (ci *Conversation) ServerSyncProgress() float64 {
|
|
||||||
startTimeStr, startExists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncPreLastMessageTime)).ToString()]
|
|
||||||
recentTimeStr, recentExists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.SyncMostRecentMessageTime)).ToString()]
|
|
||||||
|
|
||||||
if !startExists || !recentExists {
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
|
|
||||||
startTime, err := time.Parse(startTimeStr, time.RFC3339Nano)
|
|
||||||
if err != nil {
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
recentTime, err := time.Parse(recentTimeStr, time.RFC3339Nano)
|
|
||||||
if err != nil {
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
|
|
||||||
syncRange := time.Since(startTime)
|
|
||||||
pointFromStart := startTime.Sub(recentTime)
|
|
||||||
return pointFromStart.Seconds() / syncRange.Seconds()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConversationMessage bundles an instance of a conversation message row
|
|
||||||
type ConversationMessage struct {
|
|
||||||
ID int
|
|
||||||
Body string
|
|
||||||
Attr Attributes
|
|
||||||
Signature string
|
|
||||||
ContentHash string
|
|
||||||
}
|
|
|
@ -1,41 +0,0 @@
|
||||||
package model
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
// Experiments are optional functionality that can be enabled/disabled by an application either completely or individually.
|
|
||||||
// examples of experiments include File Sharing, Profile Images and Groups.
|
|
||||||
type Experiments struct {
|
|
||||||
enabled bool
|
|
||||||
experiments sync.Map
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitExperiments encapsulates a set of experiments separate from their storage in GlobalSettings.
|
|
||||||
func InitExperiments(enabled bool, experiments map[string]bool) Experiments {
|
|
||||||
|
|
||||||
var syncExperiments sync.Map
|
|
||||||
for experiment, set := range experiments {
|
|
||||||
syncExperiments.Store(experiment, set)
|
|
||||||
}
|
|
||||||
|
|
||||||
return Experiments{
|
|
||||||
enabled: enabled,
|
|
||||||
experiments: syncExperiments,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsEnabled is a convenience function that takes in an experiment and returns true if it is enabled. Experiments
|
|
||||||
// are only enabled if both global experiments are turned on and if the specific experiment is also turned on.
|
|
||||||
// The one exception to this is experiments that have been promoted to default functionality which may be turned on
|
|
||||||
// even if experiments turned off globally. These experiments are defined by DefaultEnabledFunctionality.
|
|
||||||
func (e *Experiments) IsEnabled(experiment string) bool {
|
|
||||||
if !e.enabled {
|
|
||||||
// todo handle default-enabled functionality
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
enabled, exists := e.experiments.Load(experiment)
|
|
||||||
if !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return enabled.(bool)
|
|
||||||
}
|
|
269
model/group.go
269
model/group.go
|
@ -4,6 +4,7 @@ import (
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
"cwtch.im/cwtch/protocol/groups"
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
"encoding/base32"
|
"encoding/base32"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
@ -11,18 +12,18 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
|
||||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"golang.org/x/crypto/nacl/secretbox"
|
"golang.org/x/crypto/nacl/secretbox"
|
||||||
"golang.org/x/crypto/pbkdf2"
|
"golang.org/x/crypto/pbkdf2"
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CurrentGroupVersion is used to set the version of newly created groups and make sure group structs stored are correct and up to date
|
// CurrentGroupVersion is used to set the version of newly created groups and make sure group structs stored are correct and up to date
|
||||||
const CurrentGroupVersion = 4
|
const CurrentGroupVersion = 3
|
||||||
|
|
||||||
// GroupInvitePrefix identifies a particular string as being a serialized group invite.
|
// GroupInvitePrefix identifies a particular string as being a serialized group invite.
|
||||||
const GroupInvitePrefix = "torv3"
|
const GroupInvitePrefix = "torv3"
|
||||||
|
@ -31,25 +32,39 @@ const GroupInvitePrefix = "torv3"
|
||||||
// tied to a server under a given group key. Each group has a set of Messages.
|
// tied to a server under a given group key. Each group has a set of Messages.
|
||||||
type Group struct {
|
type Group struct {
|
||||||
// GroupID is now derived from the GroupKey and the GroupServer
|
// GroupID is now derived from the GroupKey and the GroupServer
|
||||||
GroupID string
|
GroupID string
|
||||||
GroupName string
|
GroupKey [32]byte
|
||||||
GroupKey [32]byte
|
GroupServer string
|
||||||
GroupServer string
|
Timeline Timeline `json:"-"`
|
||||||
Attributes map[string]string //legacy to not use
|
Accepted bool
|
||||||
Version int
|
IsCompromised bool
|
||||||
Timeline Timeline `json:"-"`
|
Attributes map[string]string
|
||||||
LocalID string
|
lock sync.Mutex
|
||||||
|
LocalID string
|
||||||
|
State string `json:"-"`
|
||||||
|
UnacknowledgedMessages []Message
|
||||||
|
Version int
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGroup initializes a new group associated with a given CwtchServer
|
// NewGroup initializes a new group associated with a given CwtchServer
|
||||||
func NewGroup(server string) (*Group, error) {
|
func NewGroup(server string) (*Group, error) {
|
||||||
group := new(Group)
|
group := new(Group)
|
||||||
|
group.Version = CurrentGroupVersion
|
||||||
|
group.LocalID = GenerateRandomID()
|
||||||
|
group.Accepted = true // we are starting a group, so we assume we want to connect to it...
|
||||||
if !tor.IsValidHostname(server) {
|
if !tor.IsValidHostname(server) {
|
||||||
return nil, errors.New("server is not a valid v3 onion")
|
return nil, errors.New("server is not a valid v3 onion")
|
||||||
}
|
}
|
||||||
|
|
||||||
group.GroupServer = server
|
group.GroupServer = server
|
||||||
|
|
||||||
|
var groupID [16]byte
|
||||||
|
if _, err := io.ReadFull(rand.Reader, groupID[:]); err != nil {
|
||||||
|
log.Errorf("Cannot read from random: %v\n", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
group.GroupID = fmt.Sprintf("%x", groupID)
|
||||||
|
|
||||||
var groupKey [32]byte
|
var groupKey [32]byte
|
||||||
if _, err := io.ReadFull(rand.Reader, groupKey[:]); err != nil {
|
if _, err := io.ReadFull(rand.Reader, groupKey[:]); err != nil {
|
||||||
log.Errorf("Error: Cannot read from random: %v\n", err)
|
log.Errorf("Error: Cannot read from random: %v\n", err)
|
||||||
|
@ -59,26 +74,31 @@ func NewGroup(server string) (*Group, error) {
|
||||||
|
|
||||||
// Derive Group ID from the group key and the server public key. This binds the group to a particular server
|
// Derive Group ID from the group key and the server public key. This binds the group to a particular server
|
||||||
// and key.
|
// and key.
|
||||||
var err error
|
group.GroupID = deriveGroupID(groupKey[:], server)
|
||||||
group.GroupID, err = deriveGroupID(groupKey[:], server)
|
|
||||||
return group, err
|
group.Attributes = make(map[string]string)
|
||||||
|
// By default we set the "name" of the group to a random string, we can override this later, but to simplify the
|
||||||
|
// codes around invite, we assume that this is always set.
|
||||||
|
group.Attributes[attr.GetLocalScope("name")] = group.GroupID
|
||||||
|
return group, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CheckGroup returns true only if the ID of the group is cryptographically valid.
|
// CheckGroup returns true only if the ID of the group is cryptographically valid.
|
||||||
func (g *Group) CheckGroup() bool {
|
func (g *Group) CheckGroup() bool {
|
||||||
id, _ := deriveGroupID(g.GroupKey[:], g.GroupServer)
|
return g.GroupID == deriveGroupID(g.GroupKey[:], g.GroupServer)
|
||||||
return g.GroupID == id
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// deriveGroupID hashes together the key and the hostname to create a bound identifier that can later
|
// deriveGroupID hashes together the key and the hostname to create a bound identifier that can later
|
||||||
// be referenced and checked by profiles when they receive invites and messages.
|
// be referenced and checked by profiles when they receive invites and messages.
|
||||||
func deriveGroupID(groupKey []byte, serverHostname string) (string, error) {
|
func deriveGroupID(groupKey []byte, serverHostname string) string {
|
||||||
data, err := base32.StdEncoding.DecodeString(strings.ToUpper(serverHostname))
|
data, _ := base32.StdEncoding.DecodeString(strings.ToUpper(serverHostname))
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
pubkey := data[0:ed25519.PublicKeySize]
|
pubkey := data[0:ed25519.PublicKeySize]
|
||||||
return hex.EncodeToString(pbkdf2.Key(groupKey, pubkey, 4096, 16, sha512.New)), nil
|
return hex.EncodeToString(pbkdf2.Key(groupKey, pubkey, 4096, 16, sha512.New))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compromised should be called if we detect a groupkey leak
|
||||||
|
func (g *Group) Compromised() {
|
||||||
|
g.IsCompromised = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Invite generates a invitation that can be sent to a cwtch peer
|
// Invite generates a invitation that can be sent to a cwtch peer
|
||||||
|
@ -86,7 +106,7 @@ func (g *Group) Invite() (string, error) {
|
||||||
|
|
||||||
gci := &groups.GroupInvite{
|
gci := &groups.GroupInvite{
|
||||||
GroupID: g.GroupID,
|
GroupID: g.GroupID,
|
||||||
GroupName: g.GroupName,
|
GroupName: g.Attributes[attr.GetLocalScope("name")],
|
||||||
SharedKey: g.GroupKey[:],
|
SharedKey: g.GroupKey[:],
|
||||||
ServerHost: g.GroupServer,
|
ServerHost: g.GroupServer,
|
||||||
}
|
}
|
||||||
|
@ -96,7 +116,81 @@ func (g *Group) Invite() (string, error) {
|
||||||
return serializedInvite, err
|
return serializedInvite, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncryptMessage takes a message and encrypts the message under the group key.
|
// AddSentMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline
|
||||||
|
func (g *Group) AddSentMessage(message *groups.DecryptedGroupMessage, sig []byte) Message {
|
||||||
|
g.lock.Lock()
|
||||||
|
defer g.lock.Unlock()
|
||||||
|
timelineMessage := Message{
|
||||||
|
Message: message.Text,
|
||||||
|
Timestamp: time.Unix(int64(message.Timestamp), 0),
|
||||||
|
Received: time.Unix(0, 0),
|
||||||
|
Signature: sig,
|
||||||
|
PeerID: message.Onion,
|
||||||
|
PreviousMessageSig: message.PreviousMessageSig,
|
||||||
|
ReceivedByServer: false,
|
||||||
|
}
|
||||||
|
g.UnacknowledgedMessages = append(g.UnacknowledgedMessages, timelineMessage)
|
||||||
|
return timelineMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorSentMessage removes a sent message from the unacknowledged list and sets its error flag if found, otherwise returns false
|
||||||
|
func (g *Group) ErrorSentMessage(sig []byte, error string) bool {
|
||||||
|
g.lock.Lock()
|
||||||
|
defer g.lock.Unlock()
|
||||||
|
var message *Message
|
||||||
|
|
||||||
|
// Delete the message from the unack'd buffer if it exists
|
||||||
|
for i, unAckedMessage := range g.UnacknowledgedMessages {
|
||||||
|
if compareSignatures(unAckedMessage.Signature, sig) {
|
||||||
|
message = &unAckedMessage
|
||||||
|
g.UnacknowledgedMessages = append(g.UnacknowledgedMessages[:i], g.UnacknowledgedMessages[i+1:]...)
|
||||||
|
|
||||||
|
message.Error = error
|
||||||
|
g.Timeline.Insert(message)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline
|
||||||
|
func (g *Group) AddMessage(message *groups.DecryptedGroupMessage, sig []byte) (*Message, bool) {
|
||||||
|
|
||||||
|
g.lock.Lock()
|
||||||
|
defer g.lock.Unlock()
|
||||||
|
|
||||||
|
// Delete the message from the unack'd buffer if it exists
|
||||||
|
for i, unAckedMessage := range g.UnacknowledgedMessages {
|
||||||
|
if compareSignatures(unAckedMessage.Signature, sig) {
|
||||||
|
g.UnacknowledgedMessages = append(g.UnacknowledgedMessages[:i], g.UnacknowledgedMessages[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
timelineMessage := &Message{
|
||||||
|
Message: message.Text,
|
||||||
|
Timestamp: time.Unix(int64(message.Timestamp), 0),
|
||||||
|
Received: time.Now(),
|
||||||
|
Signature: sig,
|
||||||
|
PeerID: message.Onion,
|
||||||
|
PreviousMessageSig: message.PreviousMessageSig,
|
||||||
|
ReceivedByServer: true,
|
||||||
|
Error: "",
|
||||||
|
Acknowledged: true,
|
||||||
|
}
|
||||||
|
seen := g.Timeline.Insert(timelineMessage)
|
||||||
|
|
||||||
|
return timelineMessage, seen
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTimeline provides a safe copy of the timeline
|
||||||
|
func (g *Group) GetTimeline() (timeline []Message) {
|
||||||
|
g.lock.Lock()
|
||||||
|
defer g.lock.Unlock()
|
||||||
|
return append(g.Timeline.GetMessages(), g.UnacknowledgedMessages...)
|
||||||
|
}
|
||||||
|
|
||||||
|
//EncryptMessage takes a message and encrypts the message under the group key.
|
||||||
func (g *Group) EncryptMessage(message *groups.DecryptedGroupMessage) ([]byte, error) {
|
func (g *Group) EncryptMessage(message *groups.DecryptedGroupMessage) ([]byte, error) {
|
||||||
var nonce [24]byte
|
var nonce [24]byte
|
||||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||||
|
@ -129,6 +223,21 @@ func (g *Group) DecryptMessage(ciphertext []byte) (bool, *groups.DecryptedGroupM
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetAttribute allows applications to store arbitrary configuration info at the group level.
|
||||||
|
func (g *Group) SetAttribute(name string, value string) {
|
||||||
|
g.lock.Lock()
|
||||||
|
defer g.lock.Unlock()
|
||||||
|
g.Attributes[name] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAttribute returns the value of a value set with SetAttribute. If no such value has been set exists is set to false.
|
||||||
|
func (g *Group) GetAttribute(name string) (value string, exists bool) {
|
||||||
|
g.lock.Lock()
|
||||||
|
defer g.lock.Unlock()
|
||||||
|
value, exists = g.Attributes[name]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// ValidateInvite takes in a serialized invite and returns the invite structure if it is cryptographically valid
|
// ValidateInvite takes in a serialized invite and returns the invite structure if it is cryptographically valid
|
||||||
// and an error if it is not
|
// and an error if it is not
|
||||||
func ValidateInvite(invite string) (*groups.GroupInvite, error) {
|
func ValidateInvite(invite string) (*groups.GroupInvite, error) {
|
||||||
|
@ -153,7 +262,7 @@ func ValidateInvite(invite string) (*groups.GroupInvite, error) {
|
||||||
|
|
||||||
// Derive the servers public key (we can ignore the error checking here because it's already been
|
// Derive the servers public key (we can ignore the error checking here because it's already been
|
||||||
// done by IsValidHostname, and check that we derive the same groupID...
|
// done by IsValidHostname, and check that we derive the same groupID...
|
||||||
derivedGroupID, _ := deriveGroupID(gci.SharedKey, gci.ServerHost)
|
derivedGroupID := deriveGroupID(gci.SharedKey, gci.ServerHost)
|
||||||
if derivedGroupID != gci.GroupID {
|
if derivedGroupID != gci.GroupID {
|
||||||
return nil, errors.New("group id is invalid")
|
return nil, errors.New("group id is invalid")
|
||||||
}
|
}
|
||||||
|
@ -166,115 +275,3 @@ func ValidateInvite(invite string) (*groups.GroupInvite, error) {
|
||||||
}
|
}
|
||||||
return nil, errors.New("invite has invalid structure")
|
return nil, errors.New("invite has invalid structure")
|
||||||
}
|
}
|
||||||
|
|
||||||
// AttemptDecryption takes a ciphertext and signature and attempts to decrypt it under known groups.
|
|
||||||
// If successful, adds the message to the group's timeline
|
|
||||||
func (g *Group) AttemptDecryption(ciphertext []byte, signature []byte) (bool, *groups.DecryptedGroupMessage) {
|
|
||||||
success, dgm := g.DecryptMessage(ciphertext)
|
|
||||||
// the second check here is not needed, but DecryptMessage violates the usual
|
|
||||||
// go calling convention and we want static analysis tools to pick it up
|
|
||||||
if success && dgm != nil {
|
|
||||||
|
|
||||||
// Attempt to serialize this message
|
|
||||||
serialized, err := json.Marshal(dgm)
|
|
||||||
|
|
||||||
// Someone send a message that isn't a valid Decrypted Group Message. Since we require this struct in orer
|
|
||||||
// to verify the message, we simply ignore it.
|
|
||||||
if err != nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This now requires knowledge of the Sender, the Onion and the Specific Decrypted Group Message (which should only
|
|
||||||
// be derivable from the cryptographic key) which contains many unique elements such as the time and random padding
|
|
||||||
verified := g.VerifyGroupMessage(dgm.Onion, g.GroupID, base64.StdEncoding.EncodeToString(serialized), signature)
|
|
||||||
|
|
||||||
if !verified {
|
|
||||||
// An earlier version of this protocol mistakenly signed the ciphertext of the message
|
|
||||||
// instead of the serialized decrypted group message.
|
|
||||||
// This has 2 issues:
|
|
||||||
// 1. A server with knowledge of group members public keys AND the Group ID would be able to detect valid messages
|
|
||||||
// 2. It made the metadata-security of a group dependent on keeping the cryptographically derived Group ID secret.
|
|
||||||
// While not awful, it also isn't good. For Version 3 groups only we permit Cwtch to check this older signature
|
|
||||||
// structure in a backwards compatible way for the duration of the Groups Experiment.
|
|
||||||
// TODO: Delete this check when Groups are no long Experimental
|
|
||||||
if g.Version == 3 {
|
|
||||||
verified = g.VerifyGroupMessage(dgm.Onion, g.GroupID, string(ciphertext), signature)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// So we have a message that has a valid group key, but the signature can't be verified.
|
|
||||||
// The most obvious explanation for this is that the group key has been compromised (or we are in an open group and the server is being malicious)
|
|
||||||
// Either way, someone who has the private key is being detectably bad so we are just going to throw this message away and mark the group as Compromised.
|
|
||||||
if !verified {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return true, dgm
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we couldn't find a group to decrypt the message with we just return false. This is an expected case
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyGroupMessage confirms the authenticity of a message given an sender onion, message and signature.
|
|
||||||
// The goal of this function is 2-fold:
|
|
||||||
// 1. We confirm that the sender referenced in the group text is the actual sender of the message (or at least
|
|
||||||
// knows the senders private key)
|
|
||||||
// 2. Secondly, we confirm that the sender sent the message to a particular group id on a specific server (it doesn't
|
|
||||||
// matter if we actually received this message from the server or from a hybrid protocol, all that matters is
|
|
||||||
// that the sender and receivers agree that this message was intended for the group
|
|
||||||
//
|
|
||||||
// The 2nd point is important as it prevents an attack documented in the original Cwtch paper (and later at
|
|
||||||
// https://docs.openprivacy.ca/cwtch-security-handbook/groups.html) in which a malicious profile sets up 2 groups
|
|
||||||
// on two different servers with the same key and then forwards messages between them to convince the parties in
|
|
||||||
// each group that they are actually in one big group (with the intent to later censor and/or selectively send messages
|
|
||||||
// to each group).
|
|
||||||
func (g *Group) VerifyGroupMessage(onion string, groupID string, message string, signature []byte) bool {
|
|
||||||
// We use our group id, a known reference server and the ciphertext of the message.
|
|
||||||
m := groupID + g.GroupServer + message
|
|
||||||
|
|
||||||
// Otherwise we derive the public key from the sender and check it against that.
|
|
||||||
decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion))
|
|
||||||
if err == nil && len(decodedPub) >= 32 {
|
|
||||||
return ed25519.Verify(decodedPub[:32], []byte(m), signature)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncryptMessageToGroup when given a message and a group, encrypts and signs the message under the group and
|
|
||||||
// profile
|
|
||||||
func EncryptMessageToGroup(message string, author primitives.Identity, group *Group, prevSig string) ([]byte, []byte, *groups.DecryptedGroupMessage, error) {
|
|
||||||
if len(message) > MaxGroupMessageLength {
|
|
||||||
return nil, nil, nil, errors.New("group message is too long")
|
|
||||||
}
|
|
||||||
timestamp := time.Now().Unix()
|
|
||||||
|
|
||||||
lenPadding := MaxGroupMessageLength - len(message)
|
|
||||||
padding := make([]byte, lenPadding)
|
|
||||||
getRandomness(&padding)
|
|
||||||
hexGroupID, err := hex.DecodeString(group.GroupID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
prevSigBytes, err := base64.StdEncoding.DecodeString(prevSig)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
dm := &groups.DecryptedGroupMessage{
|
|
||||||
Onion: author.Hostname(),
|
|
||||||
Text: message,
|
|
||||||
SignedGroupID: hexGroupID,
|
|
||||||
Timestamp: uint64(timestamp),
|
|
||||||
PreviousMessageSig: prevSigBytes,
|
|
||||||
Padding: padding[:],
|
|
||||||
}
|
|
||||||
|
|
||||||
ciphertext, err := group.EncryptMessage(dm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
serialized, _ := json.Marshal(dm)
|
|
||||||
signature := author.Sign([]byte(group.GroupID + group.GroupServer + base64.StdEncoding.EncodeToString(serialized)))
|
|
||||||
return ciphertext, signature, dm, nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -4,15 +4,13 @@ import (
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"cwtch.im/cwtch/protocol/groups"
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGroup(t *testing.T) {
|
func TestGroup(t *testing.T) {
|
||||||
g, err := NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
g, _ := NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Group with real group server should not fail")
|
|
||||||
}
|
|
||||||
dgm := &groups.DecryptedGroupMessage{
|
dgm := &groups.DecryptedGroupMessage{
|
||||||
Onion: "onion",
|
Onion: "onion",
|
||||||
Text: "Hello World!",
|
Text: "Hello World!",
|
||||||
|
@ -40,11 +38,15 @@ func TestGroup(t *testing.T) {
|
||||||
|
|
||||||
encMessage, _ := g.EncryptMessage(dgm)
|
encMessage, _ := g.EncryptMessage(dgm)
|
||||||
ok, message := g.DecryptMessage(encMessage)
|
ok, message := g.DecryptMessage(encMessage)
|
||||||
if (!ok || message == nil) || message.Text != "Hello World!" {
|
if !ok || message.Text != "Hello World!" {
|
||||||
t.Errorf("group encryption was invalid, or returned wrong message decrypted:%v message:%v", ok, message)
|
t.Errorf("group encryption was invalid, or returned wrong message decrypted:%v message:%v", ok, message)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
g.SetAttribute("test", "test_value")
|
||||||
|
value, exists := g.GetAttribute("test")
|
||||||
|
if !exists || value != "test_value" {
|
||||||
|
t.Errorf("Custom Attribute Should have been set, instead %v %v", exists, value)
|
||||||
|
}
|
||||||
t.Logf("Got message %v", message)
|
t.Logf("Got message %v", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -59,12 +61,18 @@ func TestGroupErr(t *testing.T) {
|
||||||
func TestGroupValidation(t *testing.T) {
|
func TestGroupValidation(t *testing.T) {
|
||||||
|
|
||||||
group := &Group{
|
group := &Group{
|
||||||
GroupID: "",
|
GroupID: "",
|
||||||
GroupKey: [32]byte{},
|
GroupKey: [32]byte{},
|
||||||
GroupServer: "",
|
GroupServer: "",
|
||||||
Timeline: Timeline{},
|
Timeline: Timeline{},
|
||||||
LocalID: "",
|
Accepted: false,
|
||||||
Version: 0,
|
IsCompromised: false,
|
||||||
|
Attributes: nil,
|
||||||
|
lock: sync.Mutex{},
|
||||||
|
LocalID: "",
|
||||||
|
State: "",
|
||||||
|
UnacknowledgedMessages: nil,
|
||||||
|
Version: 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
invite, _ := group.Invite()
|
invite, _ := group.Invite()
|
||||||
|
@ -76,10 +84,7 @@ func TestGroupValidation(t *testing.T) {
|
||||||
t.Logf("Error: %v", err)
|
t.Logf("Error: %v", err)
|
||||||
|
|
||||||
// Generate a valid group but replace the group server...
|
// Generate a valid group but replace the group server...
|
||||||
group, err = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
group, _ = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Group with real group server should not fail")
|
|
||||||
}
|
|
||||||
group.GroupServer = "tcnkoch4nyr3cldkemejtkpqok342rbql6iclnjjs3ndgnjgufzyxvqd"
|
group.GroupServer = "tcnkoch4nyr3cldkemejtkpqok342rbql6iclnjjs3ndgnjgufzyxvqd"
|
||||||
invite, _ = group.Invite()
|
invite, _ = group.Invite()
|
||||||
_, err = ValidateInvite(invite)
|
_, err = ValidateInvite(invite)
|
||||||
|
@ -90,10 +95,7 @@ func TestGroupValidation(t *testing.T) {
|
||||||
t.Logf("Error: %v", err)
|
t.Logf("Error: %v", err)
|
||||||
|
|
||||||
// Generate a valid group but replace the group key...
|
// Generate a valid group but replace the group key...
|
||||||
group, err = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
group, _ = NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Group with real group server should not fail")
|
|
||||||
}
|
|
||||||
group.GroupKey = sha256.Sum256([]byte{})
|
group.GroupKey = sha256.Sum256([]byte{})
|
||||||
invite, _ = group.Invite()
|
invite, _ = group.Invite()
|
||||||
_, err = ValidateInvite(invite)
|
_, err = ValidateInvite(invite)
|
||||||
|
|
|
@ -1,110 +0,0 @@
|
||||||
package model_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
"cwtch.im/cwtch/protocol/groups"
|
|
||||||
"encoding/base64"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
|
||||||
. "github.com/onsi/ginkgo/v2"
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ = Describe("group models", func() {
|
|
||||||
var (
|
|
||||||
newgroup *model.Group
|
|
||||||
anothergroup *model.Group
|
|
||||||
dgm groups.DecryptedGroupMessage
|
|
||||||
alice primitives.Identity
|
|
||||||
)
|
|
||||||
|
|
||||||
BeforeEach(func() {
|
|
||||||
newgroup, _ = model.NewGroup("iikv7tizbyxc42rsagnjxss65h3nfiwrkkoiikh7ui27r5xkav7gzuid")
|
|
||||||
anothergroup, _ = model.NewGroup("iikv7tizbyxc42rsagnjxss65h3nfiwrkkoiikh7ui27r5xkav7gzuid")
|
|
||||||
|
|
||||||
alice, _ = primitives.InitializeEphemeralIdentity()
|
|
||||||
|
|
||||||
dgm = groups.DecryptedGroupMessage{
|
|
||||||
Text: "hello world",
|
|
||||||
Onion: "some random onion",
|
|
||||||
Timestamp: 0,
|
|
||||||
SignedGroupID: nil,
|
|
||||||
PreviousMessageSig: nil,
|
|
||||||
Padding: nil,
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("on creation of a group", func() {
|
|
||||||
It("should pass the cryptographic check", func() {
|
|
||||||
Expect(newgroup.CheckGroup()).To(Equal(true))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("after generating an invite", func() {
|
|
||||||
It("should validate", func() {
|
|
||||||
invite, err := newgroup.Invite()
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
anotherGroup, err := model.ValidateInvite(invite)
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
|
|
||||||
Expect(anotherGroup.GroupID).To(Equal(newgroup.GroupID))
|
|
||||||
Expect(anotherGroup.GroupName).To(Equal(newgroup.GroupName))
|
|
||||||
Expect(anotherGroup.SharedKey).To(Equal(newgroup.GroupKey[:]))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("when encrypting a message", func() {
|
|
||||||
Context("decrypting with the same group", func() {
|
|
||||||
It("should succeed", func() {
|
|
||||||
ciphertext, err := newgroup.EncryptMessage(&dgm)
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
success, decryptedMessage := newgroup.DecryptMessage(ciphertext)
|
|
||||||
Expect(success).To(Equal(true))
|
|
||||||
Expect(decryptedMessage.Text).To(Equal(dgm.Text))
|
|
||||||
Expect(decryptedMessage.Onion).To(Equal(dgm.Onion))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("decrypting with a different group", func() {
|
|
||||||
It("should fail", func() {
|
|
||||||
ciphertext, err := newgroup.EncryptMessage(&dgm)
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
success, decryptedMessage := anothergroup.DecryptMessage(ciphertext)
|
|
||||||
Expect(success).To(Equal(false))
|
|
||||||
Expect(decryptedMessage).To(BeNil())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("when alice encrypts a message to new group", func() {
|
|
||||||
It("should succeed and bob should succeed in decrypting it", func() {
|
|
||||||
ciphertext, sign, _, err := model.EncryptMessageToGroup("hello world", alice, newgroup, base64.StdEncoding.EncodeToString([]byte("hello world")))
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
success, dgm := newgroup.AttemptDecryption(ciphertext, sign)
|
|
||||||
Expect(success).To(BeTrue())
|
|
||||||
Expect(dgm.Text).To(Equal("hello world"))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("when alice encrypts a message to new group", func() {
|
|
||||||
It("should succeed and eve should fail in decrypting it", func() {
|
|
||||||
ciphertext, sign, _, err := model.EncryptMessageToGroup("hello world", alice, newgroup, base64.StdEncoding.EncodeToString([]byte("hello world")))
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
success, dgm := anothergroup.AttemptDecryption(ciphertext, sign)
|
|
||||||
Expect(success).To(BeFalse())
|
|
||||||
Expect(dgm).To(BeNil())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("when alice encrypts a message to new group", func() {
|
|
||||||
Context("and the server messes with the signature", func() {
|
|
||||||
It("bob should be unable to verify the message with the wrong signature", func() {
|
|
||||||
ciphertext, _, _, err := model.EncryptMessageToGroup("hello world", alice, newgroup, base64.StdEncoding.EncodeToString([]byte("hello world")))
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
success, dgm := newgroup.AttemptDecryption(ciphertext, []byte("bad signature"))
|
|
||||||
Expect(success).To(BeFalse())
|
|
||||||
Expect(dgm).To(BeNil())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
})
|
|
136
model/message.go
136
model/message.go
|
@ -1,9 +1,7 @@
|
||||||
package model
|
package model
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/sha256"
|
"encoding/json"
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
@ -15,24 +13,6 @@ type Timeline struct {
|
||||||
Messages []Message
|
Messages []Message
|
||||||
SignedGroupID []byte
|
SignedGroupID []byte
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
|
|
||||||
// a cache to allow quick checks for existing messages...
|
|
||||||
signatureCache map[string]int
|
|
||||||
|
|
||||||
// a cache to allowing looking up messages by content hash
|
|
||||||
// we need this for features like reply-to message, and other self
|
|
||||||
// referential applications.
|
|
||||||
// note: that the index stored here is not global as different peers may have difference views of the timeline
|
|
||||||
// depending on if they save history, and when the last time they purged their timeline was, as such we can't
|
|
||||||
// simply send the index of the message.
|
|
||||||
hashCache map[string][]int
|
|
||||||
}
|
|
||||||
|
|
||||||
// LocallyIndexedMessage is a type wrapper around a Message and a TimeLine Index that is local to this
|
|
||||||
// instance of the timeline.
|
|
||||||
type LocallyIndexedMessage struct {
|
|
||||||
Message
|
|
||||||
LocalIndex int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Message is a local representation of a given message sent over a group chat channel.
|
// Message is a local representation of a given message sent over a group chat channel.
|
||||||
|
@ -50,12 +30,9 @@ type Message struct {
|
||||||
Flags uint64
|
Flags uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// MessageBaseSize 2021.06 byte size of an *empty* message json serialized
|
// MessageBaseSize is a rough estimate of the base number of bytes the struct uses before strings are populated
|
||||||
const MessageBaseSize float64 = 463
|
const MessageBaseSize = 104
|
||||||
|
|
||||||
// compareSignatures checks if a and b are equal. Note: this function does
|
|
||||||
// not need to be constant time - in fact it is better that it is not as it's only main use
|
|
||||||
// is in sorting timeline state consistently.
|
|
||||||
func compareSignatures(a []byte, b []byte) bool {
|
func compareSignatures(a []byte, b []byte) bool {
|
||||||
if len(a) != len(b) {
|
if len(a) != len(b) {
|
||||||
return false
|
return false
|
||||||
|
@ -81,58 +58,17 @@ func (t *Timeline) GetMessages() []Message {
|
||||||
func (t *Timeline) GetCopy() *Timeline {
|
func (t *Timeline) GetCopy() *Timeline {
|
||||||
t.lock.Lock()
|
t.lock.Lock()
|
||||||
defer t.lock.Unlock()
|
defer t.lock.Unlock()
|
||||||
|
bytes, _ := json.Marshal(t)
|
||||||
newt := &Timeline{}
|
newt := &Timeline{}
|
||||||
// initialize the timeline and copy the message over...
|
json.Unmarshal(bytes, newt)
|
||||||
newt.SetMessages(t.Messages)
|
|
||||||
return newt
|
return newt
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMessages sets the Messages of this timeline. Only to be used in loading/initialization
|
// SetMessages sets the Messages of this timeline. Only to be used in loading/initialization
|
||||||
func (t *Timeline) SetMessages(messages []Message) {
|
func (t *Timeline) SetMessages(messages []Message) {
|
||||||
t.lock.Lock()
|
|
||||||
t.init()
|
|
||||||
t.lock.Unlock()
|
|
||||||
for _, m := range messages {
|
|
||||||
t.Insert(&m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessagesByHash attempts to find messages that match the given
|
|
||||||
// content hash in the timeline. If successful it returns a list of messages as well as their local index
|
|
||||||
// , on failure it returns an error.
|
|
||||||
// We return a list of messages because content hashes are not guaranteed to be unique from a given Peer. This allows
|
|
||||||
// us to do things like: ensure that reply-to and quotes reference the last seen message from the message they are quoted
|
|
||||||
// in or detect duplicate messages from a peer.
|
|
||||||
func (t *Timeline) GetMessagesByHash(contentHash string) ([]LocallyIndexedMessage, error) {
|
|
||||||
t.lock.Lock()
|
t.lock.Lock()
|
||||||
defer t.lock.Unlock()
|
defer t.lock.Unlock()
|
||||||
t.init()
|
t.Messages = messages
|
||||||
if idxs, exists := t.hashCache[contentHash]; exists {
|
|
||||||
var messages []LocallyIndexedMessage
|
|
||||||
for _, idx := range idxs {
|
|
||||||
messages = append(messages, LocallyIndexedMessage{LocalIndex: idx, Message: t.Messages[idx]})
|
|
||||||
}
|
|
||||||
return messages, nil
|
|
||||||
}
|
|
||||||
return nil, errors.New("cannot find message by hash")
|
|
||||||
}
|
|
||||||
|
|
||||||
// calculateHash calculates the content hash of a given message
|
|
||||||
// the content used is the sender of the message, the body of the message
|
|
||||||
//
|
|
||||||
// content hashes must be calculable across timeline views so that different participants can
|
|
||||||
// calculate the same hash for the same message - as such we cannot use timestamps from peers or groups
|
|
||||||
// as they are mostly fuzzy.
|
|
||||||
//
|
|
||||||
// As a reminder: for p2p messages PeerID is authenticated by the initial 3DH handshake, for groups
|
|
||||||
// each message is signed by the sender, and this signature is checked prior to inclusion in the timeline.
|
|
||||||
//
|
|
||||||
// Multiple messages from the same peer can result in the same hash (where the same user sends the same message more
|
|
||||||
// than once) - in this case we will only store the idx of the most recent message - and use that for reference lookups.
|
|
||||||
func (t *Timeline) calculateHash(message Message) string {
|
|
||||||
content := []byte(message.PeerID + message.Message)
|
|
||||||
contentBasedHash := sha256.Sum256(content)
|
|
||||||
return base64.StdEncoding.EncodeToString(contentBasedHash[:])
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Len gets the length of the timeline
|
// Len gets the length of the timeline
|
||||||
|
@ -172,66 +108,22 @@ func (t *Timeline) Less(i, j int) bool {
|
||||||
func (t *Timeline) Sort() {
|
func (t *Timeline) Sort() {
|
||||||
t.lock.Lock()
|
t.lock.Lock()
|
||||||
defer t.lock.Unlock()
|
defer t.lock.Unlock()
|
||||||
|
|
||||||
sort.Sort(t)
|
sort.Sort(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Insert a message into the timeline in a thread safe way.
|
// Insert inserts a message into the timeline in a thread safe way.
|
||||||
func (t *Timeline) Insert(mi *Message) int {
|
func (t *Timeline) Insert(mi *Message) bool {
|
||||||
t.lock.Lock()
|
t.lock.Lock()
|
||||||
defer t.lock.Unlock()
|
defer t.lock.Unlock()
|
||||||
|
|
||||||
// assert timeline is initialized
|
for _, m := range t.Messages {
|
||||||
t.init()
|
// If the message already exists, then we don't add it
|
||||||
|
if compareSignatures(m.Signature, mi.Signature) {
|
||||||
// check that we haven't seen this message before (this has no impact on p2p messages, but is essential for
|
return true
|
||||||
// group messages)
|
|
||||||
// FIXME: The below code now checks if the message has a signature. If it doesn't then skip duplication check.
|
|
||||||
// We do this because p2p messages right now do not have a signature, and so many p2p messages are not stored
|
|
||||||
// with a signature. In the future in hybrid groups this check will go away as all timelines will use the same
|
|
||||||
// underlying protocol.
|
|
||||||
// This is currently safe to do because p2p does not rely on signatures and groups will verify the signature of
|
|
||||||
// messages prior to generating an event to include them in the timeline.
|
|
||||||
if len(mi.Signature) != 0 {
|
|
||||||
idx, exists := t.signatureCache[base64.StdEncoding.EncodeToString(mi.Signature)]
|
|
||||||
if exists {
|
|
||||||
t.Messages[idx].Acknowledged = true
|
|
||||||
return idx
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// update the message store
|
|
||||||
t.Messages = append(t.Messages, *mi)
|
t.Messages = append(t.Messages, *mi)
|
||||||
// add to signature cache for fast checking of group messages...
|
sort.Sort(t)
|
||||||
t.signatureCache[base64.StdEncoding.EncodeToString(mi.Signature)] = len(t.Messages) - 1
|
return false
|
||||||
// content based addressing index
|
|
||||||
contentHash := t.calculateHash(*mi)
|
|
||||||
t.hashCache[contentHash] = append(t.hashCache[contentHash], len(t.Messages)-1)
|
|
||||||
return len(t.Messages) - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Timeline) init() {
|
|
||||||
// only allow this setting once...
|
|
||||||
if t.signatureCache == nil {
|
|
||||||
t.signatureCache = make(map[string]int)
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.hashCache == nil {
|
|
||||||
t.hashCache = make(map[string][]int)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSendError marks a message has having some kind of application specific error.
|
|
||||||
// Note: The message here is indexed by signature.
|
|
||||||
func (t *Timeline) SetSendError(sig []byte, e string) bool {
|
|
||||||
t.lock.Lock()
|
|
||||||
defer t.lock.Unlock()
|
|
||||||
|
|
||||||
idx, exists := t.signatureCache[base64.StdEncoding.EncodeToString(sig)]
|
|
||||||
if !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Messages[idx].Error = e
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,100 @@
|
||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMessagePadding(t *testing.T) {
|
||||||
|
|
||||||
|
// Setup the Group
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
alice := GenerateNewProfile("Alice")
|
||||||
|
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||||
|
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||||
|
|
||||||
|
gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
|
||||||
|
sarah.ProcessInvite(invite)
|
||||||
|
|
||||||
|
group := alice.GetGroup(gid)
|
||||||
|
|
||||||
|
c1, s1, err := sarah.EncryptMessageToGroup("Hello World 1", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v %v", len(c1), err)
|
||||||
|
alice.AttemptDecryption(c1, s1)
|
||||||
|
|
||||||
|
c2, s2, _ := alice.EncryptMessageToGroup("Hello World 2", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c2))
|
||||||
|
alice.AttemptDecryption(c2, s2)
|
||||||
|
|
||||||
|
c3, s3, _ := alice.EncryptMessageToGroup("Hello World 3", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c3))
|
||||||
|
alice.AttemptDecryption(c3, s3)
|
||||||
|
|
||||||
|
c4, s4, _ := alice.EncryptMessageToGroup("Hello World this is a much longer message 3", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c4))
|
||||||
|
alice.AttemptDecryption(c4, s4)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTranscriptConsistency(t *testing.T) {
|
||||||
|
timeline := new(Timeline)
|
||||||
|
|
||||||
|
// Setup the Group
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
alice := GenerateNewProfile("Alice")
|
||||||
|
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||||
|
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||||
|
|
||||||
|
gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
|
||||||
|
sarah.ProcessInvite(invite)
|
||||||
|
|
||||||
|
group := alice.GetGroup(gid)
|
||||||
|
|
||||||
|
t.Logf("group: %v, sarah %v", group, sarah)
|
||||||
|
|
||||||
|
c1, s1, _ := alice.EncryptMessageToGroup("Hello World 1", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c1))
|
||||||
|
alice.AttemptDecryption(c1, s1)
|
||||||
|
|
||||||
|
c2, s2, _ := alice.EncryptMessageToGroup("Hello World 2", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c2))
|
||||||
|
alice.AttemptDecryption(c2, s2)
|
||||||
|
|
||||||
|
c3, s3, _ := alice.EncryptMessageToGroup("Hello World 3", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c3))
|
||||||
|
alice.AttemptDecryption(c3, s3)
|
||||||
|
|
||||||
|
time.Sleep(time.Second * 1)
|
||||||
|
|
||||||
|
c4, s4, _ := alice.EncryptMessageToGroup("Hello World 4", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c4))
|
||||||
|
alice.AttemptDecryption(c4, s4)
|
||||||
|
|
||||||
|
c5, s5, _ := alice.EncryptMessageToGroup("Hello World 5", group.GroupID)
|
||||||
|
t.Logf("Length of Encrypted Message: %v", len(c5))
|
||||||
|
|
||||||
|
_, _, m1, _ := sarah.AttemptDecryption(c1, s1)
|
||||||
|
sarah.AttemptDecryption(c1, s1) // Try a duplicate
|
||||||
|
_, _, m2, _ := sarah.AttemptDecryption(c2, s2)
|
||||||
|
_, _, m3, _ := sarah.AttemptDecryption(c3, s3)
|
||||||
|
_, _, m4, _ := sarah.AttemptDecryption(c4, s4)
|
||||||
|
_, _, m5, _ := sarah.AttemptDecryption(c5, s5)
|
||||||
|
|
||||||
|
// Now we simulate a client receiving these Messages completely out of order
|
||||||
|
timeline.Insert(m1)
|
||||||
|
timeline.Insert(m5)
|
||||||
|
timeline.Insert(m4)
|
||||||
|
timeline.Insert(m3)
|
||||||
|
timeline.Insert(m2)
|
||||||
|
|
||||||
|
for i, m := range group.GetTimeline() {
|
||||||
|
if m.Message != "Hello World "+strconv.Itoa(i+1) {
|
||||||
|
t.Fatalf("Timeline Out of Order!: %v %v", i, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("Messages %v: %v %x %x", i, m.Message, m.Signature, m.PreviousMessageSig)
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,25 +0,0 @@
|
||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CalculateContentHash derives a hash using the author and the message body. It is intended to be
|
|
||||||
// globally referencable in the context of a single conversation
|
|
||||||
func CalculateContentHash(author string, messageBody string) string {
|
|
||||||
content := []byte(author + messageBody)
|
|
||||||
contentBasedHash := sha256.Sum256(content)
|
|
||||||
return base64.StdEncoding.EncodeToString(contentBasedHash[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func DeserializeMessage(message string) (*MessageWrapper, error) {
|
|
||||||
var cm MessageWrapper
|
|
||||||
err := json.Unmarshal([]byte(message), &cm)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &cm, err
|
|
||||||
}
|
|
|
@ -1,13 +0,0 @@
|
||||||
package model_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo/v2"
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestModel(t *testing.T) {
|
|
||||||
RegisterFailHandler(Fail)
|
|
||||||
RunSpecs(t, "Model Suite")
|
|
||||||
}
|
|
|
@ -1,50 +0,0 @@
|
||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MessageWrapper is the canonical Cwtch overlay wrapper
|
|
||||||
type MessageWrapper struct {
|
|
||||||
Overlay int `json:"o"`
|
|
||||||
Data string `json:"d"`
|
|
||||||
|
|
||||||
// when the data was assembled
|
|
||||||
SendTime *time.Time `json:"s,omitempty"`
|
|
||||||
|
|
||||||
// when the data was transmitted (by protocol engine e.g. over Tor)
|
|
||||||
TransitTime *time.Time `json:"t,omitempty"`
|
|
||||||
|
|
||||||
// when the data was received
|
|
||||||
RecvTime *time.Time `json:"r,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Channel is defined as being the last 3 bits of the overlay id
|
|
||||||
// Channel 0 is reserved for the main conversation
|
|
||||||
// Channel 2 is reserved for conversation admin (managed groups)
|
|
||||||
// Channel 7 is reserved for streams (no ack, no store)
|
|
||||||
func (mw MessageWrapper) Channel() int {
|
|
||||||
if mw.Overlay > 1024 {
|
|
||||||
return mw.Overlay & 0x07
|
|
||||||
}
|
|
||||||
// for backward compatibilty all overlays less than 0x400 i.e. 1024 are
|
|
||||||
// mapped to channel 0 regardless of their channel status.
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// If Overlay is a Stream Message it should not be ackd, or stored.
|
|
||||||
func (mw MessageWrapper) IsStream() bool {
|
|
||||||
return mw.Channel() == 0x07
|
|
||||||
}
|
|
||||||
|
|
||||||
// OverlayChat is the canonical identifier for chat overlays
|
|
||||||
const OverlayChat = 1
|
|
||||||
|
|
||||||
// OverlayInviteContact is the canonical identifier for the contact invite overlay
|
|
||||||
const OverlayInviteContact = 100
|
|
||||||
|
|
||||||
// OverlayInviteGroup is the canonical identifier for the group invite overlay
|
|
||||||
const OverlayInviteGroup = 101
|
|
||||||
|
|
||||||
// OverlayFileSharing is the canonical identifier for the file sharing overlay
|
|
||||||
const OverlayFileSharing = 200
|
|
458
model/profile.go
458
model/profile.go
|
@ -2,16 +2,22 @@ package model
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
"cwtch.im/cwtch/model/attr"
|
||||||
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
|
"encoding/base32"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
"golang.org/x/crypto/ed25519"
|
"golang.org/x/crypto/ed25519"
|
||||||
"io"
|
"io"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Authorization is a type determining client assigned authorization to a peer
|
// Authorization is a type determining client assigned authorization to a peer
|
||||||
// Deprecated - Only used for Importing legacy profile formats
|
|
||||||
// Still used in some APIs in UI but will be replaced prior to full deprecation
|
|
||||||
type Authorization string
|
type Authorization string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -24,7 +30,6 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// PublicProfile is a local copy of a CwtchIdentity
|
// PublicProfile is a local copy of a CwtchIdentity
|
||||||
// Deprecated - Only used for Importing legacy profile formats
|
|
||||||
type PublicProfile struct {
|
type PublicProfile struct {
|
||||||
Name string
|
Name string
|
||||||
Ed25519PublicKey ed25519.PublicKey
|
Ed25519PublicKey ed25519.PublicKey
|
||||||
|
@ -40,7 +45,6 @@ type PublicProfile struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Profile encapsulates all the attributes necessary to be a Cwtch Peer.
|
// Profile encapsulates all the attributes necessary to be a Cwtch Peer.
|
||||||
// Deprecated - Only used for Importing legacy profile formats
|
|
||||||
type Profile struct {
|
type Profile struct {
|
||||||
PublicProfile
|
PublicProfile
|
||||||
Contacts map[string]*PublicProfile
|
Contacts map[string]*PublicProfile
|
||||||
|
@ -52,6 +56,389 @@ type Profile struct {
|
||||||
// TODO: Should this be per server?
|
// TODO: Should this be per server?
|
||||||
const MaxGroupMessageLength = 1800
|
const MaxGroupMessageLength = 1800
|
||||||
|
|
||||||
|
// GenerateRandomID generates a random 16 byte hex id code
|
||||||
|
func GenerateRandomID() string {
|
||||||
|
randBytes := make([]byte, 16)
|
||||||
|
rand.Read(randBytes)
|
||||||
|
return filepath.Join(hex.EncodeToString(randBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PublicProfile) init() {
|
||||||
|
if p.Attributes == nil {
|
||||||
|
p.Attributes = make(map[string]string)
|
||||||
|
}
|
||||||
|
p.UnacknowledgedMessages = make(map[string]int)
|
||||||
|
p.LocalID = GenerateRandomID()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAttribute allows applications to store arbitrary configuration info at the profile level.
|
||||||
|
func (p *PublicProfile) SetAttribute(name string, value string) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
p.Attributes[name] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsServer returns true if the profile is associated with a server.
|
||||||
|
func (p *PublicProfile) IsServer() (isServer bool) {
|
||||||
|
_, isServer = p.GetAttribute(string(KeyTypeServerOnion))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAttribute returns the value of a value set with SetCustomAttribute. If no such value has been set exists is set to false.
|
||||||
|
func (p *PublicProfile) GetAttribute(name string) (value string, exists bool) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
value, exists = p.Attributes[name]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateNewProfile creates a new profile, with new encryption and signing keys, and a profile name.
|
||||||
|
func GenerateNewProfile(name string) *Profile {
|
||||||
|
p := new(Profile)
|
||||||
|
p.init()
|
||||||
|
p.Name = name
|
||||||
|
pub, priv, _ := ed25519.GenerateKey(rand.Reader)
|
||||||
|
p.Ed25519PublicKey = pub
|
||||||
|
p.Ed25519PrivateKey = priv
|
||||||
|
p.Onion = tor.GetTorV3Hostname(pub)
|
||||||
|
|
||||||
|
p.Contacts = make(map[string]*PublicProfile)
|
||||||
|
p.Contacts[p.Onion] = &p.PublicProfile
|
||||||
|
p.Groups = make(map[string]*Group)
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddContact allows direct manipulation of cwtch contacts
|
||||||
|
func (p *Profile) AddContact(onion string, profile *PublicProfile) {
|
||||||
|
p.lock.Lock()
|
||||||
|
profile.init()
|
||||||
|
// We expect callers to verify addresses before we get to this point, so if this isn't a
|
||||||
|
// valid address this is a noop.
|
||||||
|
if tor.IsValidHostname(onion) {
|
||||||
|
decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion[:56]))
|
||||||
|
if err == nil {
|
||||||
|
profile.Ed25519PublicKey = ed25519.PublicKey(decodedPub[:32])
|
||||||
|
p.Contacts[onion] = profile
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMessageFlags updates the flags stored with a message
|
||||||
|
func (p *Profile) UpdateMessageFlags(handle string, mIdx int, flags uint64) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
if contact, exists := p.Contacts[handle]; exists {
|
||||||
|
if len(contact.Timeline.Messages) > mIdx {
|
||||||
|
contact.Timeline.Messages[mIdx].Flags = flags
|
||||||
|
}
|
||||||
|
} else if group, exists := p.Groups[handle]; exists {
|
||||||
|
if len(group.Timeline.Messages) > mIdx {
|
||||||
|
group.Timeline.Messages[mIdx].Flags = flags
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteContact deletes a peer contact
|
||||||
|
func (p *Profile) DeleteContact(onion string) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
delete(p.Contacts, onion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteGroup deletes a group
|
||||||
|
func (p *Profile) DeleteGroup(groupID string) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
delete(p.Groups, groupID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RejectInvite rejects and removes a group invite
|
||||||
|
func (p *Profile) RejectInvite(groupID string) {
|
||||||
|
p.lock.Lock()
|
||||||
|
delete(p.Groups, groupID)
|
||||||
|
p.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSentMessageToContactTimeline allows the saving of a message sent via a direct connection chat to the profile.
|
||||||
|
func (p *Profile) AddSentMessageToContactTimeline(onion string, messageTxt string, sent time.Time, eventID string) *Message {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
contact, ok := p.Contacts[onion]
|
||||||
|
if ok {
|
||||||
|
now := time.Now()
|
||||||
|
sig := p.SignMessage(onion + messageTxt + sent.String() + now.String())
|
||||||
|
|
||||||
|
message := &Message{PeerID: p.Onion, Message: messageTxt, Timestamp: sent, Received: now, Signature: sig, Acknowledged: false}
|
||||||
|
if contact.UnacknowledgedMessages == nil {
|
||||||
|
contact.UnacknowledgedMessages = make(map[string]int)
|
||||||
|
}
|
||||||
|
contact.Timeline.Insert(message)
|
||||||
|
contact.UnacknowledgedMessages[eventID] = contact.Timeline.Len() - 1
|
||||||
|
return message
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMessageToContactTimeline allows the saving of a message sent via a direct connection chat to the profile.
|
||||||
|
func (p *Profile) AddMessageToContactTimeline(onion string, messageTxt string, sent time.Time) (message *Message) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
contact, ok := p.Contacts[onion]
|
||||||
|
|
||||||
|
// We don't really need a Signature here, but we use it to maintain order
|
||||||
|
now := time.Now()
|
||||||
|
sig := p.SignMessage(onion + messageTxt + sent.String() + now.String())
|
||||||
|
if ok {
|
||||||
|
message = &Message{PeerID: onion, Message: messageTxt, Timestamp: sent, Received: now, Signature: sig, Acknowledged: true}
|
||||||
|
contact.Timeline.Insert(message)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorSentMessageToPeer sets a sent message's error message and removes it from the unacknowledged list
|
||||||
|
func (p *Profile) ErrorSentMessageToPeer(onion string, eventID string, error string) int {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
contact, ok := p.Contacts[onion]
|
||||||
|
if ok {
|
||||||
|
mIdx, ok := contact.UnacknowledgedMessages[eventID]
|
||||||
|
if ok {
|
||||||
|
contact.Timeline.Messages[mIdx].Error = error
|
||||||
|
delete(contact.UnacknowledgedMessages, eventID)
|
||||||
|
return mIdx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// AckSentMessageToPeer sets mesage to a peer as acknowledged
|
||||||
|
func (p *Profile) AckSentMessageToPeer(onion string, eventID string) int {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
contact, ok := p.Contacts[onion]
|
||||||
|
if ok {
|
||||||
|
mIdx, ok := contact.UnacknowledgedMessages[eventID]
|
||||||
|
if ok {
|
||||||
|
contact.Timeline.Messages[mIdx].Acknowledged = true
|
||||||
|
delete(contact.UnacknowledgedMessages, eventID)
|
||||||
|
return mIdx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddGroupSentMessageError searches matching groups for the message by sig and marks it as an error
|
||||||
|
func (p *Profile) AddGroupSentMessageError(groupID string, signature []byte, error string) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
group, exists := p.Groups[groupID]
|
||||||
|
if exists {
|
||||||
|
group.ErrorSentMessage(signature, error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AcceptInvite accepts a group invite
|
||||||
|
func (p *Profile) AcceptInvite(groupID string) (err error) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
group, ok := p.Groups[groupID]
|
||||||
|
if ok {
|
||||||
|
group.Accepted = true
|
||||||
|
} else {
|
||||||
|
err = errors.New("group does not exist")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetGroups returns an unordered list of group IDs associated with this profile.
|
||||||
|
func (p *Profile) GetGroups() []string {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
var keys []string
|
||||||
|
for onion := range p.Groups {
|
||||||
|
keys = append(keys, onion)
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContacts returns an unordered list of contact onions associated with this profile.
|
||||||
|
func (p *Profile) GetContacts() []string {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
var keys []string
|
||||||
|
for onion := range p.Contacts {
|
||||||
|
if onion != p.Onion {
|
||||||
|
keys = append(keys, onion)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetContactAuthorization sets the authoirization level of a peer
|
||||||
|
func (p *Profile) SetContactAuthorization(onion string, auth Authorization) (err error) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
contact, ok := p.Contacts[onion]
|
||||||
|
if ok {
|
||||||
|
contact.Authorization = auth
|
||||||
|
} else {
|
||||||
|
err = errors.New("peer does not exist")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContactAuthorization returns the contact's authorization level
|
||||||
|
func (p *Profile) GetContactAuthorization(onion string) Authorization {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
contact, ok := p.Contacts[onion]
|
||||||
|
if ok {
|
||||||
|
return contact.Authorization
|
||||||
|
}
|
||||||
|
return AuthUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContactsAuthorizations calculates a list of Peers who are at the supplied auth levels
|
||||||
|
func (p *Profile) ContactsAuthorizations(authorizationFilter ...Authorization) map[string]Authorization {
|
||||||
|
authorizations := map[string]Authorization{}
|
||||||
|
for _, contact := range p.GetContacts() {
|
||||||
|
c, _ := p.GetContact(contact)
|
||||||
|
authorizations[c.Onion] = c.Authorization
|
||||||
|
}
|
||||||
|
return authorizations
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContact returns a contact if the profile has it
|
||||||
|
func (p *Profile) GetContact(onion string) (*PublicProfile, bool) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
contact, ok := p.Contacts[onion]
|
||||||
|
return contact, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyGroupMessage confirms the authenticity of a message given an sender onion, ciphertext and signature.
|
||||||
|
// The goal of this function is 2-fold:
|
||||||
|
// 1. We confirm that the sender referenced in the group text is the actual sender of the message (or at least
|
||||||
|
// knows the senders private key)
|
||||||
|
// 2. Secondly, we confirm that the sender sent the message to a particular group id on a specific server (it doesn't
|
||||||
|
// matter if we actually received this message from the server or from a hybrid protocol, all that matters is
|
||||||
|
// that the sender and receivers agree that this message was intended for the group
|
||||||
|
// The 2nd point is important as it prevents an attack documented in the original Cwtch paper (and later at
|
||||||
|
// https://docs.openprivacy.ca/cwtch-security-handbook/groups.html) in which a malicious profile sets up 2 groups
|
||||||
|
// on two different servers with the same key and then forwards messages between them to convince the parties in
|
||||||
|
// each group that they are actually in one big group (with the intent to later censor and/or selectively send messages
|
||||||
|
// to each group).
|
||||||
|
func (p *Profile) VerifyGroupMessage(onion string, groupID string, ciphertext []byte, signature []byte) bool {
|
||||||
|
|
||||||
|
group := p.GetGroup(groupID)
|
||||||
|
if group == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// We use our group id, a known reference server and the ciphertext of the message.
|
||||||
|
m := groupID + group.GroupServer + string(ciphertext)
|
||||||
|
|
||||||
|
// If the message is ostensibly from us then we check it against our public key...
|
||||||
|
if onion == p.Onion {
|
||||||
|
return ed25519.Verify(p.Ed25519PublicKey, []byte(m), signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise we derive the public key from the sender and check it against that.
|
||||||
|
decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion))
|
||||||
|
if err == nil && len(decodedPub) >= 32 {
|
||||||
|
return ed25519.Verify(decodedPub[:32], []byte(m), signature)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignMessage takes a given message and returns an Ed21159 signature
|
||||||
|
func (p *Profile) SignMessage(message string) []byte {
|
||||||
|
sig := ed25519.Sign(p.Ed25519PrivateKey, []byte(message))
|
||||||
|
return sig
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartGroup when given a server, creates a new Group under this profile and returns the group id an a precomputed
|
||||||
|
// invite which can be sent on the wire.
|
||||||
|
func (p *Profile) StartGroup(server string) (groupID string, invite string, err error) {
|
||||||
|
group, err := NewGroup(server)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
groupID = group.GroupID
|
||||||
|
invite, err = group.Invite()
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
p.Groups[group.GroupID] = group
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetGroup a pointer to a Group by the group Id, returns nil if no group found.
|
||||||
|
func (p *Profile) GetGroup(groupID string) (g *Group) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
g = p.Groups[groupID]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessInvite validates a group invite and adds a new group invite to the profile if it is valid.
|
||||||
|
// returns the new group ID on success, error on fail.
|
||||||
|
func (p *Profile) ProcessInvite(invite string) (string, error) {
|
||||||
|
gci, err := ValidateInvite(invite)
|
||||||
|
if err == nil {
|
||||||
|
group := new(Group)
|
||||||
|
group.Version = CurrentGroupVersion
|
||||||
|
group.GroupID = gci.GroupID
|
||||||
|
group.LocalID = GenerateRandomID()
|
||||||
|
copy(group.GroupKey[:], gci.SharedKey[:])
|
||||||
|
group.GroupServer = gci.ServerHost
|
||||||
|
group.Accepted = false
|
||||||
|
group.Attributes = make(map[string]string)
|
||||||
|
group.Attributes[attr.GetLocalScope("name")] = gci.GroupName
|
||||||
|
p.AddGroup(group)
|
||||||
|
return gci.GroupID, nil
|
||||||
|
}
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddGroup is a convenience method for adding a group to a profile.
|
||||||
|
func (p *Profile) AddGroup(group *Group) {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
_, exists := p.Groups[group.GroupID]
|
||||||
|
if !exists {
|
||||||
|
p.Groups[group.GroupID] = group
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttemptDecryption takes a ciphertext and signature and attempts to decrypt it under known groups.
|
||||||
|
// If successful, adds the message to the group's timeline
|
||||||
|
func (p *Profile) AttemptDecryption(ciphertext []byte, signature []byte) (bool, string, *Message, bool) {
|
||||||
|
for _, group := range p.Groups {
|
||||||
|
success, dgm := group.DecryptMessage(ciphertext)
|
||||||
|
if success {
|
||||||
|
verified := p.VerifyGroupMessage(dgm.Onion, group.GroupID, ciphertext, signature)
|
||||||
|
|
||||||
|
// So we have a message that has a valid group key, but the signature can't be verified.
|
||||||
|
// The most obvious explanation for this is that the group key has been compromised (or we are in an open group and the server is being malicious)
|
||||||
|
// Either way, someone who has the private key is being detectably bad so we are just going to throw this message away and mark the group as Compromised.
|
||||||
|
if !verified {
|
||||||
|
group.Compromised()
|
||||||
|
return false, group.GroupID, nil, false
|
||||||
|
}
|
||||||
|
message, seen := group.AddMessage(dgm, signature)
|
||||||
|
return true, group.GroupID, message, seen
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we couldn't find a group to decrypt the message with we just return false. This is an expected case
|
||||||
|
return false, "", nil, false
|
||||||
|
}
|
||||||
|
|
||||||
func getRandomness(arr *[]byte) {
|
func getRandomness(arr *[]byte) {
|
||||||
if _, err := io.ReadFull(rand.Reader, (*arr)[:]); err != nil {
|
if _, err := io.ReadFull(rand.Reader, (*arr)[:]); err != nil {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -62,11 +449,52 @@ func getRandomness(arr *[]byte) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GenerateRandomID generates a random 16 byte hex id code
|
// EncryptMessageToGroup when given a message and a group, encrypts and signs the message under the group and
|
||||||
func GenerateRandomID() string {
|
// profile
|
||||||
randBytes := make([]byte, 16)
|
func (p *Profile) EncryptMessageToGroup(message string, groupID string) ([]byte, []byte, error) {
|
||||||
rand.Read(randBytes)
|
|
||||||
return hex.EncodeToString(randBytes)
|
if len(message) > MaxGroupMessageLength {
|
||||||
|
return nil, nil, errors.New("group message is too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
group := p.GetGroup(groupID)
|
||||||
|
if group != nil {
|
||||||
|
timestamp := time.Now().Unix()
|
||||||
|
|
||||||
|
// Select the latest message from the timeline as a reference point.
|
||||||
|
var prevSig []byte
|
||||||
|
if len(group.Timeline.Messages) > 0 {
|
||||||
|
prevSig = group.Timeline.Messages[len(group.Timeline.Messages)-1].Signature
|
||||||
|
} else {
|
||||||
|
prevSig = []byte(group.GroupID)
|
||||||
|
}
|
||||||
|
|
||||||
|
lenPadding := MaxGroupMessageLength - len(message)
|
||||||
|
padding := make([]byte, lenPadding)
|
||||||
|
getRandomness(&padding)
|
||||||
|
hexGroupID, err := hex.DecodeString(group.GroupID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dm := &groups.DecryptedGroupMessage{
|
||||||
|
Onion: p.Onion,
|
||||||
|
Text: message,
|
||||||
|
SignedGroupID: hexGroupID,
|
||||||
|
Timestamp: uint64(timestamp),
|
||||||
|
PreviousMessageSig: prevSig,
|
||||||
|
Padding: padding[:],
|
||||||
|
}
|
||||||
|
|
||||||
|
ciphertext, err := group.EncryptMessage(dm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
signature := p.SignMessage(groupID + group.GroupServer + string(ciphertext))
|
||||||
|
group.AddSentMessage(dm, signature)
|
||||||
|
return ciphertext, signature, nil
|
||||||
|
}
|
||||||
|
return nil, nil, errors.New("group does not exist")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCopy returns a full deep copy of the Profile struct and its members (timeline inclusion control by arg)
|
// GetCopy returns a full deep copy of the Profile struct and its members (timeline inclusion control by arg)
|
||||||
|
@ -80,19 +508,11 @@ func (p *Profile) GetCopy(timeline bool) *Profile {
|
||||||
|
|
||||||
if timeline {
|
if timeline {
|
||||||
for groupID := range newp.Groups {
|
for groupID := range newp.Groups {
|
||||||
if group, exists := newp.Groups[groupID]; exists {
|
newp.Groups[groupID].Timeline = *p.Groups[groupID].Timeline.GetCopy()
|
||||||
if pGroup, exists := p.Groups[groupID]; exists {
|
|
||||||
group.Timeline = *(pGroup).Timeline.GetCopy()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for peerID := range newp.Contacts {
|
for peerID := range newp.Contacts {
|
||||||
if peer, exists := newp.Contacts[peerID]; exists {
|
newp.Contacts[peerID].Timeline = *p.Contacts[peerID].Timeline.GetCopy()
|
||||||
if pPeer, exists := p.Contacts[peerID]; exists {
|
|
||||||
peer.Timeline = *(pPeer).Timeline.GetCopy()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,128 @@
|
||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProfileIdentity(t *testing.T) {
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
alice := GenerateNewProfile("Alice")
|
||||||
|
|
||||||
|
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||||
|
if alice.Contacts[sarah.Onion].Name != "Sarah" {
|
||||||
|
t.Errorf("alice should have added sarah as a contact %v", alice.Contacts)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(alice.GetContacts()) != 1 {
|
||||||
|
t.Errorf("alice should be only contact: %v", alice.GetContacts())
|
||||||
|
}
|
||||||
|
|
||||||
|
alice.SetAttribute("test", "hello world")
|
||||||
|
value, _ := alice.GetAttribute("test")
|
||||||
|
if value != "hello world" {
|
||||||
|
t.Errorf("value from custom attribute should have been 'hello world', instead was: %v", value)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("%v", alice)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTrustPeer(t *testing.T) {
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
alice := GenerateNewProfile("Alice")
|
||||||
|
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||||
|
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||||
|
alice.SetContactAuthorization(sarah.Onion, AuthApproved)
|
||||||
|
if alice.GetContactAuthorization(sarah.Onion) != AuthApproved {
|
||||||
|
t.Errorf("peer should be approved")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlockPeer(t *testing.T) {
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
alice := GenerateNewProfile("Alice")
|
||||||
|
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||||
|
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||||
|
alice.SetContactAuthorization(sarah.Onion, AuthBlocked)
|
||||||
|
if alice.GetContactAuthorization(sarah.Onion) != AuthBlocked {
|
||||||
|
t.Errorf("peer should be blocked")
|
||||||
|
}
|
||||||
|
|
||||||
|
if alice.SetContactAuthorization("", AuthUnknown) == nil {
|
||||||
|
t.Errorf("Seting Auth level of a non existent peer should error")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAcceptNonExistentGroup(t *testing.T) {
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
sarah.AcceptInvite("doesnotexist")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRejectGroupInvite(t *testing.T) {
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
alice := GenerateNewProfile("Alice")
|
||||||
|
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||||
|
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||||
|
|
||||||
|
gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
sarah.ProcessInvite(invite)
|
||||||
|
group := alice.GetGroup(gid)
|
||||||
|
if len(sarah.Groups) == 1 {
|
||||||
|
if sarah.GetGroup(group.GroupID).Accepted {
|
||||||
|
t.Errorf("Group should not be accepted")
|
||||||
|
}
|
||||||
|
sarah.RejectInvite(group.GroupID)
|
||||||
|
if len(sarah.Groups) != 0 {
|
||||||
|
t.Errorf("Group %v should have been deleted", group.GroupID)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.Errorf("Group should exist in map")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProfileGroup(t *testing.T) {
|
||||||
|
sarah := GenerateNewProfile("Sarah")
|
||||||
|
alice := GenerateNewProfile("Alice")
|
||||||
|
sarah.AddContact(alice.Onion, &alice.PublicProfile)
|
||||||
|
alice.AddContact(sarah.Onion, &sarah.PublicProfile)
|
||||||
|
|
||||||
|
gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
sarah.ProcessInvite(invite)
|
||||||
|
if len(sarah.GetGroups()) != 1 {
|
||||||
|
t.Errorf("sarah should only be in 1 group instead: %v", sarah.GetGroups())
|
||||||
|
}
|
||||||
|
|
||||||
|
group := alice.GetGroup(gid)
|
||||||
|
sarah.AcceptInvite(group.GroupID)
|
||||||
|
c, s1, _ := sarah.EncryptMessageToGroup("Hello World", group.GroupID)
|
||||||
|
alice.AttemptDecryption(c, s1)
|
||||||
|
|
||||||
|
gid2, invite2, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
sarah.ProcessInvite(invite2)
|
||||||
|
group2 := alice.GetGroup(gid2)
|
||||||
|
c2, s2, _ := sarah.EncryptMessageToGroup("Hello World", group2.GroupID)
|
||||||
|
alice.AttemptDecryption(c2, s2)
|
||||||
|
|
||||||
|
_, _, err := sarah.EncryptMessageToGroup(string(make([]byte, MaxGroupMessageLength*2)), group2.GroupID)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("Overly long message should have returned an error")
|
||||||
|
}
|
||||||
|
|
||||||
|
bob := GenerateNewProfile("bob")
|
||||||
|
bob.AddContact(alice.Onion, &alice.PublicProfile)
|
||||||
|
bob.ProcessInvite(invite2)
|
||||||
|
c3, s3, err := bob.EncryptMessageToGroup("Bobs Message", group2.GroupID)
|
||||||
|
if err == nil {
|
||||||
|
ok, _, message, _ := alice.AttemptDecryption(c3, s3)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("Bobs message to the group should be decrypted %v %v", message, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
eve := GenerateNewProfile("eve")
|
||||||
|
ok, _, _, _ = eve.AttemptDecryption(c3, s3)
|
||||||
|
if ok {
|
||||||
|
t.Errorf("Eves hould not be able to decrypt Messages!")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
t.Errorf("Bob failed to encrypt a message to the group")
|
||||||
|
}
|
||||||
|
}
|
2227
peer/cwtch_peer.go
2227
peer/cwtch_peer.go
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -1,52 +0,0 @@
|
||||||
package peer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/settings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ProfileHooks interface {
|
|
||||||
// EventsToRegister returns a set of events that the extension is interested hooking
|
|
||||||
EventsToRegister() []event.Type
|
|
||||||
|
|
||||||
// ExperimentsToRegister returns a set of experiments that the extension is interested in being notified about
|
|
||||||
ExperimentsToRegister() []string
|
|
||||||
|
|
||||||
// OnEvent is called whenever an event Registered with RegisterEvents is called
|
|
||||||
OnEvent(event event.Event, profile CwtchPeer)
|
|
||||||
|
|
||||||
// OnContactRequestValue is Hooked when a contact sends a request for the given path
|
|
||||||
OnContactRequestValue(profile CwtchPeer, conversation model.Conversation, eventID string, path attr.ScopedZonedPath)
|
|
||||||
|
|
||||||
// OnContactReceiveValue is Hooked after a profile receives a response to a Get/Val Request
|
|
||||||
OnContactReceiveValue(profile CwtchPeer, conversation model.Conversation, path attr.ScopedZonedPath, value string, exists bool)
|
|
||||||
|
|
||||||
// NotifySettingsUpdate allow profile hooks to access configs e.g. download folder
|
|
||||||
NotifySettingsUpdate(settings settings.GlobalSettings)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProfileHook struct {
|
|
||||||
extension ProfileHooks
|
|
||||||
events map[event.Type]bool
|
|
||||||
experiments map[string]bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func ConstructHook(extension ProfileHooks) ProfileHook {
|
|
||||||
events := make(map[event.Type]bool)
|
|
||||||
for _, e := range extension.EventsToRegister() {
|
|
||||||
events[e] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
experiments := make(map[string]bool)
|
|
||||||
for _, experiment := range extension.ExperimentsToRegister() {
|
|
||||||
experiments[experiment] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return ProfileHook{
|
|
||||||
extension,
|
|
||||||
events,
|
|
||||||
experiments,
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,181 +0,0 @@
|
||||||
package peer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
"cwtch.im/cwtch/model/attr"
|
|
||||||
"cwtch.im/cwtch/protocol/connections"
|
|
||||||
"cwtch.im/cwtch/settings"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AccessPeeringState provides access to functions relating to the underlying connections of a peer.
|
|
||||||
type AccessPeeringState interface {
|
|
||||||
GetPeerState(string) connections.ConnectionState
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModifyPeeringState is a meta-interface intended to restrict callers to modify-only access to connection peers
|
|
||||||
type ModifyPeeringState interface {
|
|
||||||
BlockUnknownConnections()
|
|
||||||
AllowUnknownConnections()
|
|
||||||
PeerWithOnion(string)
|
|
||||||
QueueJoinServer(string)
|
|
||||||
DisconnectFromPeer(string)
|
|
||||||
DisconnectFromServer(string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModifyContactsAndPeers is a meta-interface intended to restrict a call to reading and modifying contacts
|
|
||||||
// and peers.
|
|
||||||
type ModifyContactsAndPeers interface {
|
|
||||||
ModifyPeeringState
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadServers provides access to the servers
|
|
||||||
type ReadServers interface {
|
|
||||||
GetServers() []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModifyGroups provides write-only access add/edit/remove new groups
|
|
||||||
type ModifyGroups interface {
|
|
||||||
ImportGroup(string) (int, error)
|
|
||||||
StartGroup(string, string) (int, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModifyServers provides write-only access to servers
|
|
||||||
type ModifyServers interface {
|
|
||||||
AddServer(string) (string, error)
|
|
||||||
ResyncServer(onion string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendMessages enables a caller to sender messages to a contact
|
|
||||||
type SendMessages interface {
|
|
||||||
SendMessage(conversation int, message string) (int, error)
|
|
||||||
|
|
||||||
// EnhancedSendMessage Attempts to Send a Message and Immediately Attempts to Lookup the Message in the Database
|
|
||||||
EnhancedSendMessage(conversation int, message string) string
|
|
||||||
|
|
||||||
SendInviteToConversation(conversationID int, inviteConversationID int) (int, error)
|
|
||||||
|
|
||||||
// EnhancedSendInviteMessage Attempts to Send an Invite and Immediately Attempts to Lookup the Message in the Database
|
|
||||||
EnhancedSendInviteMessage(conversation int, inviteConversationID int) string
|
|
||||||
|
|
||||||
SendScopedZonedGetValToContact(conversationID int, scope attr.Scope, zone attr.Zone, key string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CwtchPeer provides us with a way of testing systems built on top of cwtch without having to
|
|
||||||
// directly implement a cwtchPeer.
|
|
||||||
type CwtchPeer interface {
|
|
||||||
|
|
||||||
// Core Cwtch Peer Functions that should not be exposed to
|
|
||||||
// most functions
|
|
||||||
Init(event.Manager)
|
|
||||||
|
|
||||||
GenerateProtocolEngine(acn connectivity.ACN, bus event.Manager, engineHooks connections.EngineHooks) (connections.Engine, error)
|
|
||||||
|
|
||||||
AutoHandleEvents(events []event.Type)
|
|
||||||
Listen()
|
|
||||||
StartConnections(doPeers, doServers bool)
|
|
||||||
// Deprecated in 1.10
|
|
||||||
StartPeersConnections()
|
|
||||||
// Deprecated in 1.10
|
|
||||||
StartServerConnections()
|
|
||||||
|
|
||||||
Shutdown()
|
|
||||||
|
|
||||||
// GetOnion is deprecated. If you find yourself needing to rely on this method it is time
|
|
||||||
// to consider replacing this with a GetAddress(es) function that can fully expand cwtch beyond the boundaries
|
|
||||||
// of tor v3 onion services.
|
|
||||||
// Deprecated
|
|
||||||
GetOnion() string
|
|
||||||
|
|
||||||
// SetScopedZonedAttribute allows the setting of an attribute by scope and zone
|
|
||||||
// scope.zone.key = value
|
|
||||||
SetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string, value string)
|
|
||||||
|
|
||||||
// GetScopedZonedAttribute allows the retrieval of an attribute by scope and zone
|
|
||||||
// scope.zone.key = value
|
|
||||||
GetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool)
|
|
||||||
|
|
||||||
// GetScopedZonedAttributeKeys returns all keys associated with a given scope and zone
|
|
||||||
GetScopedZonedAttributeKeys(scope attr.Scope, zone attr.Zone) ([]string, error)
|
|
||||||
|
|
||||||
AccessPeeringState
|
|
||||||
ModifyPeeringState
|
|
||||||
|
|
||||||
ModifyGroups
|
|
||||||
|
|
||||||
ReadServers
|
|
||||||
ModifyServers
|
|
||||||
|
|
||||||
SendMessages
|
|
||||||
|
|
||||||
// Import Bundle
|
|
||||||
ImportBundle(string) error
|
|
||||||
EnhancedImportBundle(string) string
|
|
||||||
|
|
||||||
// New Unified Conversation Interfaces
|
|
||||||
NewContactConversation(handle string, acl model.AccessControl, accepted bool) (int, error)
|
|
||||||
FetchConversations() ([]*model.Conversation, error)
|
|
||||||
ArchiveConversation(conversation int)
|
|
||||||
GetConversationInfo(conversation int) (*model.Conversation, error)
|
|
||||||
FetchConversationInfo(handle string) (*model.Conversation, error)
|
|
||||||
|
|
||||||
// API-level management of conversation access control
|
|
||||||
UpdateConversationAccessControlList(id int, acl model.AccessControlList) error
|
|
||||||
EnhancedUpdateConversationAccessControlList(conversation int, acjson string) error
|
|
||||||
|
|
||||||
GetConversationAccessControlList(conversation int) (model.AccessControlList, error)
|
|
||||||
EnhancedGetConversationAccessControlList(conversation int) (string, error)
|
|
||||||
|
|
||||||
// Convieniance Functions for ACL Management
|
|
||||||
AcceptConversation(conversation int) error
|
|
||||||
BlockConversation(conversation int) error
|
|
||||||
UnblockConversation(conversation int) error
|
|
||||||
|
|
||||||
SetConversationAttribute(conversation int, path attr.ScopedZonedPath, value string) error
|
|
||||||
GetConversationAttribute(conversation int, path attr.ScopedZonedPath) (string, error)
|
|
||||||
DeleteConversation(conversation int) error
|
|
||||||
|
|
||||||
// New Unified Conversation Channel Interfaces
|
|
||||||
GetChannelMessage(conversation int, channel int, id int) (string, model.Attributes, error)
|
|
||||||
GetChannelMessageCount(conversation int, channel int) (int, error)
|
|
||||||
GetChannelMessageByContentHash(conversation int, channel int, contenthash string) (int, error)
|
|
||||||
GetMostRecentMessages(conversation int, channel int, offset int, limit uint) ([]model.ConversationMessage, error)
|
|
||||||
UpdateMessageAttribute(conversation int, channel int, id int, key string, value string) error
|
|
||||||
SearchConversations(pattern string) string
|
|
||||||
|
|
||||||
// EnhancedGetMessageById returns a json-encoded enhanced message, suitable for rendering in a UI
|
|
||||||
EnhancedGetMessageById(conversation int, mid int) string
|
|
||||||
|
|
||||||
// EnhancedGetMessageByContentHash returns a json-encoded enhanced message, suitable for rendering in a UI
|
|
||||||
EnhancedGetMessageByContentHash(conversation int, hash string) string
|
|
||||||
|
|
||||||
// EnhancedGetMessages returns a set of json-encoded enhanced messages, suitable for rendering in a UI
|
|
||||||
EnhancedGetMessages(conversation int, index int, count uint) string
|
|
||||||
|
|
||||||
// Server Token APIS
|
|
||||||
// TODO move these to feature protected interfaces
|
|
||||||
StoreCachedTokens(tokenServer string, tokens []*privacypass.Token)
|
|
||||||
|
|
||||||
// Profile Management
|
|
||||||
CheckPassword(password string) bool
|
|
||||||
ChangePassword(oldpassword string, newpassword string, newpasswordAgain string) error
|
|
||||||
ExportProfile(file string) error
|
|
||||||
Delete()
|
|
||||||
PublishEvent(resp event.Event)
|
|
||||||
RegisterHook(hook ProfileHooks)
|
|
||||||
UpdateExperiments(enabled bool, experiments map[string]bool)
|
|
||||||
NotifySettingsUpdate(settings settings.GlobalSettings)
|
|
||||||
IsFeatureEnabled(featureName string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnhancedMessage wraps a Cwtch model.Message with some additional data to reduce calls from the UI.
|
|
||||||
type EnhancedMessage struct {
|
|
||||||
model.Message
|
|
||||||
ID int // the actual ID of the message in the database (not the row number)
|
|
||||||
LocalIndex int // local index in the DB (row #). Can be empty (most calls supply it) but lookup by hash will fill it
|
|
||||||
ContentHash string
|
|
||||||
ContactImage string
|
|
||||||
Attributes map[string]string
|
|
||||||
}
|
|
|
@ -1,13 +0,0 @@
|
||||||
package peer
|
|
||||||
|
|
||||||
import "errors"
|
|
||||||
|
|
||||||
// Response is a wrapper to better semantically convey the response type...
|
|
||||||
type Response error
|
|
||||||
|
|
||||||
const errorSeparator = "."
|
|
||||||
|
|
||||||
// ConstructResponse is a helper function for creating Response structures.
|
|
||||||
func ConstructResponse(prefix string, error string) Response {
|
|
||||||
return errors.New(prefix + errorSeparator + error)
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
package peer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SQLCreateTableProfileKeyValue creates the Profile Key Value Table
|
|
||||||
const SQLCreateTableProfileKeyValue = `create table if not exists profile_kv (KeyType text, KeyName text, KeyValue blob, UNIQUE (KeyType,KeyName));`
|
|
||||||
|
|
||||||
// SQLCreateTableConversations creates the Profile Key Value Table
|
|
||||||
const SQLCreateTableConversations = `create table if not exists conversations (ID integer unique primary key autoincrement, Handle text, Attributes blob, ACL blob, Accepted bool);`
|
|
||||||
|
|
||||||
// initializeDatabase executes all the sql statements necessary to construct the base of the database.
|
|
||||||
// db must be open
|
|
||||||
func initializeDatabase(db *sql.DB) error {
|
|
||||||
|
|
||||||
_, err := db.Exec(SQLCreateTableProfileKeyValue)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error On Executing Query: %v %v", SQLCreateTableProfileKeyValue, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.Exec(SQLCreateTableConversations)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error On Executing Query: %v %v", SQLCreateTableConversations, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
329
peer/storage.go
329
peer/storage.go
|
@ -1,329 +0,0 @@
|
||||||
package peer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"compress/gzip"
|
|
||||||
"crypto/rand"
|
|
||||||
"database/sql"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"golang.org/x/crypto/pbkdf2"
|
|
||||||
"golang.org/x/crypto/sha3"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const versionFile = "VERSION"
|
|
||||||
const version = "2"
|
|
||||||
const saltFile = "SALT"
|
|
||||||
const dbFile = "db"
|
|
||||||
|
|
||||||
// CreateKeySalt derives a key and salt from a password: returns key, salt, err
|
|
||||||
func CreateKeySalt(password string) ([32]byte, [128]byte, error) {
|
|
||||||
var salt [128]byte
|
|
||||||
if _, err := io.ReadFull(rand.Reader, salt[:]); err != nil {
|
|
||||||
log.Errorf("Cannot read from random: %v\n", err)
|
|
||||||
return [32]byte{}, salt, err
|
|
||||||
}
|
|
||||||
dk := pbkdf2.Key([]byte(password), salt[:], 4096, 32, sha3.New512)
|
|
||||||
|
|
||||||
var dkr [32]byte
|
|
||||||
copy(dkr[:], dk)
|
|
||||||
return dkr, salt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// createKey derives a key from a password and salt
|
|
||||||
func createKey(password string, salt []byte) [32]byte {
|
|
||||||
dk := pbkdf2.Key([]byte(password), salt, 4096, 32, sha3.New512)
|
|
||||||
|
|
||||||
var dkr [32]byte
|
|
||||||
copy(dkr[:], dk)
|
|
||||||
return dkr
|
|
||||||
}
|
|
||||||
|
|
||||||
func initV2Directory(directory, password string) ([32]byte, [128]byte, error) {
|
|
||||||
os.MkdirAll(directory, 0700)
|
|
||||||
|
|
||||||
key, salt, err := CreateKeySalt(password)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("Could not create key for profile store from password: %v\n", err)
|
|
||||||
return [32]byte{}, [128]byte{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = os.WriteFile(path.Join(directory, versionFile), []byte(version), 0600); err != nil {
|
|
||||||
log.Errorf("Could not write version file: %v", err)
|
|
||||||
return [32]byte{}, [128]byte{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = os.WriteFile(path.Join(directory, saltFile), salt[:], 0600); err != nil {
|
|
||||||
log.Errorf("Could not write salt file: %v", err)
|
|
||||||
return [32]byte{}, [128]byte{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return key, salt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func openEncryptedDatabase(profileDirectory string, password string, createIfNotExists bool) (*sql.DB, error) {
|
|
||||||
salt, err := os.ReadFile(path.Join(profileDirectory, saltFile))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
key := createKey(password, salt)
|
|
||||||
dbPath := filepath.Join(profileDirectory, "db")
|
|
||||||
|
|
||||||
if !createIfNotExists {
|
|
||||||
if _, err := os.Stat(dbPath); errors.Is(err, os.ErrNotExist) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dbname := fmt.Sprintf("%v?_pragma_key=x'%x'&_pragma_cipher_page_size=8192", dbPath, key)
|
|
||||||
db, err := sql.Open("sqlite3", dbname)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("could not open encrypted database", err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return db, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateEncryptedStorePeer creates a *new* Cwtch Profile backed by an encrypted datastore
|
|
||||||
func CreateEncryptedStorePeer(profileDirectory string, name string, password string) (CwtchPeer, error) {
|
|
||||||
log.Debugf("Initializing Encrypted Storage Directory")
|
|
||||||
_, _, err := initV2Directory(profileDirectory, password)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Opening Encrypted Database")
|
|
||||||
db, err := openEncryptedDatabase(profileDirectory, password, true)
|
|
||||||
if db == nil || err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Initializing Database")
|
|
||||||
err = initializeDatabase(db)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
db.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Creating Cwtch Profile Backed By Encrypted Database")
|
|
||||||
|
|
||||||
cps, err := NewCwtchProfileStorage(db, profileDirectory)
|
|
||||||
if err != nil {
|
|
||||||
db.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewProfileWithEncryptedStorage(name, cps), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateEncryptedStore creates a encrypted datastore
|
|
||||||
func CreateEncryptedStore(profileDirectory string, password string) (*CwtchProfileStorage, error) {
|
|
||||||
|
|
||||||
log.Debugf("Creating Encrypted Database")
|
|
||||||
db, err := openEncryptedDatabase(profileDirectory, password, true)
|
|
||||||
if db == nil || err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Initializing Database")
|
|
||||||
err = initializeDatabase(db)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
db.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Creating Cwtch Profile Backed By Encrypted Database")
|
|
||||||
|
|
||||||
cps, err := NewCwtchProfileStorage(db, profileDirectory)
|
|
||||||
if err != nil {
|
|
||||||
db.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cps, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromEncryptedDatabase constructs a Cwtch Profile from an existing Encrypted Database
|
|
||||||
func FromEncryptedDatabase(profileDirectory string, password string) (CwtchPeer, error) {
|
|
||||||
log.Debugf("Loading Encrypted Profile: %v", profileDirectory)
|
|
||||||
db, err := openEncryptedDatabase(profileDirectory, password, false)
|
|
||||||
if db == nil || err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to open encrypted database: error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Initializing Profile from Encrypted Storage")
|
|
||||||
cps, err := NewCwtchProfileStorage(db, profileDirectory)
|
|
||||||
if err != nil {
|
|
||||||
db.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return FromEncryptedStorage(cps), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ImportProfile(exportedCwtchFile string, profilesDir string, password string) (CwtchPeer, error) {
|
|
||||||
profileID, err := checkCwtchProfileBackupFile(exportedCwtchFile)
|
|
||||||
if profileID == "" || err != nil {
|
|
||||||
log.Errorf("%s is an invalid cwtch backup file: %s", profileID, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
log.Debugf("%s is a valid cwtch backup file", profileID)
|
|
||||||
|
|
||||||
profileDBFile := filepath.Join(profilesDir, profileID, dbFile)
|
|
||||||
log.Debugf("checking %v", profileDBFile)
|
|
||||||
if _, err := os.Stat(profileDBFile); errors.Is(err, os.ErrNotExist) {
|
|
||||||
// backup is valid and the profile hasn't been imported yet, time to extract and check the password
|
|
||||||
profileDir := filepath.Join(profilesDir, profileID)
|
|
||||||
os.MkdirAll(profileDir, 0700)
|
|
||||||
err := importCwtchProfileBackupFile(exportedCwtchFile, profilesDir)
|
|
||||||
if err == nil {
|
|
||||||
profile, err := FromEncryptedDatabase(profileDir, password)
|
|
||||||
if err == nil {
|
|
||||||
return profile, err
|
|
||||||
}
|
|
||||||
// Otherwise purge
|
|
||||||
log.Errorf("error importing profile: %v. removing %s", err, profileDir)
|
|
||||||
os.RemoveAll(profileDir)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("%s is already a profile for this app", profileID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkCwtchProfileBackupFile(srcFile string) (string, error) {
|
|
||||||
f, err := os.Open(srcFile)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
gzf, err := gzip.NewReader(f)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
tarReader := tar.NewReader(gzf)
|
|
||||||
|
|
||||||
profileName := ""
|
|
||||||
|
|
||||||
for {
|
|
||||||
header, err := tarReader.Next()
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch header.Typeflag {
|
|
||||||
case tar.TypeDir:
|
|
||||||
return "", errors.New("invalid cwtch backup file")
|
|
||||||
case tar.TypeReg:
|
|
||||||
parts := strings.Split(header.Name, "/")
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return "", errors.New("invalid header name")
|
|
||||||
}
|
|
||||||
dir := parts[0]
|
|
||||||
profileFileType := parts[1]
|
|
||||||
|
|
||||||
_, hexErr := hex.DecodeString(dir)
|
|
||||||
if dir == "." || dir == ".." || len(dir) != 32 || hexErr != nil {
|
|
||||||
return "", errors.New("invalid profile name")
|
|
||||||
}
|
|
||||||
|
|
||||||
if profileName == "" {
|
|
||||||
profileName = dir
|
|
||||||
}
|
|
||||||
if dir != profileName {
|
|
||||||
return "", errors.New("invalid cwtch backup file")
|
|
||||||
}
|
|
||||||
|
|
||||||
if profileFileType != dbFile && profileFileType != saltFile && profileFileType != versionFile {
|
|
||||||
return "", errors.New("invalid cwtch backup file")
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return "", errors.New("invalid cwtch backup file")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return profileName, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func importCwtchProfileBackupFile(srcFile string, profilesDir string) error {
|
|
||||||
f, err := os.Open(srcFile)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
gzf, err := gzip.NewReader(f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tarReader := tar.NewReader(gzf)
|
|
||||||
|
|
||||||
profileName := ""
|
|
||||||
|
|
||||||
for {
|
|
||||||
header, err := tarReader.Next()
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch header.Typeflag {
|
|
||||||
case tar.TypeDir:
|
|
||||||
return errors.New("invalid cwtch backup file")
|
|
||||||
case tar.TypeReg:
|
|
||||||
// using split here because we deliberately construct these paths in a cross-platform consistent way
|
|
||||||
parts := strings.Split(header.Name, "/")
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return errors.New("invalid header name")
|
|
||||||
}
|
|
||||||
dir := parts[0]
|
|
||||||
base := parts[1]
|
|
||||||
|
|
||||||
_, hexErr := hex.DecodeString(dir)
|
|
||||||
if dir == "." || dir == ".." || len(dir) != 32 || hexErr != nil {
|
|
||||||
return errors.New("invalid profile name")
|
|
||||||
}
|
|
||||||
|
|
||||||
if profileName == "" {
|
|
||||||
profileName = dir
|
|
||||||
}
|
|
||||||
|
|
||||||
if dir != profileName {
|
|
||||||
return errors.New("invalid cwtch backup file")
|
|
||||||
}
|
|
||||||
|
|
||||||
// here we use filepath.Join to construct a valid directory path
|
|
||||||
outFile, err := os.Create(filepath.Join(profilesDir, dir, base))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error importing cwtch profile file: %s", err)
|
|
||||||
}
|
|
||||||
defer outFile.Close()
|
|
||||||
if _, err := io.Copy(outFile, tarReader); err != nil {
|
|
||||||
return fmt.Errorf("error importing cwtch profile file: %s", err)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return errors.New("invalid cwtch backup file")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,23 +1,13 @@
|
||||||
package connections
|
package connections
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
"cwtch.im/cwtch/protocol/files"
|
|
||||||
"cwtch.im/cwtch/protocol/groups"
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
pmodel "cwtch.im/cwtch/protocol/model"
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir"
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
|
@ -25,20 +15,11 @@ import (
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"github.com/gtank/ristretto255"
|
"github.com/gtank/ristretto255"
|
||||||
"golang.org/x/crypto/ed25519"
|
"golang.org/x/crypto/ed25519"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// 32 from tor/src/app/config/config.c MaxClientCircuitsPending
|
|
||||||
// we lower a bit because there's a lot of spillage
|
|
||||||
// - just cus we get a SOCKS timeout doesn't mean tor has stopped trying as a huge sorce
|
|
||||||
// - potential multiple profiles as a huge source
|
|
||||||
// - second order connections like token service's second servers aren't tracked in our system adding a few extra periodically
|
|
||||||
const TorMaxPendingConns = 28
|
|
||||||
|
|
||||||
type connectionLockedService struct {
|
|
||||||
service tapir.Service
|
|
||||||
connectingLock sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
type engine struct {
|
type engine struct {
|
||||||
queue event.Queue
|
queue event.Queue
|
||||||
|
|
||||||
|
@ -50,7 +31,7 @@ type engine struct {
|
||||||
authorizations sync.Map // string(onion) => model.Authorization
|
authorizations sync.Map // string(onion) => model.Authorization
|
||||||
|
|
||||||
// Block Unknown Contacts
|
// Block Unknown Contacts
|
||||||
blockUnknownContacts atomic.Bool
|
blockUnknownContacts bool
|
||||||
|
|
||||||
// Pointer to the Global Event Manager
|
// Pointer to the Global Event Manager
|
||||||
eventManager event.Manager
|
eventManager event.Manager
|
||||||
|
@ -58,22 +39,13 @@ type engine struct {
|
||||||
// Nextgen Tapir Service
|
// Nextgen Tapir Service
|
||||||
service tapir.Service
|
service tapir.Service
|
||||||
|
|
||||||
getValRequests sync.Map // [string]string eventID:Data
|
|
||||||
|
|
||||||
// Nextgen Tapir Service
|
// Nextgen Tapir Service
|
||||||
ephemeralServices map[string]*connectionLockedService //sync.Map // string(onion) => tapir.Service
|
ephemeralServices sync.Map // string(onion) => tapir.Service
|
||||||
ephemeralServicesLock sync.Mutex
|
|
||||||
|
|
||||||
// Required for listen(), inaccessible from identity
|
// Required for listen(), inaccessible from identity
|
||||||
privateKey ed25519.PrivateKey
|
privateKey ed25519.PrivateKey
|
||||||
|
|
||||||
// file sharing subsystem is responsible for maintaining active shares and downloads
|
shuttingDown bool
|
||||||
filesharingSubSystem files.FileSharingSubSystem
|
|
||||||
|
|
||||||
tokenManagers sync.Map // [tokenService][]TokenManager
|
|
||||||
|
|
||||||
shuttingDown atomic.Bool
|
|
||||||
onSendMessage func(connection tapir.Connection, message []byte) error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Engine (ProtocolEngine) encapsulates the logic necessary to make and receive Cwtch connections.
|
// Engine (ProtocolEngine) encapsulates the logic necessary to make and receive Cwtch connections.
|
||||||
|
@ -87,16 +59,11 @@ type Engine interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProtocolEngine initializes a new engine that runs Cwtch using the given parameters
|
// NewProtocolEngine initializes a new engine that runs Cwtch using the given parameters
|
||||||
func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateKey, acn connectivity.ACN, eventManager event.Manager, peerAuthorizations map[string]model.Authorization, engineHooks EngineHooks) Engine {
|
func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateKey, acn connectivity.ACN, eventManager event.Manager, peerAuthorizations map[string]model.Authorization) Engine {
|
||||||
engine := new(engine)
|
engine := new(engine)
|
||||||
engine.identity = identity
|
engine.identity = identity
|
||||||
engine.privateKey = privateKey
|
engine.privateKey = privateKey
|
||||||
engine.ephemeralServices = make(map[string]*connectionLockedService)
|
|
||||||
engine.queue = event.NewQueue()
|
engine.queue = event.NewQueue()
|
||||||
|
|
||||||
// the standard send message function
|
|
||||||
engine.onSendMessage = engineHooks.SendPeerMessage
|
|
||||||
|
|
||||||
go engine.eventHandler()
|
go engine.eventHandler()
|
||||||
|
|
||||||
engine.acn = acn
|
engine.acn = acn
|
||||||
|
@ -108,8 +75,8 @@ func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateK
|
||||||
engine.eventManager = eventManager
|
engine.eventManager = eventManager
|
||||||
|
|
||||||
engine.eventManager.Subscribe(event.ProtocolEngineStartListen, engine.queue)
|
engine.eventManager.Subscribe(event.ProtocolEngineStartListen, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.ProtocolEngineShutdown, engine.queue)
|
|
||||||
engine.eventManager.Subscribe(event.PeerRequest, engine.queue)
|
engine.eventManager.Subscribe(event.PeerRequest, engine.queue)
|
||||||
|
engine.eventManager.Subscribe(event.RetryPeerRequest, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.InvitePeerToGroup, engine.queue)
|
engine.eventManager.Subscribe(event.InvitePeerToGroup, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.JoinServer, engine.queue)
|
engine.eventManager.Subscribe(event.JoinServer, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.LeaveServer, engine.queue)
|
engine.eventManager.Subscribe(event.LeaveServer, engine.queue)
|
||||||
|
@ -118,22 +85,11 @@ func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateK
|
||||||
engine.eventManager.Subscribe(event.SendGetValMessageToPeer, engine.queue)
|
engine.eventManager.Subscribe(event.SendGetValMessageToPeer, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.SendRetValMessageToPeer, engine.queue)
|
engine.eventManager.Subscribe(event.SendRetValMessageToPeer, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.DeleteContact, engine.queue)
|
engine.eventManager.Subscribe(event.DeleteContact, engine.queue)
|
||||||
|
engine.eventManager.Subscribe(event.DeleteGroup, engine.queue)
|
||||||
|
|
||||||
engine.eventManager.Subscribe(event.UpdateConversationAuthorization, engine.queue)
|
engine.eventManager.Subscribe(event.SetPeerAuthorization, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.BlockUnknownPeers, engine.queue)
|
engine.eventManager.Subscribe(event.BlockUnknownPeers, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.AllowUnknownPeers, engine.queue)
|
engine.eventManager.Subscribe(event.AllowUnknownPeers, engine.queue)
|
||||||
engine.eventManager.Subscribe(event.DisconnectPeerRequest, engine.queue)
|
|
||||||
engine.eventManager.Subscribe(event.DisconnectServerRequest, engine.queue)
|
|
||||||
|
|
||||||
// File Handling
|
|
||||||
engine.eventManager.Subscribe(event.ShareManifest, engine.queue)
|
|
||||||
engine.eventManager.Subscribe(event.StopFileShare, engine.queue)
|
|
||||||
engine.eventManager.Subscribe(event.StopAllFileShares, engine.queue)
|
|
||||||
engine.eventManager.Subscribe(event.ManifestSizeReceived, engine.queue)
|
|
||||||
engine.eventManager.Subscribe(event.ManifestSaved, engine.queue)
|
|
||||||
|
|
||||||
// Token Server
|
|
||||||
engine.eventManager.Subscribe(event.MakeAntispamPayment, engine.queue)
|
|
||||||
|
|
||||||
for peer, authorization := range peerAuthorizations {
|
for peer, authorization := range peerAuthorizations {
|
||||||
engine.authorizations.Store(peer, authorization)
|
engine.authorizations.Store(peer, authorization)
|
||||||
|
@ -151,44 +107,31 @@ func (e *engine) EventManager() event.Manager {
|
||||||
|
|
||||||
// eventHandler process events from other subsystems
|
// eventHandler process events from other subsystems
|
||||||
func (e *engine) eventHandler() {
|
func (e *engine) eventHandler() {
|
||||||
log.Debugf("restartFlow Launching ProtocolEngine listener")
|
|
||||||
for {
|
for {
|
||||||
ev := e.queue.Next()
|
ev := e.queue.Next()
|
||||||
// optimistic shutdown...
|
|
||||||
if e.shuttingDown.Load() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch ev.EventType {
|
switch ev.EventType {
|
||||||
case event.StatusRequest:
|
case event.StatusRequest:
|
||||||
e.eventManager.Publish(event.Event{EventType: event.ProtocolEngineStatus, EventID: ev.EventID})
|
e.eventManager.Publish(event.Event{EventType: event.ProtocolEngineStatus, EventID: ev.EventID})
|
||||||
case event.PeerRequest:
|
case event.PeerRequest:
|
||||||
log.Debugf("restartFlow Handling Peer Request")
|
|
||||||
if torProvider.IsValidHostname(ev.Data[event.RemotePeer]) {
|
if torProvider.IsValidHostname(ev.Data[event.RemotePeer]) {
|
||||||
go e.peerWithOnion(ev.Data[event.RemotePeer])
|
go e.peerWithOnion(ev.Data[event.RemotePeer])
|
||||||
}
|
}
|
||||||
case event.InvitePeerToGroup:
|
case event.RetryPeerRequest:
|
||||||
err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: event.ContextInvite, Data: []byte(ev.Data[event.GroupInvite])})
|
// This event allows engine to treat (automated) retry peering requests differently to user-specified
|
||||||
if err != nil {
|
// peer events
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.InvitePeerToGroup), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: "peer is offline or the connection has yet to finalize"}))
|
if torProvider.IsValidHostname(ev.Data[event.RemotePeer]) {
|
||||||
|
log.Debugf("Retrying Peer Request: %v", ev.Data[event.RemotePeer])
|
||||||
|
go e.peerWithOnion(ev.Data[event.RemotePeer])
|
||||||
}
|
}
|
||||||
|
case event.InvitePeerToGroup:
|
||||||
|
e.sendMessageToPeer(ev.EventID, ev.Data[event.RemotePeer], event.ContextInvite, []byte(ev.Data[event.GroupInvite]))
|
||||||
case event.JoinServer:
|
case event.JoinServer:
|
||||||
signature, err := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
signature, err := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// will result in a full sync
|
// will result in a full sync
|
||||||
signature = []byte{}
|
signature = []byte{}
|
||||||
}
|
}
|
||||||
// if we have been sent cached tokens, also deserialize them
|
go e.peerWithTokenServer(ev.Data[event.GroupServer], ev.Data[event.ServerTokenOnion], ev.Data[event.ServerTokenY], signature)
|
||||||
cachedTokensJson := ev.Data[event.CachedTokens]
|
|
||||||
var cachedTokens []*privacypass.Token
|
|
||||||
if len(cachedTokensJson) != 0 {
|
|
||||||
json.Unmarshal([]byte(cachedTokensJson), &cachedTokens)
|
|
||||||
}
|
|
||||||
|
|
||||||
// create a new token handler...
|
|
||||||
e.NewTokenHandler(ev.Data[event.ServerTokenOnion], cachedTokens)
|
|
||||||
go e.peerWithTokenServer(ev.Data[event.GroupServer], ev.Data[event.ServerTokenOnion], ev.Data[event.ServerTokenY], signature, cachedTokens)
|
|
||||||
case event.MakeAntispamPayment:
|
|
||||||
go e.makeAntispamPayment(ev.Data[event.GroupServer])
|
|
||||||
case event.LeaveServer:
|
case event.LeaveServer:
|
||||||
e.leaveServer(ev.Data[event.GroupServer])
|
e.leaveServer(ev.Data[event.GroupServer])
|
||||||
case event.DeleteContact:
|
case event.DeleteContact:
|
||||||
|
@ -196,42 +139,28 @@ func (e *engine) eventHandler() {
|
||||||
// We remove this peer from out blocklist which will prevent them from contacting us if we have "block unknown peers" turned on.
|
// We remove this peer from out blocklist which will prevent them from contacting us if we have "block unknown peers" turned on.
|
||||||
e.authorizations.Delete(ev.Data[event.RemotePeer])
|
e.authorizations.Delete(ev.Data[event.RemotePeer])
|
||||||
e.deleteConnection(onion)
|
e.deleteConnection(onion)
|
||||||
case event.DisconnectPeerRequest:
|
case event.DeleteGroup:
|
||||||
e.deleteConnection(ev.Data[event.RemotePeer])
|
// TODO: There isn't a way here to determine if other Groups are using a server connection...
|
||||||
case event.DisconnectServerRequest:
|
|
||||||
e.leaveServer(ev.Data[event.GroupServer])
|
|
||||||
case event.SendMessageToGroup:
|
case event.SendMessageToGroup:
|
||||||
ciphertext, _ := base64.StdEncoding.DecodeString(ev.Data[event.Ciphertext])
|
ciphertext, _ := base64.StdEncoding.DecodeString(ev.Data[event.Ciphertext])
|
||||||
signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
||||||
|
go e.sendMessageToGroup(ev.Data[event.GroupID], ev.Data[event.GroupServer], ciphertext, signature)
|
||||||
// launch a goroutine to post to the server
|
|
||||||
go e.sendMessageToGroup(ev.Data[event.GroupID], ev.Data[event.GroupServer], ciphertext, signature, 0)
|
|
||||||
case event.SendMessageToPeer:
|
case event.SendMessageToPeer:
|
||||||
// TODO: remove this passthrough once the UI is integrated.
|
// TODO: remove this passthrough once the UI is integrated.
|
||||||
context, ok := ev.Data[event.EventContext]
|
context, ok := ev.Data[event.EventContext]
|
||||||
if !ok {
|
if !ok {
|
||||||
context = event.ContextRaw
|
context = event.ContextRaw
|
||||||
}
|
}
|
||||||
if err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: context, Data: []byte(ev.Data[event.Data])}); err != nil {
|
err := e.sendMessageToPeer(ev.EventID, ev.Data[event.RemotePeer], context, []byte(ev.Data[event.Data]))
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.SendMessageToPeer), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: "peer is offline or the connection has yet to finalize"}))
|
if err != nil {
|
||||||
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: "peer is offline or the connection has yet to finalize"}))
|
||||||
}
|
}
|
||||||
case event.SendGetValMessageToPeer:
|
case event.SendGetValMessageToPeer:
|
||||||
if err := e.sendGetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Scope], ev.Data[event.Path]); err != nil {
|
e.sendGetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Scope], ev.Data[event.Path])
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.SendGetValMessageToPeer), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
|
||||||
}
|
|
||||||
case event.SendRetValMessageToPeer:
|
case event.SendRetValMessageToPeer:
|
||||||
if err := e.sendRetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Data], ev.Data[event.Exists]); err != nil {
|
e.sendRetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Data], ev.Data[event.Exists])
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.EventContext: string(event.SendRetValMessageToPeer), event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
case event.SetPeerAuthorization:
|
||||||
}
|
auth := model.Authorization(ev.Data[event.Authorization])
|
||||||
case event.UpdateConversationAuthorization:
|
|
||||||
accepted, _ := strconv.ParseBool(ev.Data[event.Accepted])
|
|
||||||
blocked, _ := strconv.ParseBool(ev.Data[event.Blocked])
|
|
||||||
auth := model.AuthUnknown
|
|
||||||
if blocked {
|
|
||||||
auth = model.AuthBlocked
|
|
||||||
} else if accepted {
|
|
||||||
auth = model.AuthApproved
|
|
||||||
}
|
|
||||||
e.authorizations.Store(ev.Data[event.RemotePeer], auth)
|
e.authorizations.Store(ev.Data[event.RemotePeer], auth)
|
||||||
if auth == model.AuthBlocked {
|
if auth == model.AuthBlocked {
|
||||||
connection, err := e.service.GetConnection(ev.Data[event.RemotePeer])
|
connection, err := e.service.GetConnection(ev.Data[event.RemotePeer])
|
||||||
|
@ -245,50 +174,12 @@ func (e *engine) eventHandler() {
|
||||||
}
|
}
|
||||||
case event.AllowUnknownPeers:
|
case event.AllowUnknownPeers:
|
||||||
log.Debugf("%v now allows unknown connections", e.identity.Hostname())
|
log.Debugf("%v now allows unknown connections", e.identity.Hostname())
|
||||||
e.blockUnknownContacts.Store(false)
|
e.blockUnknownContacts = false
|
||||||
case event.BlockUnknownPeers:
|
case event.BlockUnknownPeers:
|
||||||
log.Debugf("%v now forbids unknown connections", e.identity.Hostname())
|
log.Debugf("%v now forbids unknown connections", e.identity.Hostname())
|
||||||
e.blockUnknownContacts.Store(true)
|
e.blockUnknownContacts = true
|
||||||
case event.ProtocolEngineStartListen:
|
case event.ProtocolEngineStartListen:
|
||||||
go e.listenFn()
|
go e.listenFn()
|
||||||
case event.ShareManifest:
|
|
||||||
e.filesharingSubSystem.ShareFile(ev.Data[event.FileKey], ev.Data[event.SerializedManifest])
|
|
||||||
case event.StopFileShare:
|
|
||||||
e.filesharingSubSystem.StopFileShare(ev.Data[event.FileKey])
|
|
||||||
case event.StopAllFileShares:
|
|
||||||
e.filesharingSubSystem.StopAllFileShares()
|
|
||||||
case event.ManifestSizeReceived:
|
|
||||||
handle := ev.Data[event.Handle]
|
|
||||||
key := ev.Data[event.FileKey]
|
|
||||||
size, _ := strconv.Atoi(ev.Data[event.ManifestSize])
|
|
||||||
if err := e.sendPeerMessage(handle, e.filesharingSubSystem.FetchManifest(key, uint64(size))); err != nil {
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
|
||||||
}
|
|
||||||
case event.ManifestSaved:
|
|
||||||
handle := ev.Data[event.Handle]
|
|
||||||
key := ev.Data[event.FileKey]
|
|
||||||
serializedManifest := ev.Data[event.SerializedManifest]
|
|
||||||
tempFile := ev.Data[event.TempFile]
|
|
||||||
title := ev.Data[event.NameSuggestion]
|
|
||||||
|
|
||||||
// Another optimistic check here. Technically Cwtch profile should not request manifest on a download files
|
|
||||||
// but if they do then we should check if it exists up front. If it does then announce that the download
|
|
||||||
// is complete.
|
|
||||||
if _, filePath, success := e.filesharingSubSystem.VerifyFile(key); success {
|
|
||||||
log.Debugf("file verified and downloaded!")
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: key, event.FilePath: filePath, event.TempFile: tempFile}))
|
|
||||||
} else {
|
|
||||||
// NOTE: for now there will probably only ever be a single chunk request. When we enable group
|
|
||||||
// sharing and rehosting then this loop will serve as a a way of splitting the request among multiple
|
|
||||||
// contacts
|
|
||||||
for _, message := range e.filesharingSubSystem.CompileChunkRequests(key, serializedManifest, tempFile, title) {
|
|
||||||
if err := e.sendPeerMessage(handle, message); err != nil {
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case event.ProtocolEngineShutdown:
|
|
||||||
return
|
|
||||||
default:
|
default:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -299,7 +190,7 @@ func (e *engine) isBlocked(onion string) bool {
|
||||||
authorization, known := e.authorizations.Load(onion)
|
authorization, known := e.authorizations.Load(onion)
|
||||||
if !known {
|
if !known {
|
||||||
// if we block unknown peers we will block this contact
|
// if we block unknown peers we will block this contact
|
||||||
return e.blockUnknownContacts.Load()
|
return e.blockUnknownContacts
|
||||||
}
|
}
|
||||||
return authorization.(model.Authorization) == model.AuthBlocked
|
return authorization.(model.Authorization) == model.AuthBlocked
|
||||||
}
|
}
|
||||||
|
@ -310,7 +201,7 @@ func (e *engine) isAllowed(onion string) bool {
|
||||||
log.Errorf("attempted to lookup authorization of onion not in map...that should never happen")
|
log.Errorf("attempted to lookup authorization of onion not in map...that should never happen")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if e.blockUnknownContacts.Load() {
|
if e.blockUnknownContacts {
|
||||||
return authorization.(model.Authorization) == model.AuthApproved
|
return authorization.(model.Authorization) == model.AuthApproved
|
||||||
}
|
}
|
||||||
return authorization.(model.Authorization) != model.AuthBlocked
|
return authorization.(model.Authorization) != model.AuthBlocked
|
||||||
|
@ -325,39 +216,22 @@ func (e *engine) createPeerTemplate() *PeerApp {
|
||||||
peerAppTemplate.OnAuth = e.ignoreOnShutdown(e.peerAuthed)
|
peerAppTemplate.OnAuth = e.ignoreOnShutdown(e.peerAuthed)
|
||||||
peerAppTemplate.OnConnecting = e.ignoreOnShutdown(e.peerConnecting)
|
peerAppTemplate.OnConnecting = e.ignoreOnShutdown(e.peerConnecting)
|
||||||
peerAppTemplate.OnClose = e.ignoreOnShutdown(e.peerDisconnected)
|
peerAppTemplate.OnClose = e.ignoreOnShutdown(e.peerDisconnected)
|
||||||
peerAppTemplate.OnSendMessage = e.onSendMessage
|
peerAppTemplate.RetValHandler = e.handlePeerRetVal
|
||||||
return peerAppTemplate
|
return peerAppTemplate
|
||||||
}
|
}
|
||||||
|
|
||||||
// Listen sets up an onion listener to process incoming cwtch messages
|
// Listen sets up an onion listener to process incoming cwtch messages
|
||||||
func (e *engine) listenFn() {
|
func (e *engine) listenFn() {
|
||||||
err := e.service.Listen(e.createPeerTemplate())
|
err := e.service.Listen(e.createPeerTemplate())
|
||||||
if !e.shuttingDown.Load() {
|
if !e.shuttingDown {
|
||||||
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineStopped, map[event.Field]string{event.Identity: e.identity.Hostname(), event.Error: err.Error()}))
|
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineStopped, map[event.Field]string{event.Identity: e.identity.Hostname(), event.Error: err.Error()}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown tears down the eventHandler goroutine
|
// Shutdown tears down the eventHandler goroutine
|
||||||
func (e *engine) Shutdown() {
|
func (e *engine) Shutdown() {
|
||||||
// don't accept any more events...
|
e.shuttingDown = true
|
||||||
e.queue.Publish(event.NewEvent(event.ProtocolEngineShutdown, map[event.Field]string{}))
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.ProtocolEngineShutdown, map[event.Field]string{}))
|
|
||||||
e.service.Shutdown()
|
e.service.Shutdown()
|
||||||
e.shuttingDown.Store(true)
|
|
||||||
e.ephemeralServicesLock.Lock()
|
|
||||||
defer e.ephemeralServicesLock.Unlock()
|
|
||||||
for _, connection := range e.ephemeralServices {
|
|
||||||
log.Infof("shutting down ephemeral service")
|
|
||||||
// work around: service.shutdown() can block for a long time if it is Open()ing a new connection, putting it in a
|
|
||||||
// goroutine means we can perform this operation and let the per service shutdown in their own time or until the app exits
|
|
||||||
conn := connection // don't capture loop variable
|
|
||||||
go func() {
|
|
||||||
conn.connectingLock.Lock()
|
|
||||||
conn.service.Shutdown()
|
|
||||||
conn.connectingLock.Unlock()
|
|
||||||
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
e.queue.Shutdown()
|
e.queue.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -368,112 +242,66 @@ func (e *engine) peerWithOnion(onion string) {
|
||||||
if !e.isBlocked(onion) {
|
if !e.isBlocked(onion) {
|
||||||
e.ignoreOnShutdown(e.peerConnecting)(onion)
|
e.ignoreOnShutdown(e.peerConnecting)(onion)
|
||||||
connected, err := e.service.Connect(onion, e.createPeerTemplate())
|
connected, err := e.service.Connect(onion, e.createPeerTemplate())
|
||||||
if connected && err == nil {
|
|
||||||
// on success CwtchPeer will handle Auth and other status updates
|
|
||||||
// early exit from this function...
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// If we are already connected...check if we are authed and issue an auth event
|
// If we are already connected...check if we are authed and issue an auth event
|
||||||
// (This allows the ui to be stateless)
|
// (This allows the ui to be stateless)
|
||||||
if connected && err != nil {
|
if connected && err != nil {
|
||||||
conn, err := e.service.WaitForCapabilityOrClose(onion, cwtchCapability)
|
conn, err := e.service.GetConnection(onion)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if conn.HasCapability(cwtchCapability) {
|
if conn.HasCapability(cwtchCapability) {
|
||||||
e.ignoreOnShutdown(e.peerAuthed)(onion)
|
e.ignoreOnShutdown(e.peerAuthed)(onion)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.Errorf("PeerWithOnion something went very wrong...%v %v", onion, err)
|
|
||||||
if conn != nil {
|
|
||||||
conn.Close()
|
|
||||||
}
|
|
||||||
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
|
||||||
} else {
|
|
||||||
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *engine) makeAntispamPayment(onion string) {
|
// Only issue a disconnected error if we are disconnected (Connect will fail if a connection already exists)
|
||||||
log.Debugf("making antispam payment")
|
if !connected && err != nil {
|
||||||
e.ephemeralServicesLock.Lock()
|
e.ignoreOnShutdown(e.peerDisconnected)(onion)
|
||||||
ephemeralService, ok := e.ephemeralServices[onion]
|
|
||||||
e.ephemeralServicesLock.Unlock()
|
|
||||||
|
|
||||||
if ephemeralService == nil || !ok {
|
|
||||||
log.Debugf("could not find associated group for antispam payment")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Before doing anything, send and event with the current number of token
|
|
||||||
// This may unblock downstream processes who don't have an accurate token count
|
|
||||||
e.PokeTokenCount(onion)
|
|
||||||
|
|
||||||
conn, err := ephemeralService.service.GetConnection(onion)
|
|
||||||
if err == nil {
|
|
||||||
tokenApp, ok := (conn.App()).(*TokenBoardClient)
|
|
||||||
if ok {
|
|
||||||
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenApp.tokenServiceOnion, NewTokenManager())
|
|
||||||
tokenManager := tokenManagerPointer.(*TokenManager)
|
|
||||||
log.Debugf("checking antispam tokens %v", tokenManager.NumTokens())
|
|
||||||
if tokenManager.NumTokens() < 5 {
|
|
||||||
go tokenApp.PurchaseTokens()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// peerWithTokenServer is the entry point for cwtchPeer - server relationships
|
// peerWithTokenServer is the entry point for cwtchPeer - server relationships
|
||||||
// needs to be run in a goroutine as will block on Open.
|
// needs to be run in a goroutine as will block on Open.
|
||||||
func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, tokenServerY string, lastKnownSignature []byte, cachedTokens []*privacypass.Token) {
|
func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, tokenServerY string, lastKnownSignature []byte) {
|
||||||
e.ephemeralServicesLock.Lock()
|
|
||||||
_, exists := e.ephemeralServices[onion]
|
|
||||||
|
|
||||||
|
service, exists := e.ephemeralServices.Load(onion)
|
||||||
if exists {
|
if exists {
|
||||||
e.ephemeralServicesLock.Unlock()
|
connection := service.(*tor.BaseOnionService)
|
||||||
log.Debugf("attempted to join a server with an active connection")
|
if conn, err := connection.GetConnection(onion); err == nil {
|
||||||
return
|
// We are already peered and synced so return...
|
||||||
|
// This will only not-trigger it lastKnownSignature has been wiped, which only happens when ResyncServer is called
|
||||||
|
// in CwtchPeer.
|
||||||
|
if !conn.IsClosed() && len(lastKnownSignature) != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Otherwise...we are going to rebuild the connection(which will result in a bandwidth heavy resync)...
|
||||||
|
e.leaveServer(onion)
|
||||||
|
}
|
||||||
|
// Otherwise...let's reconnect
|
||||||
}
|
}
|
||||||
|
|
||||||
connectionService := &connectionLockedService{service: new(tor.BaseOnionService)}
|
|
||||||
e.ephemeralServices[onion] = connectionService
|
|
||||||
|
|
||||||
connectionService.connectingLock.Lock()
|
|
||||||
defer connectionService.connectingLock.Unlock()
|
|
||||||
e.ephemeralServicesLock.Unlock()
|
|
||||||
|
|
||||||
log.Debugf("Peering with Token Server %v %v", onion, tokenServerOnion)
|
log.Debugf("Peering with Token Server %v %v", onion, tokenServerOnion)
|
||||||
e.ignoreOnShutdown(e.serverConnecting)(onion)
|
e.ignoreOnShutdown(e.serverConnecting)(onion)
|
||||||
// Create a new ephemeral service for this connection
|
// Create a new ephemeral service for this connection
|
||||||
|
ephemeralService := new(tor.BaseOnionService)
|
||||||
eid, epk := primitives.InitializeEphemeralIdentity()
|
eid, epk := primitives.InitializeEphemeralIdentity()
|
||||||
connectionService.service.Init(e.acn, epk, &eid)
|
ephemeralService.Init(e.acn, epk, &eid)
|
||||||
|
|
||||||
Y := new(ristretto255.Element)
|
Y := ristretto255.NewElement()
|
||||||
Y.UnmarshalText([]byte(tokenServerY))
|
Y.UnmarshalText([]byte(tokenServerY))
|
||||||
connected, err := connectionService.service.Connect(onion, NewTokenBoardClient(e.acn, Y, tokenServerOnion, lastKnownSignature, e))
|
connected, err := ephemeralService.Connect(onion, NewTokenBoardClient(e.acn, Y, tokenServerOnion, lastKnownSignature, e.receiveGroupMessage, e.serverSynced, e.serverDisconnected))
|
||||||
|
e.ephemeralServices.Store(onion, ephemeralService)
|
||||||
// If we are already connected...check if we are authed and issue an auth event
|
// If we are already connected...check if we are authed and issue an auth event
|
||||||
// (This allows the ui to be stateless)
|
// (This allows the ui to be stateless)
|
||||||
if connected && err != nil {
|
if connected && err != nil {
|
||||||
conn, err := connectionService.service.GetConnection(onion)
|
conn, err := ephemeralService.GetConnection(onion)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
|
||||||
// If the server is synced, resend the synced status update
|
|
||||||
if conn.HasCapability(groups.CwtchServerSyncedCapability) {
|
if conn.HasCapability(groups.CwtchServerSyncedCapability) {
|
||||||
e.ignoreOnShutdown(e.serverSynced)(onion)
|
e.ignoreOnShutdown(e.serverConnected)(onion)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the server is authed, resend the auth status update
|
|
||||||
if conn.HasCapability(applications.AuthCapability) {
|
|
||||||
// Resend the authed event...
|
|
||||||
e.ignoreOnShutdown(e.serverAuthed)(onion)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we are not authed or synced then we are stuck...
|
|
||||||
e.ignoreOnShutdown(e.serverConnecting)(onion)
|
|
||||||
log.Errorf("server connection attempt issued to active connection")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -485,7 +313,7 @@ func (e *engine) peerWithTokenServer(onion string, tokenServerOnion string, toke
|
||||||
|
|
||||||
func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
|
func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
|
||||||
return func(x string) {
|
return func(x string) {
|
||||||
if !e.shuttingDown.Load() {
|
if !e.shuttingDown {
|
||||||
f(x)
|
f(x)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -493,7 +321,7 @@ func (e *engine) ignoreOnShutdown(f func(string)) func(string) {
|
||||||
|
|
||||||
func (e *engine) ignoreOnShutdown2(f func(string, string)) func(string, string) {
|
func (e *engine) ignoreOnShutdown2(f func(string, string)) func(string, string) {
|
||||||
return func(x, y string) {
|
return func(x, y string) {
|
||||||
if !e.shuttingDown.Load() {
|
if !e.shuttingDown {
|
||||||
f(x, y)
|
f(x, y)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -504,26 +332,6 @@ func (e *engine) peerAuthed(onion string) {
|
||||||
if !known {
|
if !known {
|
||||||
e.authorizations.Store(onion, model.AuthUnknown)
|
e.authorizations.Store(onion, model.AuthUnknown)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: This call uses WAY too much memory, and was responsible for the vast majority
|
|
||||||
// of allocations in the UI
|
|
||||||
// This is because Bine ends up reading the entire response into memory and then passes that back
|
|
||||||
// into Connectivity which eventually extracts just what it needs.
|
|
||||||
// Ideally we would just read from the control stream directly into reusable buffers.
|
|
||||||
|
|
||||||
//details, err := e.acn.GetInfo(onion)
|
|
||||||
//if err == nil {
|
|
||||||
// if hops, exists := details["circuit"]; exists {
|
|
||||||
// e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
|
|
||||||
// event.Handle: onion,
|
|
||||||
// event.Key: "circuit",
|
|
||||||
// event.Data: hops,
|
|
||||||
// }))
|
|
||||||
// }
|
|
||||||
//} else {
|
|
||||||
// log.Errorf("error getting info for onion %v", err)
|
|
||||||
//}
|
|
||||||
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||||
event.RemotePeer: string(onion),
|
event.RemotePeer: string(onion),
|
||||||
event.ConnectionState: ConnectionStateName[AUTHENTICATED],
|
event.ConnectionState: ConnectionStateName[AUTHENTICATED],
|
||||||
|
@ -532,22 +340,22 @@ func (e *engine) peerAuthed(onion string) {
|
||||||
|
|
||||||
func (e *engine) peerConnecting(onion string) {
|
func (e *engine) peerConnecting(onion string) {
|
||||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||||
event.RemotePeer: onion,
|
event.RemotePeer: string(onion),
|
||||||
event.ConnectionState: ConnectionStateName[CONNECTING],
|
event.ConnectionState: ConnectionStateName[CONNECTING],
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) serverConnecting(onion string) {
|
func (e *engine) serverConnecting(onion string) {
|
||||||
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
||||||
event.GroupServer: onion,
|
event.GroupServer: string(onion),
|
||||||
event.ConnectionState: ConnectionStateName[CONNECTING],
|
event.ConnectionState: ConnectionStateName[CONNECTING],
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) serverAuthed(onion string) {
|
func (e *engine) serverConnected(onion string) {
|
||||||
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
||||||
event.GroupServer: onion,
|
event.GroupServer: onion,
|
||||||
event.ConnectionState: ConnectionStateName[AUTHENTICATED],
|
event.ConnectionState: ConnectionStateName[CONNECTED],
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -559,8 +367,6 @@ func (e *engine) serverSynced(onion string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) serverDisconnected(onion string) {
|
func (e *engine) serverDisconnected(onion string) {
|
||||||
e.leaveServer(onion)
|
|
||||||
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{
|
||||||
event.GroupServer: onion,
|
event.GroupServer: onion,
|
||||||
event.ConnectionState: ConnectionStateName[DISCONNECTED],
|
event.ConnectionState: ConnectionStateName[DISCONNECTED],
|
||||||
|
@ -575,44 +381,34 @@ func (e *engine) peerAck(onion string, eventID string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) peerDisconnected(onion string) {
|
func (e *engine) peerDisconnected(onion string) {
|
||||||
|
|
||||||
// Clean up any existing get value requests...
|
|
||||||
e.getValRequests.Range(func(key, value interface{}) bool {
|
|
||||||
keyString := key.(string)
|
|
||||||
if strings.HasPrefix(keyString, onion) {
|
|
||||||
e.getValRequests.Delete(keyString)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
|
|
||||||
// Purge circuit information...
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.ACNInfo, map[event.Field]string{
|
|
||||||
event.Handle: onion,
|
|
||||||
event.Key: "circuit",
|
|
||||||
event.Data: "",
|
|
||||||
}))
|
|
||||||
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
e.eventManager.Publish(event.NewEvent(event.PeerStateChange, map[event.Field]string{
|
||||||
event.RemotePeer: string(onion),
|
event.RemotePeer: string(onion),
|
||||||
event.ConnectionState: ConnectionStateName[DISCONNECTED],
|
event.ConnectionState: ConnectionStateName[DISCONNECTED],
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sendMessageToPeer sends a message to a peer under a given context
|
||||||
|
func (e *engine) sendMessageToPeer(eventID string, onion string, context string, message []byte) error {
|
||||||
|
conn, err := e.service.WaitForCapabilityOrClose(onion, cwtchCapability)
|
||||||
|
if err == nil {
|
||||||
|
peerApp, ok := (conn.App()).(*PeerApp)
|
||||||
|
if ok {
|
||||||
|
peerApp.SendMessage(PeerMessage{eventID, context, message})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.New("failed type assertion conn.App != PeerApp")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
func (e *engine) sendGetValToPeer(eventID, onion, scope, path string) error {
|
func (e *engine) sendGetValToPeer(eventID, onion, scope, path string) error {
|
||||||
log.Debugf("sendGetValMessage to peer %v %v.%v\n", onion, scope, path)
|
log.Debugf("sendGetValMessage to peer %v %v%v\n", onion, scope, path)
|
||||||
getVal := peerGetVal{Scope: scope, Path: path}
|
getVal := peerGetVal{Scope: scope, Path: path}
|
||||||
message, err := json.Marshal(getVal)
|
message, err := json.Marshal(getVal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return e.sendMessageToPeer(eventID, onion, event.ContextGetVal, message)
|
||||||
key := onion + eventID
|
|
||||||
e.getValRequests.Store(key, message)
|
|
||||||
err = e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextGetVal, Data: message})
|
|
||||||
if err != nil {
|
|
||||||
e.getValRequests.Delete(key)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
|
func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
|
||||||
|
@ -623,7 +419,7 @@ func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextRetVal, Data: message})
|
return e.sendMessageToPeer(eventID, onion, event.ContextRetVal, message)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) deleteConnection(id string) {
|
func (e *engine) deleteConnection(id string) {
|
||||||
|
@ -641,69 +437,44 @@ func (e *engine) receiveGroupMessage(server string, gm *groups.EncryptedGroupMes
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendMessageToGroup attempts to sent the given message to the given group id.
|
// sendMessageToGroup attempts to sent the given message to the given group id.
|
||||||
func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, sig []byte, attempts int) {
|
func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, sig []byte) {
|
||||||
// sending to groups can fail for a few reasons (slow server, not enough tokens, etc.)
|
|
||||||
// rather than trying to keep all that logic in method we simply back-off and try again
|
|
||||||
// but if we fail more than 5 times then we report back to the client so they can investigate other options.
|
|
||||||
// Note: This flow only applies to online-and-connected servers (this method will return faster if the server is not
|
|
||||||
// online)
|
|
||||||
if attempts >= 5 {
|
|
||||||
log.Errorf("failed to post a message to a group after %v attempts", attempts)
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "could not make payment to server", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
e.ephemeralServicesLock.Lock()
|
es, ok := e.ephemeralServices.Load(server)
|
||||||
ephemeralService, ok := e.ephemeralServices[server]
|
if es == nil || !ok {
|
||||||
e.ephemeralServicesLock.Unlock()
|
|
||||||
|
|
||||||
if ephemeralService == nil || !ok {
|
|
||||||
log.Debugf("could not send message to group: serve not found")
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-not-found", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-not-found", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
ephemeralService := es.(tapir.Service)
|
||||||
|
|
||||||
conn, err := ephemeralService.service.WaitForCapabilityOrClose(server, groups.CwtchServerSyncedCapability)
|
conn, err := ephemeralService.WaitForCapabilityOrClose(server, groups.CwtchServerSyncedCapability)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
tokenApp, ok := (conn.App()).(*TokenBoardClient)
|
tokenApp, ok := (conn.App()).(*TokenBoardClient)
|
||||||
if ok {
|
if ok {
|
||||||
if spent, numtokens := tokenApp.Post(groupID, ct, sig); !spent {
|
if spent, numtokens := tokenApp.Post(ct, sig); !spent {
|
||||||
// we failed to post, probably because we ran out of tokens... so make a payment
|
// TODO: while this works for the spam guard, it won't work for other forms of payment...
|
||||||
go tokenApp.PurchaseTokens()
|
// Make an -inline- payment, this will hold the goroutine
|
||||||
// backoff
|
if err := tokenApp.MakePayment(); err == nil {
|
||||||
time.Sleep(time.Second * 5)
|
// This really shouldn't fail since we now know we have the required tokens...
|
||||||
// try again
|
if spent, _ := tokenApp.Post(ct, sig); !spent {
|
||||||
log.Debugf("sending message to group error attempt: %v", attempts)
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: err.Error(), event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||||
e.sendMessageToGroup(groupID, server, ct, sig, attempts+1)
|
}
|
||||||
} else {
|
} else {
|
||||||
if numtokens < 5 {
|
// Broadast the token error
|
||||||
go tokenApp.PurchaseTokens()
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: err.Error(), event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||||
}
|
}
|
||||||
|
} else if numtokens < 5 {
|
||||||
|
go tokenApp.MakePayment()
|
||||||
}
|
}
|
||||||
// regardless we return....
|
// regardless we return....
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Debugf("could not send message to group")
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-connection-not-valid", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-connection-not-valid", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO this is becoming cluttered
|
|
||||||
func (e *engine) handlePeerMessage(hostname string, eventID string, context string, message []byte) {
|
func (e *engine) handlePeerMessage(hostname string, eventID string, context string, message []byte) {
|
||||||
log.Debugf("New message from peer: %v %v", hostname, context)
|
log.Debugf("New message from peer: %v %v", hostname, context)
|
||||||
|
if context == event.ContextGetVal {
|
||||||
if context == event.ContextAck {
|
|
||||||
e.peerAck(hostname, eventID)
|
|
||||||
} else if context == event.ContextRetVal {
|
|
||||||
req, ok := e.getValRequests.Load(hostname + eventID)
|
|
||||||
if ok {
|
|
||||||
reqStr := req.([]byte)
|
|
||||||
e.handlePeerRetVal(hostname, reqStr, message)
|
|
||||||
e.getValRequests.Delete(hostname + eventID)
|
|
||||||
} else {
|
|
||||||
log.Errorf("could not find val request for %v %s", hostname, eventID)
|
|
||||||
}
|
|
||||||
} else if context == event.ContextGetVal {
|
|
||||||
var getVal peerGetVal
|
var getVal peerGetVal
|
||||||
err := json.Unmarshal(message, &getVal)
|
err := json.Unmarshal(message, &getVal)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
@ -711,59 +482,8 @@ func (e *engine) handlePeerMessage(hostname string, eventID string, context stri
|
||||||
ev.EventID = eventID
|
ev.EventID = eventID
|
||||||
e.eventManager.Publish(ev)
|
e.eventManager.Publish(ev)
|
||||||
}
|
}
|
||||||
} else if context == event.ContextRequestManifest {
|
|
||||||
for _, message := range e.filesharingSubSystem.RequestManifestParts(eventID) {
|
|
||||||
if err := e.sendPeerMessage(hostname, message); err != nil {
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if context == event.ContextSendManifest {
|
|
||||||
if fileKey, manifest := e.filesharingSubSystem.ReceiveManifestPart(eventID, message); len(manifest) != 0 {
|
|
||||||
// We have a valid manifest
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.ManifestReceived, map[event.Field]string{event.Handle: hostname, event.FileKey: fileKey, event.SerializedManifest: manifest}))
|
|
||||||
}
|
|
||||||
} else if context == event.ContextRequestFile {
|
|
||||||
chunks := e.filesharingSubSystem.ProcessChunkRequest(eventID, message)
|
|
||||||
go func() {
|
|
||||||
for _, message := range chunks {
|
|
||||||
if err := e.sendPeerMessage(hostname, message); err != nil {
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
} else if context == event.ContextSendFile {
|
|
||||||
fileKey, progress, totalChunks, _, title := e.filesharingSubSystem.ProcessChunk(eventID, message)
|
|
||||||
if len(fileKey) != 0 {
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.FileDownloadProgressUpdate, map[event.Field]string{event.FileKey: fileKey, event.Progress: strconv.Itoa(int(progress)), event.FileSizeInChunks: strconv.Itoa(int(totalChunks)), event.NameSuggestion: title}))
|
|
||||||
if progress == totalChunks {
|
|
||||||
if tempFile, filePath, success := e.filesharingSubSystem.VerifyFile(fileKey); success {
|
|
||||||
log.Debugf("file verified and downloaded!")
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: fileKey, event.FilePath: filePath, event.TempFile: tempFile}))
|
|
||||||
} else {
|
|
||||||
log.Debugf("file failed to verify!")
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.FileVerificationFailed, map[event.Field]string{event.FileKey: fileKey}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
// Fall through handler for the default text conversation.
|
e.eventManager.Publish(event.NewEvent(event.NewMessageFromPeer, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.Data: string(message)}))
|
||||||
e.eventManager.Publish(event.NewEvent(event.NewMessageFromPeerEngine, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.Data: string(message)}))
|
|
||||||
|
|
||||||
// Don't ack messages in channel 7
|
|
||||||
// Note: this code explictly doesn't care about malformed messages, we deal with them
|
|
||||||
// later on...we still want to ack the original send...(as some "malformed" messages
|
|
||||||
// may be future-ok)
|
|
||||||
if cm, err := model.DeserializeMessage(string(message)); err == nil {
|
|
||||||
if cm.IsStream() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send an explicit acknowledgement
|
|
||||||
// Every other protocol should have an explicit acknowledgement message e.g. value lookups have responses, and file handling has an explicit flow
|
|
||||||
if err := e.sendPeerMessage(hostname, pmodel.PeerMessage{ID: eventID, Context: event.ContextAck, Data: []byte{}}); err != nil {
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -785,27 +505,11 @@ func (e *engine) handlePeerRetVal(hostname string, getValData, retValData []byte
|
||||||
e.eventManager.Publish(event.NewEventList(event.NewRetValMessageFromPeer, event.RemotePeer, hostname, event.Scope, getVal.Scope, event.Path, getVal.Path, event.Exists, strconv.FormatBool(retVal.Exists), event.Data, retVal.Val))
|
e.eventManager.Publish(event.NewEventList(event.NewRetValMessageFromPeer, event.RemotePeer, hostname, event.Scope, getVal.Scope, event.Path, getVal.Path, event.Exists, strconv.FormatBool(retVal.Exists), event.Data, retVal.Val))
|
||||||
}
|
}
|
||||||
|
|
||||||
// leaveServer disconnects from a server and deletes the ephemeral service
|
|
||||||
func (e *engine) leaveServer(server string) {
|
func (e *engine) leaveServer(server string) {
|
||||||
e.ephemeralServicesLock.Lock()
|
es, ok := e.ephemeralServices.Load(server)
|
||||||
defer e.ephemeralServicesLock.Unlock()
|
|
||||||
ephemeralService, ok := e.ephemeralServices[server]
|
|
||||||
if ok {
|
if ok {
|
||||||
ephemeralService.service.Shutdown()
|
ephemeralService := es.(tapir.Service)
|
||||||
delete(e.ephemeralServices, server)
|
ephemeralService.Shutdown()
|
||||||
|
e.ephemeralServices.Delete(server)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *engine) sendPeerMessage(handle string, message pmodel.PeerMessage) error {
|
|
||||||
conn, err := e.service.WaitForCapabilityOrClose(handle, cwtchCapability)
|
|
||||||
if err == nil {
|
|
||||||
peerApp, ok := (conn.App()).(*PeerApp)
|
|
||||||
if ok {
|
|
||||||
return peerApp.SendMessage(message)
|
|
||||||
}
|
|
||||||
log.Debugf("could not derive peer app: %v", err)
|
|
||||||
return fmt.Errorf("could not find peer app to send message to: %v", handle)
|
|
||||||
}
|
|
||||||
log.Debugf("could not send peer message: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,59 +0,0 @@
|
||||||
package connections
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/protocol/groups"
|
|
||||||
"encoding/base64"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Implement Token Service Handler for Engine
|
|
||||||
|
|
||||||
// GroupMessageHandler receives a server and an encrypted group message
|
|
||||||
func (e *engine) GroupMessageHandler(server string, gm *groups.EncryptedGroupMessage) {
|
|
||||||
e.receiveGroupMessage(server, gm)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PostingFailed notifies a peer that a message failed to post
|
|
||||||
func (e *engine) PostingFailed(group string, sig []byte) {
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: group, event.Error: "failed to post message", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerAuthedHandler is notified when a server has successfully authed
|
|
||||||
func (e *engine) ServerAuthedHandler(server string) {
|
|
||||||
e.serverAuthed(server)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerSyncedHandler is notified when a server has successfully synced
|
|
||||||
func (e *engine) ServerSyncedHandler(server string) {
|
|
||||||
e.serverSynced(server)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerClosedHandler is notified when a server connection has closed, the result is ignored during shutdown...
|
|
||||||
func (e *engine) ServerClosedHandler(server string) {
|
|
||||||
e.ignoreOnShutdown(e.serverDisconnected)(server)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTokenHandler is notified after a successful token acquisition
|
|
||||||
func (e *engine) NewTokenHandler(tokenService string, tokens []*privacypass.Token) {
|
|
||||||
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
|
|
||||||
tokenManager := tokenManagerPointer.(*TokenManager)
|
|
||||||
tokenManager.StoreNewTokens(tokens)
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(tokenManager.NumTokens())}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchToken is notified when a server requires a new token from the client
|
|
||||||
func (e *engine) FetchToken(tokenService string) (*privacypass.Token, int, error) {
|
|
||||||
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
|
|
||||||
tokenManager := tokenManagerPointer.(*TokenManager)
|
|
||||||
token, numTokens, err := tokenManager.FetchToken()
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(numTokens)}))
|
|
||||||
return token, numTokens, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *engine) PokeTokenCount(tokenService string) {
|
|
||||||
tokenManagerPointer, _ := e.tokenManagers.LoadOrStore(tokenService, NewTokenManager())
|
|
||||||
tokenManager := tokenManagerPointer.(*TokenManager)
|
|
||||||
e.eventManager.Publish(event.NewEvent(event.TokenManagerInfo, map[event.Field]string{event.ServerTokenOnion: tokenService, event.ServerTokenCount: strconv.Itoa(tokenManager.NumTokens())}))
|
|
||||||
}
|
|
|
@ -1,14 +0,0 @@
|
||||||
package connections
|
|
||||||
|
|
||||||
import "git.openprivacy.ca/cwtch.im/tapir"
|
|
||||||
|
|
||||||
type EngineHooks interface {
|
|
||||||
SendPeerMessage(connection tapir.Connection, message []byte) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type DefaultEngineHooks struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (deh DefaultEngineHooks) SendPeerMessage(connection tapir.Connection, message []byte) error {
|
|
||||||
return connection.Send(message)
|
|
||||||
}
|
|
|
@ -1,59 +0,0 @@
|
||||||
package connections
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/utils"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MakePayment uses the PoW based token protocol to obtain more tokens
|
|
||||||
func MakePayment(tokenServiceOnion string, tokenService *privacypass.TokenServer, acn connectivity.ACN, handler TokenBoardHandler) error {
|
|
||||||
log.Debugf("making a payment")
|
|
||||||
id, sk := primitives.InitializeEphemeralIdentity()
|
|
||||||
client := new(tor.BaseOnionService)
|
|
||||||
client.Init(acn, sk, &id)
|
|
||||||
defer client.Shutdown()
|
|
||||||
|
|
||||||
tokenApplication := new(applications.TokenApplication)
|
|
||||||
tokenApplication.TokenService = tokenService
|
|
||||||
powTokenApp := new(applications.ApplicationChain).
|
|
||||||
ChainApplication(new(applications.ProofOfWorkApplication), applications.SuccessfulProofOfWorkCapability).
|
|
||||||
ChainApplication(tokenApplication, applications.HasTokensCapability)
|
|
||||||
|
|
||||||
log.Debugf("waiting for successful PoW auth...")
|
|
||||||
tp := utils.TimeoutPolicy(time.Second * 30)
|
|
||||||
err := tp.ExecuteAction(func() error {
|
|
||||||
connected, err := client.Connect(tokenServiceOnion, powTokenApp)
|
|
||||||
if connected && err == nil {
|
|
||||||
log.Debugf("waiting for successful token acquisition...")
|
|
||||||
conn, err := client.WaitForCapabilityOrClose(tokenServiceOnion, applications.HasTokensCapability)
|
|
||||||
if err == nil {
|
|
||||||
powtapp, ok := conn.App().(*applications.TokenApplication)
|
|
||||||
if ok {
|
|
||||||
log.Debugf("updating tokens")
|
|
||||||
handler.NewTokenHandler(tokenServiceOnion, powtapp.Tokens)
|
|
||||||
log.Debugf("transcript: %v", powtapp.Transcript().OutputTranscriptToAudit())
|
|
||||||
conn.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
log.Errorf("invalid cast of powapp. this should never happen %v %v", powtapp, reflect.TypeOf(conn.App()))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
|
|
||||||
// we timed out
|
|
||||||
if err != nil {
|
|
||||||
log.Debugf("make payment timeout...")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
|
@ -2,14 +2,11 @@ package connections
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/event"
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/model"
|
|
||||||
model2 "cwtch.im/cwtch/protocol/model"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir"
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
"sync/atomic"
|
"sync"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const cwtchCapability = tapir.Capability("cwtchCapability")
|
const cwtchCapability = tapir.Capability("cwtchCapability")
|
||||||
|
@ -19,14 +16,22 @@ type PeerApp struct {
|
||||||
applications.AuthApp
|
applications.AuthApp
|
||||||
connection tapir.Connection
|
connection tapir.Connection
|
||||||
MessageHandler func(string, string, string, []byte)
|
MessageHandler func(string, string, string, []byte)
|
||||||
|
RetValHandler func(string, []byte, []byte)
|
||||||
IsBlocked func(string) bool
|
IsBlocked func(string) bool
|
||||||
IsAllowed func(string) bool
|
IsAllowed func(string) bool
|
||||||
OnAcknowledgement func(string, string)
|
OnAcknowledgement func(string, string)
|
||||||
OnAuth func(string)
|
OnAuth func(string)
|
||||||
OnClose func(string)
|
OnClose func(string)
|
||||||
OnConnecting func(string)
|
OnConnecting func(string)
|
||||||
OnSendMessage func(connection tapir.Connection, message []byte) error
|
|
||||||
version atomic.Value
|
getValRequests sync.Map // [string]string eventID:Data
|
||||||
|
}
|
||||||
|
|
||||||
|
// PeerMessage is an encapsulation that can be used by higher level applications
|
||||||
|
type PeerMessage struct {
|
||||||
|
ID string // A unique Message ID (primarily used for acknowledgments)
|
||||||
|
Context string // A unique context identifier i.e. im.cwtch.chat
|
||||||
|
Data []byte // The serialized data packet.
|
||||||
}
|
}
|
||||||
|
|
||||||
type peerGetVal struct {
|
type peerGetVal struct {
|
||||||
|
@ -38,9 +43,6 @@ type peerRetVal struct {
|
||||||
Exists bool
|
Exists bool
|
||||||
}
|
}
|
||||||
|
|
||||||
const Version1 = 0x01
|
|
||||||
const Version2 = 0x02
|
|
||||||
|
|
||||||
// NewInstance should always return a new instantiation of the application.
|
// NewInstance should always return a new instantiation of the application.
|
||||||
func (pa *PeerApp) NewInstance() tapir.Application {
|
func (pa *PeerApp) NewInstance() tapir.Application {
|
||||||
newApp := new(PeerApp)
|
newApp := new(PeerApp)
|
||||||
|
@ -51,8 +53,7 @@ func (pa *PeerApp) NewInstance() tapir.Application {
|
||||||
newApp.OnAuth = pa.OnAuth
|
newApp.OnAuth = pa.OnAuth
|
||||||
newApp.OnClose = pa.OnClose
|
newApp.OnClose = pa.OnClose
|
||||||
newApp.OnConnecting = pa.OnConnecting
|
newApp.OnConnecting = pa.OnConnecting
|
||||||
newApp.OnSendMessage = pa.OnSendMessage
|
newApp.RetValHandler = pa.RetValHandler
|
||||||
newApp.version.Store(Version1)
|
|
||||||
return newApp
|
return newApp
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,28 +71,11 @@ func (pa *PeerApp) Init(connection tapir.Connection) {
|
||||||
pa.connection.Close()
|
pa.connection.Close()
|
||||||
pa.OnClose(connection.Hostname())
|
pa.OnClose(connection.Hostname())
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
// we are authenticated
|
|
||||||
// attempt to negotiate a more efficient packet format...
|
|
||||||
// we are abusing the context here slightly by sending a "malformed" GetVal request.
|
|
||||||
// as a rule cwtch ignores getval requests that it cannot deserialize so older clients will ignore this
|
|
||||||
// message.
|
|
||||||
// version *must* be the first message sent to prevent race conditions for other events fired after-auth
|
|
||||||
// (e.g. getVal requests)
|
|
||||||
// as such, we send this message before we update the rest of the system
|
|
||||||
_ = pa.SendMessage(model2.PeerMessage{
|
|
||||||
ID: event.ContextVersion,
|
|
||||||
Context: event.ContextGetVal,
|
|
||||||
Data: []byte{Version2},
|
|
||||||
})
|
|
||||||
|
|
||||||
pa.OnAuth(connection.Hostname())
|
pa.OnAuth(connection.Hostname())
|
||||||
go pa.listen()
|
go pa.listen()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// The auth protocol wasn't completed, we can safely shutdown the connection
|
// The auth protocol wasn't completed, we can safely shutdown the connection
|
||||||
// send an onclose here because we *may* have triggered this and we want to retry later...
|
|
||||||
pa.OnClose(connection.Hostname())
|
|
||||||
connection.Close()
|
connection.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -104,46 +88,25 @@ func (pa *PeerApp) listen() {
|
||||||
pa.OnClose(pa.connection.Hostname())
|
pa.OnClose(pa.connection.Hostname())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
var peerMessage PeerMessage
|
||||||
var packet model2.PeerMessage
|
err := json.Unmarshal(message, &peerMessage)
|
||||||
var err error
|
|
||||||
|
|
||||||
if pa.version.Load() == Version1 {
|
|
||||||
err = json.Unmarshal(message, &packet)
|
|
||||||
} else if pa.version.Load() == Version2 {
|
|
||||||
parsePacket, parseErr := model2.ParsePeerMessage(message)
|
|
||||||
// if all else fails...attempt to process this message as a version 1 message
|
|
||||||
if parseErr != nil {
|
|
||||||
err = json.Unmarshal(message, &packet)
|
|
||||||
} else {
|
|
||||||
packet = *parsePacket
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
log.Errorf("invalid version")
|
|
||||||
pa.OnClose(pa.connection.Hostname())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if pa.IsAllowed(pa.connection.Hostname()) {
|
switch peerMessage.Context {
|
||||||
// we don't expose im.cwtch.version messages outside of PeerApp (ideally at some point in the future we
|
case event.ContextAck:
|
||||||
// can remove this check all together)
|
pa.OnAcknowledgement(pa.connection.Hostname(), peerMessage.ID)
|
||||||
if packet.ID == event.ContextVersion {
|
case event.ContextRetVal:
|
||||||
if pa.version.Load() == Version1 && len(packet.Data) == 1 && packet.Data[0] == Version2 {
|
req, ok := pa.getValRequests.Load(peerMessage.ID)
|
||||||
log.Debugf("switching to protocol version 2")
|
if ok {
|
||||||
pa.version.Store(Version2)
|
reqStr := []byte(req.(string))
|
||||||
}
|
pa.RetValHandler(pa.connection.Hostname(), reqStr, peerMessage.Data)
|
||||||
} else {
|
pa.getValRequests.Delete(peerMessage.ID)
|
||||||
if cm, err := model.DeserializeMessage(string(packet.Data)); err == nil {
|
}
|
||||||
if cm.TransitTime != nil {
|
default:
|
||||||
rt := time.Now().UTC()
|
if pa.IsAllowed(pa.connection.Hostname()) {
|
||||||
cm.RecvTime = &rt
|
pa.MessageHandler(pa.connection.Hostname(), peerMessage.ID, peerMessage.Context, peerMessage.Data)
|
||||||
data, _ := json.Marshal(cm)
|
|
||||||
packet.Data = data
|
// Acknowledge the message
|
||||||
}
|
pa.SendMessage(PeerMessage{peerMessage.ID, event.ContextAck, []byte{}})
|
||||||
}
|
|
||||||
pa.MessageHandler(pa.connection.Hostname(), packet.ID, packet.Context, packet.Data)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -154,42 +117,10 @@ func (pa *PeerApp) listen() {
|
||||||
|
|
||||||
// SendMessage sends the peer a preformatted message
|
// SendMessage sends the peer a preformatted message
|
||||||
// NOTE: This is a stub, we will likely want to extend this to better reflect the desired protocol
|
// NOTE: This is a stub, we will likely want to extend this to better reflect the desired protocol
|
||||||
func (pa *PeerApp) SendMessage(message model2.PeerMessage) error {
|
func (pa *PeerApp) SendMessage(message PeerMessage) {
|
||||||
var serialized []byte
|
if message.Context == event.ContextGetVal {
|
||||||
var err error
|
pa.getValRequests.Store(message.ID, string(message.Data))
|
||||||
|
|
||||||
if cm, err := model.DeserializeMessage(string(message.Data)); err == nil {
|
|
||||||
if cm.SendTime != nil {
|
|
||||||
tt := time.Now().UTC()
|
|
||||||
cm.TransitTime = &tt
|
|
||||||
data, _ := json.Marshal(cm)
|
|
||||||
message.Data = data
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
serialized, _ := json.Marshal(message)
|
||||||
if pa.version.Load() == Version2 {
|
pa.connection.Send(serialized)
|
||||||
// treat data as a pre-serialized string, not as a byte array (which will be base64 encoded and bloat the packet size)
|
|
||||||
serialized = message.Serialize()
|
|
||||||
} else {
|
|
||||||
serialized, err = json.Marshal(message)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
err = pa.OnSendMessage(pa.connection, serialized)
|
|
||||||
|
|
||||||
// at this point we have tried to send a message to a peer only to find that something went wrong.
|
|
||||||
// we don't know *what* went wrong - the most likely explanation is the peer went offline in the time between
|
|
||||||
// sending the message and it arriving in the engine to be sent. Other explanations include problems with Tor,
|
|
||||||
// a dropped wifi connection.
|
|
||||||
// Regardless, we error out this message and close this peer app assuming it cannot be used again.
|
|
||||||
// We expect that cwtch will eventually recreate this connection and the app.
|
|
||||||
if err != nil {
|
|
||||||
// close any associated sockets
|
|
||||||
pa.connection.Close()
|
|
||||||
// tell cwtch this connection is no longer valid
|
|
||||||
pa.OnClose(err.Error())
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,54 +0,0 @@
|
||||||
package connections
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TokenManager maintains a list of tokens associated with a single TokenServer
|
|
||||||
type TokenManager struct {
|
|
||||||
lock sync.Mutex
|
|
||||||
tokens map[string]*privacypass.Token
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTokenManager() *TokenManager {
|
|
||||||
tm := new(TokenManager)
|
|
||||||
tm.tokens = make(map[string]*privacypass.Token)
|
|
||||||
return tm
|
|
||||||
}
|
|
||||||
|
|
||||||
// StoreNewTokens adds tokens to the internal list managed by this TokenManager
|
|
||||||
func (tm *TokenManager) StoreNewTokens(tokens []*privacypass.Token) {
|
|
||||||
tm.lock.Lock()
|
|
||||||
defer tm.lock.Unlock()
|
|
||||||
log.Debugf("acquired %v new tokens", tokens)
|
|
||||||
for _, token := range tokens {
|
|
||||||
serialized, _ := json.Marshal(token)
|
|
||||||
tm.tokens[string(serialized)] = token
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumTokens returns the current number of tokens
|
|
||||||
func (tm *TokenManager) NumTokens() int {
|
|
||||||
tm.lock.Lock()
|
|
||||||
defer tm.lock.Unlock()
|
|
||||||
return len(tm.tokens)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchToken removes a token from the internal list and returns it, along with a count of the remaining tokens.
|
|
||||||
// Errors if no tokens available.
|
|
||||||
func (tm *TokenManager) FetchToken() (*privacypass.Token, int, error) {
|
|
||||||
tm.lock.Lock()
|
|
||||||
defer tm.lock.Unlock()
|
|
||||||
if len(tm.tokens) == 0 {
|
|
||||||
return nil, 0, errors.New("no more tokens")
|
|
||||||
}
|
|
||||||
for serializedToken, token := range tm.tokens {
|
|
||||||
delete(tm.tokens, serializedToken)
|
|
||||||
return token, len(tm.tokens), nil
|
|
||||||
}
|
|
||||||
return nil, 0, errors.New("no more tokens")
|
|
||||||
}
|
|
|
@ -3,8 +3,11 @@ package connections
|
||||||
import (
|
import (
|
||||||
"cwtch.im/cwtch/protocol/groups"
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir"
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
"git.openprivacy.ca/openprivacy/connectivity"
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
@ -12,26 +15,16 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TokenBoardHandler encapsulates all the various handlers a client needs to interact with a token board
|
|
||||||
// this includes handlers to receive new messages, as well as handlers to manage tokens.
|
|
||||||
type TokenBoardHandler interface {
|
|
||||||
GroupMessageHandler(server string, gm *groups.EncryptedGroupMessage)
|
|
||||||
ServerAuthedHandler(server string)
|
|
||||||
ServerSyncedHandler(server string)
|
|
||||||
ServerClosedHandler(server string)
|
|
||||||
NewTokenHandler(tokenService string, tokens []*privacypass.Token)
|
|
||||||
PostingFailed(server string, sig []byte)
|
|
||||||
FetchToken(tokenService string) (*privacypass.Token, int, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTokenBoardClient generates a new Client for Token Board
|
// NewTokenBoardClient generates a new Client for Token Board
|
||||||
func NewTokenBoardClient(acn connectivity.ACN, Y *ristretto255.Element, tokenServiceOnion string, lastKnownSignature []byte, tokenBoardHandler TokenBoardHandler) tapir.Application {
|
func NewTokenBoardClient(acn connectivity.ACN, Y *ristretto255.Element, tokenServiceOnion string, lastKnownSignature []byte, groupMessageHandler func(server string, gm *groups.EncryptedGroupMessage), serverSyncedHandler func(server string), serverClosedHandler func(server string)) tapir.Application {
|
||||||
tba := new(TokenBoardClient)
|
tba := new(TokenBoardClient)
|
||||||
tba.acn = acn
|
tba.acn = acn
|
||||||
tba.tokenService = privacypass.NewTokenServer()
|
tba.tokenService = privacypass.NewTokenServer()
|
||||||
tba.tokenService.Y = Y
|
tba.tokenService.Y = Y
|
||||||
tba.tokenServiceOnion = tokenServiceOnion
|
tba.tokenServiceOnion = tokenServiceOnion
|
||||||
tba.tokenBoardHandler = tokenBoardHandler
|
tba.receiveGroupMessageHandler = groupMessageHandler
|
||||||
|
tba.serverSyncedHandler = serverSyncedHandler
|
||||||
|
tba.serverClosedHandler = serverClosedHandler
|
||||||
tba.lastKnownSignature = lastKnownSignature
|
tba.lastKnownSignature = lastKnownSignature
|
||||||
return tba
|
return tba
|
||||||
}
|
}
|
||||||
|
@ -39,24 +32,26 @@ func NewTokenBoardClient(acn connectivity.ACN, Y *ristretto255.Element, tokenSer
|
||||||
// TokenBoardClient defines a client for the TokenBoard server
|
// TokenBoardClient defines a client for the TokenBoard server
|
||||||
type TokenBoardClient struct {
|
type TokenBoardClient struct {
|
||||||
applications.AuthApp
|
applications.AuthApp
|
||||||
connection tapir.Connection
|
connection tapir.Connection
|
||||||
tokenBoardHandler TokenBoardHandler
|
receiveGroupMessageHandler func(server string, gm *groups.EncryptedGroupMessage)
|
||||||
|
serverSyncedHandler func(server string)
|
||||||
|
serverClosedHandler func(server string)
|
||||||
|
|
||||||
// Token service handling
|
// Token service handling
|
||||||
acn connectivity.ACN
|
acn connectivity.ACN
|
||||||
|
tokens []*privacypass.Token
|
||||||
|
tokenLock sync.Mutex
|
||||||
tokenService *privacypass.TokenServer
|
tokenService *privacypass.TokenServer
|
||||||
tokenServiceOnion string
|
tokenServiceOnion string
|
||||||
lastKnownSignature []byte
|
lastKnownSignature []byte
|
||||||
|
|
||||||
postLock sync.Mutex
|
|
||||||
postQueue []groups.CachedEncryptedGroupMessage
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewInstance Client a new TokenBoardApp
|
// NewInstance Client a new TokenBoardApp
|
||||||
func (ta *TokenBoardClient) NewInstance() tapir.Application {
|
func (ta *TokenBoardClient) NewInstance() tapir.Application {
|
||||||
tba := new(TokenBoardClient)
|
tba := new(TokenBoardClient)
|
||||||
tba.tokenBoardHandler = ta.tokenBoardHandler
|
tba.serverSyncedHandler = ta.serverSyncedHandler
|
||||||
|
tba.serverClosedHandler = ta.serverClosedHandler
|
||||||
|
tba.receiveGroupMessageHandler = ta.receiveGroupMessageHandler
|
||||||
tba.acn = ta.acn
|
tba.acn = ta.acn
|
||||||
tba.tokenService = ta.tokenService
|
tba.tokenService = ta.tokenService
|
||||||
tba.tokenServiceOnion = ta.tokenServiceOnion
|
tba.tokenServiceOnion = ta.tokenServiceOnion
|
||||||
|
@ -66,22 +61,17 @@ func (ta *TokenBoardClient) NewInstance() tapir.Application {
|
||||||
|
|
||||||
// Init initializes the cryptographic TokenBoardApp
|
// Init initializes the cryptographic TokenBoardApp
|
||||||
func (ta *TokenBoardClient) Init(connection tapir.Connection) {
|
func (ta *TokenBoardClient) Init(connection tapir.Connection) {
|
||||||
// connection.Hostname is always valid because we are ALWAYS the initiating party
|
|
||||||
log.Debugf("connecting to server: %v", connection.Hostname())
|
|
||||||
ta.AuthApp.Init(connection)
|
ta.AuthApp.Init(connection)
|
||||||
log.Debugf("server protocol complete: %v", connection.Hostname())
|
|
||||||
if connection.HasCapability(applications.AuthCapability) {
|
if connection.HasCapability(applications.AuthCapability) {
|
||||||
log.Debugf("Successfully Initialized Connection to %v", connection.Hostname())
|
|
||||||
ta.connection = connection
|
ta.connection = connection
|
||||||
ta.tokenBoardHandler.ServerAuthedHandler(connection.Hostname())
|
ta.connection.SetCapability(groups.CwtchServerSyncedCapability)
|
||||||
|
log.Debugf("Successfully Initialized Connection")
|
||||||
go ta.Listen()
|
go ta.Listen()
|
||||||
// Optimistically acquire many tokens for this server...
|
// Optimistically acquire many tokens for this server...
|
||||||
go ta.PurchaseTokens()
|
go ta.MakePayment()
|
||||||
go ta.PurchaseTokens()
|
go ta.MakePayment()
|
||||||
ta.Replay()
|
ta.Replay()
|
||||||
} else {
|
} else {
|
||||||
log.Debugf("Error Connecting to %v", connection.Hostname())
|
|
||||||
ta.tokenBoardHandler.ServerClosedHandler(connection.Hostname())
|
|
||||||
connection.Close()
|
connection.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -93,7 +83,7 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
data := ta.connection.Expect()
|
data := ta.connection.Expect()
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
log.Debugf("Server closed the connection...")
|
log.Debugf("Server closed the connection...")
|
||||||
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
ta.serverClosedHandler(ta.connection.Hostname())
|
||||||
return // connection is closed
|
return // connection is closed
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -101,7 +91,7 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
var message groups.Message
|
var message groups.Message
|
||||||
if err := json.Unmarshal(data, &message); err != nil {
|
if err := json.Unmarshal(data, &message); err != nil {
|
||||||
log.Debugf("Server sent an unexpected message, closing the connection: %v", err)
|
log.Debugf("Server sent an unexpected message, closing the connection: %v", err)
|
||||||
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
ta.serverClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -109,28 +99,15 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
switch message.MessageType {
|
switch message.MessageType {
|
||||||
case groups.NewMessageMessage:
|
case groups.NewMessageMessage:
|
||||||
if message.NewMessage != nil {
|
if message.NewMessage != nil {
|
||||||
ta.tokenBoardHandler.GroupMessageHandler(ta.connection.Hostname(), &message.NewMessage.EGM)
|
ta.receiveGroupMessageHandler(ta.connection.Hostname(), &message.NewMessage.EGM)
|
||||||
} else {
|
} else {
|
||||||
log.Debugf("Server sent an unexpected NewMessage, closing the connection: %s", data)
|
log.Debugf("Server sent an unexpected NewMessage, closing the connection: %s", data)
|
||||||
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
ta.serverClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
case groups.PostResultMessage:
|
case groups.PostResultMessage:
|
||||||
ta.postLock.Lock()
|
// TODO handle failure
|
||||||
egm := ta.postQueue[0]
|
|
||||||
ta.postQueue = ta.postQueue[1:]
|
|
||||||
ta.postLock.Unlock()
|
|
||||||
if !message.PostResult.Success {
|
|
||||||
log.Debugf("post result message: %v", message.PostResult)
|
|
||||||
// Retry using another token
|
|
||||||
posted, _ := ta.Post(egm.Group, egm.Ciphertext, egm.Signature)
|
|
||||||
// if posting failed...
|
|
||||||
if !posted {
|
|
||||||
log.Errorf("error posting message")
|
|
||||||
ta.tokenBoardHandler.PostingFailed(egm.Group, egm.Signature)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case groups.ReplayResultMessage:
|
case groups.ReplayResultMessage:
|
||||||
if message.ReplayResult != nil {
|
if message.ReplayResult != nil {
|
||||||
log.Debugf("Replaying %v Messages...", message.ReplayResult.NumMessages)
|
log.Debugf("Replaying %v Messages...", message.ReplayResult.NumMessages)
|
||||||
|
@ -139,24 +116,23 @@ func (ta *TokenBoardClient) Listen() {
|
||||||
|
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection")
|
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection")
|
||||||
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
ta.serverClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
egm := &groups.EncryptedGroupMessage{}
|
egm := &groups.EncryptedGroupMessage{}
|
||||||
if err := json.Unmarshal(data, egm); err == nil {
|
if err := json.Unmarshal(data, egm); err == nil {
|
||||||
ta.tokenBoardHandler.GroupMessageHandler(ta.connection.Hostname(), egm)
|
ta.receiveGroupMessageHandler(ta.connection.Hostname(), egm)
|
||||||
ta.lastKnownSignature = egm.Signature
|
ta.lastKnownSignature = egm.Signature
|
||||||
} else {
|
} else {
|
||||||
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection: %v", err)
|
log.Debugf("Server sent an unexpected EncryptedGroupMessage, closing the connection: %v", err)
|
||||||
ta.tokenBoardHandler.ServerClosedHandler(ta.connection.Hostname())
|
ta.serverClosedHandler(ta.connection.Hostname())
|
||||||
ta.connection.Close()
|
ta.connection.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ta.tokenBoardHandler.ServerSyncedHandler(ta.connection.Hostname())
|
ta.serverSyncedHandler(ta.connection.Hostname())
|
||||||
ta.connection.SetCapability(groups.CwtchServerSyncedCapability)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -170,35 +146,64 @@ func (ta *TokenBoardClient) Replay() {
|
||||||
|
|
||||||
// PurchaseTokens purchases the given number of tokens from the server (using the provided payment handler)
|
// PurchaseTokens purchases the given number of tokens from the server (using the provided payment handler)
|
||||||
func (ta *TokenBoardClient) PurchaseTokens() {
|
func (ta *TokenBoardClient) PurchaseTokens() {
|
||||||
MakePayment(ta.tokenServiceOnion, ta.tokenService, ta.acn, ta.tokenBoardHandler)
|
ta.MakePayment()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Post sends a Post Request to the server
|
// Post sends a Post Request to the server
|
||||||
func (ta *TokenBoardClient) Post(group string, ct []byte, sig []byte) (bool, int) {
|
func (ta *TokenBoardClient) Post(ct []byte, sig []byte) (bool, int) {
|
||||||
egm := groups.EncryptedGroupMessage{Ciphertext: ct, Signature: sig}
|
egm := groups.EncryptedGroupMessage{Ciphertext: ct, Signature: sig}
|
||||||
token, numTokens, err := ta.NextToken(egm.ToBytes(), ta.connection.Hostname())
|
token, numTokens, err := ta.NextToken(egm.ToBytes(), ta.connection.Hostname())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
data, _ := json.Marshal(groups.Message{MessageType: groups.PostRequestMessage, PostRequest: &groups.PostRequest{EGM: egm, Token: token}})
|
data, _ := json.Marshal(groups.Message{MessageType: groups.PostRequestMessage, PostRequest: &groups.PostRequest{EGM: egm, Token: token}})
|
||||||
ta.postLock.Lock()
|
|
||||||
// ONLY put group in the EGM as a cache / for error reporting...
|
|
||||||
ta.postQueue = append(ta.postQueue, groups.CachedEncryptedGroupMessage{Group: group, EncryptedGroupMessage: egm})
|
|
||||||
log.Debugf("Message Length: %s %v", data, len(data))
|
log.Debugf("Message Length: %s %v", data, len(data))
|
||||||
err := ta.connection.Send(data)
|
ta.connection.Send(data)
|
||||||
ta.postLock.Unlock()
|
|
||||||
if err != nil {
|
|
||||||
return false, numTokens
|
|
||||||
}
|
|
||||||
return true, numTokens
|
return true, numTokens
|
||||||
}
|
}
|
||||||
log.Debugf("No Valid Tokens: %v", err)
|
log.Debugf("No Valid Tokens: %v", err)
|
||||||
return false, numTokens
|
return false, numTokens
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MakePayment uses the PoW based token protocol to obtain more tokens
|
||||||
|
func (ta *TokenBoardClient) MakePayment() error {
|
||||||
|
log.Debugf("Making a Payment %v", ta)
|
||||||
|
id, sk := primitives.InitializeEphemeralIdentity()
|
||||||
|
client := new(tor.BaseOnionService)
|
||||||
|
client.Init(ta.acn, sk, &id)
|
||||||
|
|
||||||
|
tokenApplication := new(applications.TokenApplication)
|
||||||
|
tokenApplication.TokenService = ta.tokenService
|
||||||
|
powTokenApp := new(applications.ApplicationChain).
|
||||||
|
ChainApplication(new(applications.ProofOfWorkApplication), applications.SuccessfulProofOfWorkCapability).
|
||||||
|
ChainApplication(tokenApplication, applications.HasTokensCapability)
|
||||||
|
client.Connect(ta.tokenServiceOnion, powTokenApp)
|
||||||
|
log.Debugf("Waiting for successful PoW Auth...")
|
||||||
|
conn, err := client.WaitForCapabilityOrClose(ta.tokenServiceOnion, applications.HasTokensCapability)
|
||||||
|
if err == nil {
|
||||||
|
powtapp, _ := conn.App().(*applications.TokenApplication)
|
||||||
|
// Update tokens...we need a lock here to prevent SpendToken from modifying the tokens
|
||||||
|
// during this process..
|
||||||
|
log.Debugf("Updating Tokens")
|
||||||
|
ta.tokenLock.Lock()
|
||||||
|
ta.tokens = append(ta.tokens, powtapp.Tokens...)
|
||||||
|
ta.tokenLock.Unlock()
|
||||||
|
log.Debugf("Transcript: %v", powtapp.Transcript().OutputTranscriptToAudit())
|
||||||
|
conn.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
log.Debugf("Error making payment: to %v %v", ta.tokenServiceOnion, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// NextToken retrieves the next token
|
// NextToken retrieves the next token
|
||||||
func (ta *TokenBoardClient) NextToken(data []byte, hostname string) (privacypass.SpentToken, int, error) {
|
func (ta *TokenBoardClient) NextToken(data []byte, hostname string) (privacypass.SpentToken, int, error) {
|
||||||
token, numtokens, err := ta.tokenBoardHandler.FetchToken(ta.tokenServiceOnion)
|
// Taken the first new token, we need a lock here because tokens can be appended by MakePayment
|
||||||
if err != nil {
|
// which could result in weird behaviour...
|
||||||
return privacypass.SpentToken{}, numtokens, err
|
ta.tokenLock.Lock()
|
||||||
|
defer ta.tokenLock.Unlock()
|
||||||
|
if len(ta.tokens) == 0 {
|
||||||
|
return privacypass.SpentToken{}, len(ta.tokens), errors.New("no more tokens")
|
||||||
}
|
}
|
||||||
return token.SpendToken(append(data, hostname...)), numtokens, nil
|
token := ta.tokens[0]
|
||||||
|
ta.tokens = ta.tokens[1:]
|
||||||
|
return token.SpendToken(append(data, hostname...)), len(ta.tokens), nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,87 +0,0 @@
|
||||||
package files
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ChunkSpec is a wrapper around an uncompressed array of chunk identifiers
|
|
||||||
type ChunkSpec []uint64
|
|
||||||
|
|
||||||
// CreateChunkSpec given a full list of chunks with their downloaded status (true for downloaded, false otherwise)
|
|
||||||
// derives a list of identifiers of chunks that have not been downloaded yet
|
|
||||||
func CreateChunkSpec(progress []bool) ChunkSpec {
|
|
||||||
chunks := ChunkSpec{}
|
|
||||||
for i, p := range progress {
|
|
||||||
if !p {
|
|
||||||
chunks = append(chunks, uint64(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return chunks
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deserialize takes in a compressed chunk spec and returns an uncompressed ChunkSpec or an error
|
|
||||||
// if the serialized chunk spec has format errors
|
|
||||||
func Deserialize(serialized string) (*ChunkSpec, error) {
|
|
||||||
|
|
||||||
var chunkSpec ChunkSpec
|
|
||||||
|
|
||||||
if len(serialized) == 0 {
|
|
||||||
return &chunkSpec, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ranges := strings.Split(serialized, ",")
|
|
||||||
for _, r := range ranges {
|
|
||||||
parts := strings.Split(r, ":")
|
|
||||||
if len(parts) == 1 {
|
|
||||||
single, err := strconv.Atoi(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New("invalid chunk spec")
|
|
||||||
}
|
|
||||||
chunkSpec = append(chunkSpec, uint64(single))
|
|
||||||
} else if len(parts) == 2 {
|
|
||||||
start, err1 := strconv.Atoi(parts[0])
|
|
||||||
end, err2 := strconv.Atoi(parts[1])
|
|
||||||
if err1 != nil || err2 != nil {
|
|
||||||
return nil, errors.New("invalid chunk spec")
|
|
||||||
}
|
|
||||||
for i := start; i <= end; i++ {
|
|
||||||
chunkSpec = append(chunkSpec, uint64(i))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, errors.New("invalid chunk spec")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &chunkSpec, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize compresses the ChunkSpec into a list of inclusive ranges e.g. 1,2,3,5,6,7 becomes "1:3,5:7"
|
|
||||||
func (cs ChunkSpec) Serialize() string {
|
|
||||||
result := ""
|
|
||||||
|
|
||||||
i := 0
|
|
||||||
|
|
||||||
for {
|
|
||||||
if i >= len(cs) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
j := i + 1
|
|
||||||
for ; j < len(cs) && cs[j] == cs[j-1]+1; j++ {
|
|
||||||
}
|
|
||||||
|
|
||||||
if result != "" {
|
|
||||||
result += ","
|
|
||||||
}
|
|
||||||
|
|
||||||
if j == i+1 {
|
|
||||||
result += fmt.Sprintf("%d", cs[i])
|
|
||||||
} else {
|
|
||||||
result += fmt.Sprintf("%d:%d", cs[i], cs[j-1])
|
|
||||||
}
|
|
||||||
i = j
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
|
@ -1,37 +0,0 @@
|
||||||
package files
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func TestChunkSpec(t *testing.T) {
|
|
||||||
|
|
||||||
var testCases = map[string]ChunkSpec{
|
|
||||||
"0": CreateChunkSpec([]bool{false}),
|
|
||||||
"0:10": CreateChunkSpec([]bool{false, false, false, false, false, false, false, false, false, false, false}),
|
|
||||||
"0:1,3:5,7:9": CreateChunkSpec([]bool{false, false, true, false, false, false, true, false, false, false, true}),
|
|
||||||
"": CreateChunkSpec([]bool{true, true, true, true, true, true, true, true, true, true, true}),
|
|
||||||
"2,5,8,10": CreateChunkSpec([]bool{true, true, false, true, true, false, true, true, false, true, false}),
|
|
||||||
//
|
|
||||||
"0,2:10": CreateChunkSpec([]bool{false, true, false, false, false, false, false, false, false, false, false}),
|
|
||||||
"0:8,10": CreateChunkSpec([]bool{false, false, false, false, false, false, false, false, false, true, false}),
|
|
||||||
"1:9": CreateChunkSpec([]bool{true, false, false, false, false, false, false, false, false, false, true}),
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range testCases {
|
|
||||||
if k != v.Serialize() {
|
|
||||||
t.Fatalf("got %v but expected %v", v.Serialize(), k)
|
|
||||||
}
|
|
||||||
t.Logf("%v == %v", k, v.Serialize())
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range testCases {
|
|
||||||
if cs, err := Deserialize(k); err != nil {
|
|
||||||
t.Fatalf("error deserialized key: %v %v", k, err)
|
|
||||||
} else {
|
|
||||||
if v.Serialize() != cs.Serialize() {
|
|
||||||
t.Fatalf("got %v but expected %v", v.Serialize(), cs.Serialize())
|
|
||||||
}
|
|
||||||
t.Logf("%v == %v", cs.Serialize(), v.Serialize())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,249 +0,0 @@
|
||||||
package files
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
path "path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/protocol/model"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileSharingSubSystem encapsulates the functionality necessary to share and download files via Cwtch
|
|
||||||
type FileSharingSubSystem struct {
|
|
||||||
|
|
||||||
// for sharing files
|
|
||||||
activeShares sync.Map // file key to manifest
|
|
||||||
|
|
||||||
// for downloading files
|
|
||||||
prospectiveManifests sync.Map // file key to serialized manifests
|
|
||||||
activeDownloads sync.Map // file key to manifests
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShareFile given a file key and a serialized manifest, allow the serialized manifest to be downloaded
|
|
||||||
// by Cwtch profiles in possession of the fileKey
|
|
||||||
func (fsss *FileSharingSubSystem) ShareFile(fileKey string, serializedManifest string) {
|
|
||||||
var manifest Manifest
|
|
||||||
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("could not share file %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Debugf("sharing file: %v %v", fileKey, serializedManifest)
|
|
||||||
fsss.activeShares.Store(fileKey, &manifest)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopFileShare given a file key removes the serialized manifest from consideration by the file sharing
|
|
||||||
// subsystem. Future requests on this manifest will fail, as will any in-progress chunk requests.
|
|
||||||
func (fsss *FileSharingSubSystem) StopFileShare(fileKey string) {
|
|
||||||
fsss.activeShares.Delete(fileKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopAllFileShares removes all active file shares from consideration
|
|
||||||
func (fsss *FileSharingSubSystem) StopAllFileShares() {
|
|
||||||
fsss.activeShares.Range(func(key, value interface{}) bool {
|
|
||||||
fsss.activeShares.Delete(key)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchManifest given a file key and knowledge of the manifest size in chunks (obtained via an attribute lookup)
|
|
||||||
// construct a request to download the manifest.
|
|
||||||
func (fsss *FileSharingSubSystem) FetchManifest(fileKey string, manifestSize uint64) model.PeerMessage {
|
|
||||||
fsss.prospectiveManifests.Store(fileKey, strings.Repeat("\"", int(manifestSize*DefaultChunkSize)))
|
|
||||||
return model.PeerMessage{
|
|
||||||
Context: event.ContextRequestManifest,
|
|
||||||
ID: fileKey,
|
|
||||||
Data: []byte{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompileChunkRequests takes in a complete serializedManifest and returns a set of chunk request messages
|
|
||||||
// TODO in the future we will want this to return the handles of contacts to request chunks from
|
|
||||||
func (fsss *FileSharingSubSystem) CompileChunkRequests(fileKey, serializedManifest, tempFile, title string) []model.PeerMessage {
|
|
||||||
var manifest Manifest
|
|
||||||
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
|
||||||
var messages []model.PeerMessage
|
|
||||||
if err == nil {
|
|
||||||
manifest.TempFileName = tempFile
|
|
||||||
manifest.Title = title
|
|
||||||
err := manifest.PrepareDownload()
|
|
||||||
if err == nil {
|
|
||||||
fsss.activeDownloads.Store(fileKey, &manifest)
|
|
||||||
log.Debugf("downloading file chunks: %v", manifest.GetChunkRequest().Serialize())
|
|
||||||
messages = append(messages, model.PeerMessage{
|
|
||||||
ID: fileKey,
|
|
||||||
Context: event.ContextRequestFile,
|
|
||||||
Data: []byte(manifest.GetChunkRequest().Serialize()),
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
log.Errorf("couldn't prepare download: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return messages
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestManifestParts given a fileKey construct a set of messages representing requests to download various
|
|
||||||
// parts of the Manifest
|
|
||||||
func (fsss *FileSharingSubSystem) RequestManifestParts(fileKey string) []model.PeerMessage {
|
|
||||||
manifestI, exists := fsss.activeShares.Load(fileKey)
|
|
||||||
var messages []model.PeerMessage
|
|
||||||
if exists {
|
|
||||||
oldManifest := manifestI.(*Manifest)
|
|
||||||
serializedOldManifest := oldManifest.Serialize()
|
|
||||||
log.Debugf("found serialized manifest")
|
|
||||||
|
|
||||||
// copy so we dont get threading issues by modifying the original
|
|
||||||
// and then redact the file path before sending
|
|
||||||
// nb: manifest.size has already been corrected elsewhere
|
|
||||||
var manifest Manifest
|
|
||||||
json.Unmarshal([]byte(serializedOldManifest), &manifest)
|
|
||||||
manifest.FileName = path.Base(manifest.FileName)
|
|
||||||
serializedManifest := manifest.Serialize()
|
|
||||||
|
|
||||||
chunkID := 0
|
|
||||||
for i := 0; i < len(serializedManifest); i += DefaultChunkSize {
|
|
||||||
offset := i
|
|
||||||
end := i + DefaultChunkSize
|
|
||||||
// truncate end
|
|
||||||
if end > len(serializedManifest) {
|
|
||||||
end = len(serializedManifest)
|
|
||||||
}
|
|
||||||
chunk := serializedManifest[offset:end]
|
|
||||||
// request this manifest part
|
|
||||||
messages = append(messages, model.PeerMessage{
|
|
||||||
Context: event.ContextSendManifest,
|
|
||||||
ID: fmt.Sprintf("%s.%d", fileKey, chunkID),
|
|
||||||
Data: chunk,
|
|
||||||
})
|
|
||||||
chunkID++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return messages
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReceiveManifestPart given a manifestKey reconstruct part the manifest from the provided part
|
|
||||||
func (fsss *FileSharingSubSystem) ReceiveManifestPart(manifestKey string, part []byte) (fileKey string, serializedManifest string) {
|
|
||||||
fileKeyParts := strings.Split(manifestKey, ".")
|
|
||||||
if len(fileKeyParts) == 3 { // rootHash.nonce.manifestPart
|
|
||||||
fileKey = fmt.Sprintf("%s.%s", fileKeyParts[0], fileKeyParts[1])
|
|
||||||
log.Debugf("manifest filekey: %s", fileKey)
|
|
||||||
manifestPart, err := strconv.Atoi(fileKeyParts[2])
|
|
||||||
if err == nil {
|
|
||||||
serializedManifest, exists := fsss.prospectiveManifests.Load(fileKey)
|
|
||||||
if exists {
|
|
||||||
serializedManifest := serializedManifest.(string)
|
|
||||||
log.Debugf("loaded manifest")
|
|
||||||
offset := manifestPart * DefaultChunkSize
|
|
||||||
end := (manifestPart + 1) * DefaultChunkSize
|
|
||||||
|
|
||||||
log.Debugf("storing manifest part %v %v", offset, end)
|
|
||||||
serializedManifestBytes := []byte(serializedManifest)
|
|
||||||
if len(serializedManifestBytes) > offset && len(serializedManifestBytes) >= end {
|
|
||||||
copy(serializedManifestBytes[offset:end], part[:])
|
|
||||||
|
|
||||||
if len(part) < DefaultChunkSize {
|
|
||||||
serializedManifestBytes = serializedManifestBytes[0 : len(serializedManifestBytes)-(DefaultChunkSize-len(part))]
|
|
||||||
}
|
|
||||||
|
|
||||||
serializedManifest = string(serializedManifestBytes)
|
|
||||||
fsss.prospectiveManifests.Store(fileKey, serializedManifest)
|
|
||||||
log.Debugf("current manifest: [%s]", serializedManifest)
|
|
||||||
var manifest Manifest
|
|
||||||
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
|
||||||
if err == nil && hex.EncodeToString(manifest.RootHash) == fileKeyParts[0] {
|
|
||||||
log.Debugf("valid manifest received! %x", manifest.RootHash)
|
|
||||||
return fileKey, serializedManifest
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProcessChunkRequest given a fileKey, and a chunk request, compile a set of responses for each requested Chunk
|
|
||||||
func (fsss *FileSharingSubSystem) ProcessChunkRequest(fileKey string, serializedChunkRequest []byte) []model.PeerMessage {
|
|
||||||
log.Debugf("chunk request: %v", fileKey)
|
|
||||||
// fileKey is rootHash.nonce
|
|
||||||
manifestI, exists := fsss.activeShares.Load(fileKey)
|
|
||||||
var messages []model.PeerMessage
|
|
||||||
if exists {
|
|
||||||
manifest := manifestI.(*Manifest)
|
|
||||||
log.Debugf("manifest found: %x", manifest.RootHash)
|
|
||||||
chunkSpec, err := Deserialize(string(serializedChunkRequest))
|
|
||||||
log.Debugf("deserialized chunk spec found: %v [%s]", chunkSpec, serializedChunkRequest)
|
|
||||||
if err == nil {
|
|
||||||
for _, chunk := range *chunkSpec {
|
|
||||||
contents, err := manifest.GetChunkBytes(chunk)
|
|
||||||
if err == nil {
|
|
||||||
log.Debugf("sending chunk: %v %x", chunk, contents)
|
|
||||||
messages = append(messages, model.PeerMessage{
|
|
||||||
ID: fmt.Sprintf("%v.%d", fileKey, chunk),
|
|
||||||
Context: event.ContextSendFile,
|
|
||||||
Data: contents,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return messages
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProcessChunk given a chunk key and a chunk attempt to store and verify the chunk as part of an active download
|
|
||||||
// If this results in the file download being completed return downloaded = true
|
|
||||||
// Always return the progress of a matched download if it exists along with the total number of chunks and the
|
|
||||||
// given chunk ID
|
|
||||||
// If not such active download exists then return an empty file key and ignore all further processing.
|
|
||||||
func (fsss *FileSharingSubSystem) ProcessChunk(chunkKey string, chunk []byte) (fileKey string, progress uint64, totalChunks uint64, chunkID uint64, title string) {
|
|
||||||
fileKeyParts := strings.Split(chunkKey, ".")
|
|
||||||
log.Debugf("got chunk for %s", fileKeyParts)
|
|
||||||
if len(fileKeyParts) == 3 { // fileKey is rootHash.nonce.chunk
|
|
||||||
// recalculate file key
|
|
||||||
fileKey = fmt.Sprintf("%s.%s", fileKeyParts[0], fileKeyParts[1])
|
|
||||||
derivedChunkID, err := strconv.Atoi(fileKeyParts[2])
|
|
||||||
if err == nil {
|
|
||||||
chunkID = uint64(derivedChunkID)
|
|
||||||
log.Debugf("got chunk id %d", chunkID)
|
|
||||||
manifestI, exists := fsss.activeDownloads.Load(fileKey)
|
|
||||||
if exists {
|
|
||||||
manifest := manifestI.(*Manifest)
|
|
||||||
totalChunks = uint64(len(manifest.Chunks))
|
|
||||||
title = manifest.Title
|
|
||||||
log.Debugf("found active manifest %v", manifest)
|
|
||||||
progress, err = manifest.StoreChunk(chunkID, chunk)
|
|
||||||
log.Debugf("attempts to store chunk %v %v", progress, err)
|
|
||||||
if err != nil {
|
|
||||||
log.Debugf("error storing chunk: %v", err)
|
|
||||||
// malicious contacts who share conversations can share random chunks
|
|
||||||
// these will not match the chunk hash and as such will fail.
|
|
||||||
// at this point we can't differentiate between a malicious chunk and failure to store a
|
|
||||||
// legitimate chunk, so if there is an error we silently drop it and expect the higher level callers (e.g. the ui)
|
|
||||||
//to detect and respond to missing chunks if it detects them..
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyFile returns true if the file has been downloaded, false otherwise
|
|
||||||
// as well as the temporary filename, if one was used
|
|
||||||
func (fsss *FileSharingSubSystem) VerifyFile(fileKey string) (tempFile string, filePath string, downloaded bool) {
|
|
||||||
manifestI, exists := fsss.activeDownloads.Load(fileKey)
|
|
||||||
if exists {
|
|
||||||
manifest := manifestI.(*Manifest)
|
|
||||||
if manifest.VerifyFile() == nil {
|
|
||||||
manifest.Close()
|
|
||||||
fsss.activeDownloads.Delete(fileKey)
|
|
||||||
log.Debugf("file verified and downloaded!")
|
|
||||||
return manifest.TempFileName, manifest.FileName, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", "", false
|
|
||||||
}
|
|
|
@ -1,338 +0,0 @@
|
||||||
package files
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"crypto/sha256"
|
|
||||||
"crypto/sha512"
|
|
||||||
"crypto/subtle"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Chunk is a wrapper around a hash
|
|
||||||
type Chunk []byte
|
|
||||||
|
|
||||||
// DefaultChunkSize is the default value of a manifest chunk
|
|
||||||
const DefaultChunkSize = 4096
|
|
||||||
|
|
||||||
// MaxManifestSize is the maximum size of a manifest (in DefaultChunkSize)
|
|
||||||
// Because we reconstruct the manifest in memory we have to practically limit this size.
|
|
||||||
// 2622000 * 4096 ~= 10GB using 4096 byte chunks
|
|
||||||
// This makes the actual manifest size ~125Mb which seems reasonable for a 10Gb file.
|
|
||||||
// most file transfers are expected to have manifest that are much smaller.
|
|
||||||
const MaxManifestSize = 2622000
|
|
||||||
|
|
||||||
// Manifest is a collection of hashes and other metadata needed to reconstruct a file and verify contents given a root hash
|
|
||||||
type Manifest struct {
|
|
||||||
Chunks []Chunk
|
|
||||||
FileName string
|
|
||||||
RootHash []byte
|
|
||||||
FileSizeInBytes uint64
|
|
||||||
ChunkSizeInBytes uint64
|
|
||||||
TempFileName string `json:"-"`
|
|
||||||
Title string `json:"-"`
|
|
||||||
|
|
||||||
chunkComplete []bool
|
|
||||||
openFd *os.File
|
|
||||||
progress uint64
|
|
||||||
lock sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateManifest takes in a file path and constructs a file sharing manifest of hashes along with
|
|
||||||
// other information necessary to download, reconstruct and verify the file.
|
|
||||||
func CreateManifest(path string) (*Manifest, error) {
|
|
||||||
// Process file into Chunks
|
|
||||||
f, err := os.Open(path)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
reader := bufio.NewReader(f)
|
|
||||||
buf := make([]byte, DefaultChunkSize)
|
|
||||||
|
|
||||||
var chunks []Chunk
|
|
||||||
fileSizeInBytes := uint64(0)
|
|
||||||
|
|
||||||
rootHash := sha512.New()
|
|
||||||
|
|
||||||
for {
|
|
||||||
n, err := reader.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
if err != io.EOF {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
hash := sha256.New()
|
|
||||||
hash.Write(buf[0:n])
|
|
||||||
rootHash.Write(buf[0:n])
|
|
||||||
chunkHash := hash.Sum(nil)
|
|
||||||
chunks = append(chunks, chunkHash)
|
|
||||||
fileSizeInBytes += uint64(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Manifest{
|
|
||||||
Chunks: chunks,
|
|
||||||
FileName: path,
|
|
||||||
RootHash: rootHash.Sum(nil),
|
|
||||||
ChunkSizeInBytes: DefaultChunkSize,
|
|
||||||
FileSizeInBytes: fileSizeInBytes,
|
|
||||||
chunkComplete: make([]bool, len(chunks)),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetChunkBytes takes in a chunk identifier and returns the bytes associated with that chunk
|
|
||||||
// it does not attempt to validate the chunk Hash.
|
|
||||||
func (m *Manifest) GetChunkBytes(id uint64) ([]byte, error) {
|
|
||||||
m.lock.Lock()
|
|
||||||
defer m.lock.Unlock()
|
|
||||||
if id >= uint64(len(m.Chunks)) {
|
|
||||||
return nil, errors.New("chunk not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.getFileHandle(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seek to Chunk
|
|
||||||
offset, err := m.openFd.Seek(int64(id*m.ChunkSizeInBytes), 0)
|
|
||||||
if (uint64(offset) != id*m.ChunkSizeInBytes) || err != nil {
|
|
||||||
return nil, errors.New("chunk not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read chunk into memory and return...
|
|
||||||
reader := bufio.NewReader(m.openFd)
|
|
||||||
buf := make([]byte, m.ChunkSizeInBytes)
|
|
||||||
n, err := reader.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
if err != io.EOF {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return buf[0:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadManifest reads in a json serialized Manifest from a file
|
|
||||||
func LoadManifest(filename string) (*Manifest, error) {
|
|
||||||
bytes, err := os.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
manifest := new(Manifest)
|
|
||||||
err = json.Unmarshal(bytes, manifest)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
manifest.chunkComplete = make([]bool, len(manifest.Chunks))
|
|
||||||
return manifest, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyFile attempts to calculate the rootHash of a file and compare it to the expected rootHash stored in the
|
|
||||||
// manifest
|
|
||||||
func (m *Manifest) VerifyFile() error {
|
|
||||||
m.lock.Lock()
|
|
||||||
defer m.lock.Unlock()
|
|
||||||
if err := m.getFileHandle(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
offset, err := m.openFd.Seek(0, 0)
|
|
||||||
if offset != 0 || err != nil {
|
|
||||||
return errors.New("chunk not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
rootHash := sha512.New()
|
|
||||||
reader := bufio.NewReader(m.openFd)
|
|
||||||
buf := make([]byte, m.ChunkSizeInBytes)
|
|
||||||
for {
|
|
||||||
n, err := reader.Read(buf)
|
|
||||||
rootHash.Write(buf[0:n])
|
|
||||||
if err != nil {
|
|
||||||
if err != io.EOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
calculatedRootHash := rootHash.Sum(nil)
|
|
||||||
if subtle.ConstantTimeCompare(m.RootHash, calculatedRootHash) != 1 {
|
|
||||||
return fmt.Errorf("hashes do not match %x %x", m.RootHash, calculatedRootHash)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StoreChunk takes in a chunk id and contents, verifies the chunk has the expected hash and if so store the contents
|
|
||||||
// in the file.
|
|
||||||
func (m *Manifest) StoreChunk(id uint64, contents []byte) (uint64, error) {
|
|
||||||
m.lock.Lock()
|
|
||||||
defer m.lock.Unlock()
|
|
||||||
// Check the chunk id
|
|
||||||
if id >= uint64(len(m.Chunks)) {
|
|
||||||
return 0, errors.New("invalid chunk id")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate the chunk hash
|
|
||||||
hash := sha256.New()
|
|
||||||
hash.Write(contents)
|
|
||||||
chunkHash := hash.Sum(nil)
|
|
||||||
|
|
||||||
if subtle.ConstantTimeCompare(chunkHash, m.Chunks[id]) != 1 {
|
|
||||||
return 0, fmt.Errorf("invalid chunk hash %x %x", chunkHash, m.Chunks[id])
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.getFileHandle(); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
offset, err := m.openFd.Seek(int64(id*m.ChunkSizeInBytes), 0)
|
|
||||||
if (uint64(offset) != id*m.ChunkSizeInBytes) || err != nil {
|
|
||||||
return 0, errors.New("chunk not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write the contents of the chunk to the file
|
|
||||||
_, err = m.openFd.Write(contents)
|
|
||||||
|
|
||||||
if err == nil && !m.chunkComplete[id] {
|
|
||||||
m.chunkComplete[id] = true
|
|
||||||
m.progress++
|
|
||||||
}
|
|
||||||
|
|
||||||
return m.progress, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// private function to set the internal file handle
|
|
||||||
func (m *Manifest) getFileHandle() error {
|
|
||||||
// Seek to the chunk in the file
|
|
||||||
if m.openFd == nil {
|
|
||||||
useFileName := m.FileName
|
|
||||||
if m.TempFileName != "" {
|
|
||||||
useFileName = m.TempFileName
|
|
||||||
}
|
|
||||||
fd, err := os.OpenFile(useFileName, os.O_RDWR, 0600)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.openFd = fd
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetChunkRequest returns an uncompressed list of Chunks needed to complete the file described in the manifest
|
|
||||||
func (m *Manifest) GetChunkRequest() ChunkSpec {
|
|
||||||
return CreateChunkSpec(m.chunkComplete)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrepareDownload creates an empty file of the expected size of the file described by the manifest
|
|
||||||
// If the file already exists it assumes it is the correct file and that it is resuming from when it left off.
|
|
||||||
func (m *Manifest) PrepareDownload() error {
|
|
||||||
m.lock.Lock()
|
|
||||||
defer m.lock.Unlock()
|
|
||||||
|
|
||||||
m.chunkComplete = make([]bool, len(m.Chunks))
|
|
||||||
if m.ChunkSizeInBytes == 0 || m.FileSizeInBytes == 0 {
|
|
||||||
return fmt.Errorf("manifest is invalid")
|
|
||||||
}
|
|
||||||
|
|
||||||
if info, err := os.Stat(m.FileName); os.IsNotExist(err) {
|
|
||||||
useFileName := m.FileName
|
|
||||||
if m.TempFileName != "" {
|
|
||||||
useFileName = m.TempFileName
|
|
||||||
}
|
|
||||||
|
|
||||||
fd, err := os.Create(useFileName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.openFd = fd
|
|
||||||
|
|
||||||
writer := bufio.NewWriter(m.openFd)
|
|
||||||
buf := make([]byte, m.ChunkSizeInBytes)
|
|
||||||
for chunk := 0; chunk < len(m.Chunks)-1; chunk++ {
|
|
||||||
_, err := writer.Write(buf)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lastChunkSize := m.FileSizeInBytes % m.ChunkSizeInBytes
|
|
||||||
if lastChunkSize > 0 {
|
|
||||||
buf = make([]byte, lastChunkSize)
|
|
||||||
_, err := writer.Write(buf)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writer.Flush()
|
|
||||||
} else {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if uint64(info.Size()) != m.FileSizeInBytes {
|
|
||||||
return fmt.Errorf("file exists but is the wrong size")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.getFileHandle(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Calculate Progress
|
|
||||||
reader := bufio.NewReader(m.openFd)
|
|
||||||
buf := make([]byte, m.ChunkSizeInBytes)
|
|
||||||
chunkI := 0
|
|
||||||
for {
|
|
||||||
n, err := reader.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
if err != io.EOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if chunkI >= len(m.Chunks) {
|
|
||||||
log.Errorf("file is larger than the number of chunks assigned. Assuming manifest was corrupted.")
|
|
||||||
return fmt.Errorf("file is larger than the number of chunks assigned. Assuming manifest was corrupted")
|
|
||||||
}
|
|
||||||
|
|
||||||
hash := sha512.New()
|
|
||||||
hash.Write(buf[0:n])
|
|
||||||
chunkHash := hash.Sum(nil)
|
|
||||||
m.progress = 0
|
|
||||||
if subtle.ConstantTimeCompare(chunkHash, m.Chunks[chunkI]) == 1 {
|
|
||||||
m.chunkComplete[chunkI] = true
|
|
||||||
m.progress++
|
|
||||||
}
|
|
||||||
chunkI++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the underlying file descriptor
|
|
||||||
func (m *Manifest) Close() {
|
|
||||||
m.lock.Lock()
|
|
||||||
defer m.lock.Unlock()
|
|
||||||
if m.openFd != nil {
|
|
||||||
m.openFd.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save writes a JSON encoded byte array version of the manifest to path
|
|
||||||
func (m *Manifest) Save(path string) error {
|
|
||||||
return os.WriteFile(path, m.Serialize(), 0600)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize returns the manifest as a JSON encoded byte array
|
|
||||||
func (m *Manifest) Serialize() []byte {
|
|
||||||
data, _ := json.Marshal(m)
|
|
||||||
return data
|
|
||||||
}
|
|
|
@ -1,150 +0,0 @@
|
||||||
package files
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
|
||||||
"math"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestManifest(t *testing.T) {
|
|
||||||
manifest, err := CreateManifest("testdata/example.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("manifest create error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(manifest.Chunks) != 1 {
|
|
||||||
t.Fatalf("manifest had unepxected Chunks : %v", manifest.Chunks)
|
|
||||||
}
|
|
||||||
|
|
||||||
if manifest.FileSizeInBytes != 12 {
|
|
||||||
t.Fatalf("manifest had unepxected length : %v", manifest.FileSizeInBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
if hex.EncodeToString(manifest.RootHash) != "861844d6704e8573fec34d967e20bcfef3d424cf48be04e6dc08f2bd58c729743371015ead891cc3cf1c9d34b49264b510751b1ff9e537937bc46b5d6ff4ecc8" {
|
|
||||||
t.Fatalf("manifest had incorrect root Hash : %v", manifest.RootHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Logf("%v", manifest)
|
|
||||||
|
|
||||||
// Try to read the chunk
|
|
||||||
_, err = manifest.GetChunkBytes(1)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("chunk fetch should have thrown an error")
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = manifest.GetChunkBytes(0)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("chunk fetch error: %v", err)
|
|
||||||
}
|
|
||||||
_, err = manifest.GetChunkBytes(0)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("chunk fetch error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = manifest.GetChunkBytes(0)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("chunk fetch error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
json, _ := json.Marshal(manifest)
|
|
||||||
t.Logf("%s", json)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestManifestLarge(t *testing.T) {
|
|
||||||
manifest, err := CreateManifest("testdata/cwtch.png")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("manifest create error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(manifest.Chunks) != int(math.Ceil(float64(51791)/DefaultChunkSize)) {
|
|
||||||
t.Fatalf("manifest had unexpected Chunks : %v", manifest.Chunks)
|
|
||||||
}
|
|
||||||
|
|
||||||
if manifest.FileSizeInBytes != 51791 {
|
|
||||||
t.Fatalf("manifest had unepxected length : %v", manifest.FileSizeInBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
if hex.EncodeToString(manifest.RootHash) != "8f0ed73bbb30db45b6a740b1251cae02945f48e4f991464d5f3607685c45dcd136a325dab2e5f6429ce2b715e602b20b5b16bf7438fb6235fefe912adcedb5fd" {
|
|
||||||
t.Fatalf("manifest had incorrect root Hash : %v", manifest.RootHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Logf("%v", len(manifest.Chunks))
|
|
||||||
|
|
||||||
json, _ := json.Marshal(manifest)
|
|
||||||
t.Logf("%v %s", len(json), json)
|
|
||||||
|
|
||||||
// Pretend we downloaded the manifest
|
|
||||||
os.WriteFile("testdata/cwtch.png.manifest", json, 0600)
|
|
||||||
|
|
||||||
// Load the manifest from a file
|
|
||||||
cwtchPngManifest, err := LoadManifest("testdata/cwtch.png.manifest")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("manifest create error: %v", err)
|
|
||||||
}
|
|
||||||
defer cwtchPngManifest.Close()
|
|
||||||
t.Logf("%v", cwtchPngManifest)
|
|
||||||
|
|
||||||
// Test verifying the hash
|
|
||||||
if cwtchPngManifest.VerifyFile() != nil {
|
|
||||||
t.Fatalf("hashes do not validate error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare Download
|
|
||||||
cwtchPngOutManifest, err := LoadManifest("testdata/cwtch.png.manifest")
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("could not prepare download %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cwtchPngOutManifest.FileName = "testdata/cwtch.out.png"
|
|
||||||
|
|
||||||
defer cwtchPngOutManifest.Close()
|
|
||||||
err = cwtchPngOutManifest.PrepareDownload()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("could not prepare download %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(cwtchPngManifest.Chunks); i++ {
|
|
||||||
|
|
||||||
t.Logf("Sending Chunk %v %x from %v", i, cwtchPngManifest.Chunks[i], cwtchPngManifest.FileName)
|
|
||||||
|
|
||||||
contents, err := cwtchPngManifest.GetChunkBytes(uint64(i))
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("could not get chunk %v %v", i, err)
|
|
||||||
}
|
|
||||||
t.Logf("Progress: %v", cwtchPngOutManifest.chunkComplete)
|
|
||||||
_, err = cwtchPngOutManifest.StoreChunk(uint64(i), contents)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("could not store chunk %v %v", i, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to store the chunk in an invalid position...
|
|
||||||
_, err = cwtchPngOutManifest.StoreChunk(uint64(i+1), contents)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("incorrect chunk store")
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to store an invalid chunk...should trigger an error
|
|
||||||
_, err = cwtchPngOutManifest.StoreChunk(uint64(len(cwtchPngManifest.Chunks)), []byte{0xff})
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("incorrect chunk store")
|
|
||||||
}
|
|
||||||
|
|
||||||
err = cwtchPngOutManifest.VerifyFile()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("could not verify file %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that changing the hash throws an error
|
|
||||||
cwtchPngManifest.RootHash[3] = 0xFF
|
|
||||||
if cwtchPngManifest.VerifyFile() == nil {
|
|
||||||
t.Fatalf("hashes should not validate error")
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
Binary file not shown.
Before Width: | Height: | Size: 51 KiB |
Binary file not shown.
Before Width: | Height: | Size: 51 KiB |
|
@ -1 +0,0 @@
|
||||||
{"Chunks":["BXbFagOrWyDwcsnW+f1O6fddCqJywEISjUrzI31FAE0=","1SZcGk0NSduL093Hh0hZ4WVcx2o6VKgL3kUy2WqmdLY=","R4wwVcR4andJJ0fkXlp/td1ZSjH7xHi3Egh8aloWONA=","TAuI06kog7TYVDSO8AgWprAGY8LSlGBwqZvpgMymhZE=","XQLxqLjiM0qIAeOmGIrZJkyuCEfJ4t+ikgbV1ohudiY=","aXInp/WF58A5/TGkwAwniNvIU2ZlRjVtrpClw0sBcVM=","oSCjcrenQ4+Pix4jtgNCRt40K0kQ41eCumSJO0Gqo/0=","FebZSfHuyVdRWkS8/IaWA6UooEURkf9vPxnqZXKII8g=","tITbm77ca1YmExGzbX4WBP5fAOh4bUzDtceN1VBYcBI=","VJd8rWuMtrZzqobdKam0n6t4Vgo72GcsNRNzMk46PsI=","7ywzxLV44HVk9wz+QQHvvVQJAFkTU6/pHyVFjE0uF40=","PoHUwEoQOSXv8ZpJ9bGeCZqiwY34bXcFcBki2OPxd8o=","eogaSYPKrl0MFEqVP1mwUMczMCcnjjwUmUz/0DsAF48="],"FileName":"testdata/cwtch.png","RootHash":"jw7XO7sw20W2p0CxJRyuApRfSOT5kUZNXzYHaFxF3NE2oyXasuX2QpzitxXmArILWxa/dDj7YjX+/pEq3O21/Q==","FileSizeInBytes":51791,"ChunkSizeInBytes":4096}
|
|
|
@ -1 +0,0 @@
|
||||||
Hello World!
|
|
|
@ -39,12 +39,6 @@ type EncryptedGroupMessage struct {
|
||||||
Signature []byte
|
Signature []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
// CachedEncryptedGroupMessage provides an encapsulation of the encrypted group message for local caching / error reporting
|
|
||||||
type CachedEncryptedGroupMessage struct {
|
|
||||||
EncryptedGroupMessage
|
|
||||||
Group string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToBytes converts the encrypted group message to a set of bytes for serialization
|
// ToBytes converts the encrypted group message to a set of bytes for serialization
|
||||||
func (egm EncryptedGroupMessage) ToBytes() []byte {
|
func (egm EncryptedGroupMessage) ToBytes() []byte {
|
||||||
data, _ := json.Marshal(egm)
|
data, _ := json.Marshal(egm)
|
||||||
|
|
|
@ -1,53 +0,0 @@
|
||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PeerMessage is an encapsulation that can be used by higher level applications
|
|
||||||
type PeerMessage struct {
|
|
||||||
// ID **must** only contain alphanumeric characters separated by period.
|
|
||||||
ID string // A unique Message ID (primarily used for acknowledgments)
|
|
||||||
|
|
||||||
// Context **must** only contain alphanumeric characters separated by period.
|
|
||||||
Context string // A unique context identifier i.e. im.cwtch.chat
|
|
||||||
|
|
||||||
// Data can contain anything
|
|
||||||
Data []byte // A data packet.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize constructs an efficient serialized representation
|
|
||||||
// Format: [ID String] | [Context String] | Binary Data
|
|
||||||
func (m *PeerMessage) Serialize() []byte {
|
|
||||||
return append(append([]byte(m.ID+"|"), []byte(m.Context+"|")...), m.Data...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParsePeerMessage returns either a deserialized PeerMessage or an error if it is malformed
|
|
||||||
func ParsePeerMessage(message []byte) (*PeerMessage, error) {
|
|
||||||
|
|
||||||
// find the identifier prefix
|
|
||||||
idTerminator := bytes.IndexByte(message, '|')
|
|
||||||
if idTerminator != -1 && idTerminator+1 < len(message) {
|
|
||||||
// find the context terminator prefix
|
|
||||||
contextbegin := idTerminator + 1
|
|
||||||
contextTerminator := bytes.IndexByte(message[contextbegin:], '|')
|
|
||||||
if contextTerminator != -1 {
|
|
||||||
|
|
||||||
// check that we have data
|
|
||||||
dataBegin := contextbegin + contextTerminator + 1
|
|
||||||
var data []byte
|
|
||||||
if dataBegin < len(message) {
|
|
||||||
data = message[dataBegin:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// compile the message
|
|
||||||
return &PeerMessage{
|
|
||||||
ID: string(message[0:idTerminator]),
|
|
||||||
Context: string(message[contextbegin : contextbegin+contextTerminator]),
|
|
||||||
Data: data,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, errors.New("invalid message")
|
|
||||||
}
|
|
|
@ -0,0 +1,100 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
cwtchserver "cwtch.im/cwtch/server"
|
||||||
|
"encoding/base64"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
mrand "math/rand"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
serverConfigFile = "serverConfig.json"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.AddEverythingFromPattern("server/app/main")
|
||||||
|
log.AddEverythingFromPattern("server/server")
|
||||||
|
log.SetLevel(log.LevelDebug)
|
||||||
|
configDir := os.Getenv("CWTCH_CONFIG_DIR")
|
||||||
|
|
||||||
|
if len(os.Args) == 2 && os.Args[1] == "gen1" {
|
||||||
|
config := new(cwtchserver.Config)
|
||||||
|
id, pk := primitives.InitializeEphemeralIdentity()
|
||||||
|
tid, tpk := primitives.InitializeEphemeralIdentity()
|
||||||
|
config.PrivateKey = pk
|
||||||
|
config.PublicKey = id.PublicKey()
|
||||||
|
config.TokenServerPrivateKey = tpk
|
||||||
|
config.TokenServerPublicKey = tid.PublicKey()
|
||||||
|
config.MaxBufferLines = 100000
|
||||||
|
config.ServerReporting = cwtchserver.Reporting{
|
||||||
|
LogMetricsToFile: true,
|
||||||
|
ReportingGroupID: "",
|
||||||
|
ReportingServerAddr: "",
|
||||||
|
}
|
||||||
|
config.Save(".", "serverConfig.json")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
serverConfig := cwtchserver.LoadConfig(configDir, serverConfigFile)
|
||||||
|
|
||||||
|
// we don't need real randomness for the port, just to avoid a possible conflict...
|
||||||
|
mrand.Seed(int64(time.Now().Nanosecond()))
|
||||||
|
controlPort := mrand.Intn(1000) + 9052
|
||||||
|
|
||||||
|
// generate a random password
|
||||||
|
key := make([]byte, 64)
|
||||||
|
_, err := rand.Read(key)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
os.MkdirAll("tordir/tor", 0700)
|
||||||
|
tor.NewTorrc().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("./tordir/tor/torrc")
|
||||||
|
acn, err := tor.NewTorACNWithAuth("tordir", "", controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("\nError connecting to Tor: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
defer acn.Close()
|
||||||
|
|
||||||
|
server := new(cwtchserver.Server)
|
||||||
|
log.Infoln("starting cwtch server...")
|
||||||
|
|
||||||
|
server.Setup(serverConfig)
|
||||||
|
|
||||||
|
// TODO create a random group for testing
|
||||||
|
group, _ := model.NewGroup(tor.GetTorV3Hostname(serverConfig.PublicKey))
|
||||||
|
invite, err := group.Invite()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bundle := server.KeyBundle().Serialize()
|
||||||
|
log.Infof("Server Config: server:%s", base64.StdEncoding.EncodeToString(bundle))
|
||||||
|
|
||||||
|
log.Infof("Server Tofu Bundle: tofubundle:server:%s||%s", base64.StdEncoding.EncodeToString(bundle), invite)
|
||||||
|
|
||||||
|
// Graceful Shutdown
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||||
|
go func() {
|
||||||
|
<-c
|
||||||
|
acn.Close()
|
||||||
|
server.Close()
|
||||||
|
os.Exit(1)
|
||||||
|
}()
|
||||||
|
|
||||||
|
server.Run(acn)
|
||||||
|
for {
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,33 @@
|
||||||
|
#!/bin/sh
|
||||||
|
set -o errexit
|
||||||
|
|
||||||
|
chmod_files() { find $2 -type f -exec chmod -v $1 {} \;
|
||||||
|
}
|
||||||
|
chmod_dirs() { find $2 -type d -exec chmod -v $1 {} \;
|
||||||
|
}
|
||||||
|
|
||||||
|
chown ${TOR_USER}:${TOR_USER} /run/tor/
|
||||||
|
chmod 770 /run/tor
|
||||||
|
|
||||||
|
chown -Rv ${TOR_USER}:${TOR_USER} /var/lib/tor
|
||||||
|
chmod_dirs 700 /var/lib/tor
|
||||||
|
chmod_files 600 /var/lib/tor
|
||||||
|
|
||||||
|
echo -e "\n========================================================"
|
||||||
|
# Display OS version, Tor version & torrc in log
|
||||||
|
echo -e "Alpine Version: \c" && cat /etc/alpine-release
|
||||||
|
tor --version
|
||||||
|
#cat /etc/tor/torrc
|
||||||
|
echo -e "========================================================\n"
|
||||||
|
|
||||||
|
tor -f /etc/tor/torrc
|
||||||
|
|
||||||
|
#Cwtch will crash and burn if 9051 isn't ready
|
||||||
|
sleep 15
|
||||||
|
|
||||||
|
if [ -z "${CWTCH_CONFIG_DIR}" ]; then
|
||||||
|
CWTCH_CONFIG_DIR=/etc/cwtch/
|
||||||
|
fi
|
||||||
|
|
||||||
|
#Run cwtch (or whatever the user passed)
|
||||||
|
CWTCH_CONFIG_DIR=$CWTCH_CONFIG_DIR exec "$@"
|
|
@ -0,0 +1,27 @@
|
||||||
|
User _tor
|
||||||
|
DataDirectory /var/lib/tor
|
||||||
|
|
||||||
|
ORPort 0
|
||||||
|
ExitRelay 0
|
||||||
|
IPv6Exit 0
|
||||||
|
|
||||||
|
#We need this running in the background as the server doesn't launch it itself
|
||||||
|
RunAsDaemon 1
|
||||||
|
|
||||||
|
ClientOnly 1
|
||||||
|
SocksPort 9050
|
||||||
|
|
||||||
|
ControlPort 9051
|
||||||
|
ControlSocket /run/tor/control
|
||||||
|
ControlSocketsGroupWritable 1
|
||||||
|
CookieAuthentication 1
|
||||||
|
CookieAuthFile /run/tor/control.authcookie
|
||||||
|
CookieAuthFileGroupReadable 1
|
||||||
|
#HashedControlPassword 16:B4C8EE980C085EE460AEA9094350DAA9C2B5F841400E9BBA247368400A
|
||||||
|
|
||||||
|
# Run as a relay only (change policy to enable exit node)
|
||||||
|
ExitPolicy reject *:* # no exits allowed
|
||||||
|
ExitPolicy reject6 *:*
|
||||||
|
|
||||||
|
# Additional config built by the entrypoint will go here
|
||||||
|
|
|
@ -0,0 +1,249 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type counter struct {
|
||||||
|
startTime time.Time
|
||||||
|
count uint64
|
||||||
|
total uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Counter providers a threadsafe counter to use for storing long running counts
|
||||||
|
type Counter interface {
|
||||||
|
Add(unit int)
|
||||||
|
Reset()
|
||||||
|
|
||||||
|
Count() int
|
||||||
|
GetStarttime() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCounter initializes a counter starting at time.Now() and a count of 0 and returns it
|
||||||
|
func NewCounter() Counter {
|
||||||
|
c := &counter{startTime: time.Now(), count: 0, total: 0}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add add a count of unit to the counter
|
||||||
|
func (c *counter) Add(unit int) {
|
||||||
|
atomic.AddUint64(&c.count, uint64(unit))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count since Start
|
||||||
|
func (c *counter) Count() int {
|
||||||
|
return int(atomic.LoadUint64(&c.count))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) Reset() {
|
||||||
|
atomic.StoreUint64(&c.count, 0)
|
||||||
|
c.startTime = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStarttime returns the starttime of the counter
|
||||||
|
func (c *counter) GetStarttime() time.Time {
|
||||||
|
return c.startTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorType controls how the monitor will report itself
|
||||||
|
type MonitorType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Count indicates the monitor should report in interger format
|
||||||
|
Count MonitorType = iota
|
||||||
|
// Percent indicates the monitor should report in decimal format with 2 places
|
||||||
|
Percent
|
||||||
|
// MegaBytes indicates the monitor should transform the raw number into MBs
|
||||||
|
MegaBytes
|
||||||
|
)
|
||||||
|
|
||||||
|
// MonitorAccumulation controls how monitor data is accumulated over time into larger summary buckets
|
||||||
|
type MonitorAccumulation int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Cumulative values with sum over time
|
||||||
|
Cumulative MonitorAccumulation = iota
|
||||||
|
// Average values will average over time
|
||||||
|
Average
|
||||||
|
)
|
||||||
|
|
||||||
|
type monitorHistory struct {
|
||||||
|
monitorType MonitorType
|
||||||
|
monitorAccumulation MonitorAccumulation
|
||||||
|
|
||||||
|
starttime time.Time
|
||||||
|
perMinutePerHour [60]float64
|
||||||
|
timeLastHourRotate time.Time
|
||||||
|
perHourForDay [24]float64
|
||||||
|
timeLastDayRotate time.Time
|
||||||
|
perDayForWeek [7]float64
|
||||||
|
timeLastWeekRotate time.Time
|
||||||
|
perWeekForMonth [4]float64
|
||||||
|
timeLastMonthRotate time.Time
|
||||||
|
perMonthForYear [12]float64
|
||||||
|
|
||||||
|
monitor func() float64
|
||||||
|
|
||||||
|
breakChannel chan bool
|
||||||
|
lock sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorHistory runs a monitor every minute and rotates and averages the results out across time
|
||||||
|
type MonitorHistory interface {
|
||||||
|
Start()
|
||||||
|
Stop()
|
||||||
|
|
||||||
|
Minutes() []float64
|
||||||
|
Hours() []float64
|
||||||
|
Days() []float64
|
||||||
|
Weeks() []float64
|
||||||
|
Months() []float64
|
||||||
|
|
||||||
|
Report(w *bufio.Writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMonitorHistory returns a new MonitorHistory with starttime of time.Now and Started running with supplied monitor
|
||||||
|
func NewMonitorHistory(t MonitorType, a MonitorAccumulation, monitor func() float64) MonitorHistory {
|
||||||
|
mh := &monitorHistory{monitorType: t, monitorAccumulation: a, starttime: time.Now(), monitor: monitor, breakChannel: make(chan bool),
|
||||||
|
timeLastHourRotate: time.Now(), timeLastDayRotate: time.Now(), timeLastWeekRotate: time.Now(),
|
||||||
|
timeLastMonthRotate: time.Now()}
|
||||||
|
mh.Start()
|
||||||
|
return mh
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start starts a monitorHistory go rountine to run the monitor at intervals and rotate history
|
||||||
|
func (mh *monitorHistory) Start() {
|
||||||
|
go mh.monitorThread()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops a monitorHistory go routine
|
||||||
|
func (mh *monitorHistory) Stop() {
|
||||||
|
mh.breakChannel <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Minutes returns the last 60 minute monitoring results
|
||||||
|
func (mh *monitorHistory) Minutes() []float64 {
|
||||||
|
return mh.returnCopy(mh.perMinutePerHour[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hours returns the last 24 hourly averages of monitor results
|
||||||
|
func (mh *monitorHistory) Hours() []float64 {
|
||||||
|
return mh.returnCopy(mh.perHourForDay[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Days returns the last 7 day averages of monitor results
|
||||||
|
func (mh *monitorHistory) Days() []float64 {
|
||||||
|
return mh.returnCopy(mh.perDayForWeek[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Weeks returns the last 4 weeks of averages of monitor results
|
||||||
|
func (mh *monitorHistory) Weeks() []float64 {
|
||||||
|
return mh.returnCopy(mh.perWeekForMonth[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Months returns the last 12 months of averages of monitor results
|
||||||
|
func (mh *monitorHistory) Months() []float64 {
|
||||||
|
return mh.returnCopy(mh.perMonthForYear[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mh *monitorHistory) Report(w *bufio.Writer) {
|
||||||
|
mh.lock.Lock()
|
||||||
|
fmt.Fprintln(w, "Minutes:", reportLine(mh.monitorType, mh.perMinutePerHour[:]))
|
||||||
|
fmt.Fprintln(w, "Hours: ", reportLine(mh.monitorType, mh.perHourForDay[:]))
|
||||||
|
fmt.Fprintln(w, "Days: ", reportLine(mh.monitorType, mh.perDayForWeek[:]))
|
||||||
|
fmt.Fprintln(w, "Weeks: ", reportLine(mh.monitorType, mh.perWeekForMonth[:]))
|
||||||
|
fmt.Fprintln(w, "Months: ", reportLine(mh.monitorType, mh.perMonthForYear[:]))
|
||||||
|
mh.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// reportLine formats a window of samples as a single space-separated line
// according to the MonitorType:
//   - Count:     integers ("1 2 3")
//   - Percent:   two decimal places ("1.00 2.00")
//   - MegaBytes: raw byte counts converted to whole MBs, each suffixed "MBs"
//
// It works by Sprintf-ing the whole slice ("[1 2 3]"), re-joining the fields
// and trimming the surrounding brackets. Unknown types yield "".
func reportLine(t MonitorType, array []float64) string {
	switch t {
	case Count:
		return strings.Trim(strings.Join(strings.Fields(fmt.Sprintf("%.0f", array)), " "), "[]")
	case Percent:
		return strings.Trim(strings.Join(strings.Fields(fmt.Sprintf("%.2f", array)), " "), "[]")
	case MegaBytes:
		mbs := make([]int, len(array))
		for i, b := range array {
			// integer division: values are raw bytes, reported as whole MBs
			mbs[i] = int(b) / 1024 / 1024
		}
		// join with "MBs " so every value carries the unit, then close the last one
		return strings.Trim(strings.Join(strings.Fields(fmt.Sprintf("%d", mbs)), "MBs "), "[]") + "MBs"
	}
	return ""
}
|
||||||
|
|
||||||
|
func (mh *monitorHistory) returnCopy(slice []float64) []float64 {
|
||||||
|
retSlice := make([]float64, len(slice))
|
||||||
|
mh.lock.Lock()
|
||||||
|
for i, v := range slice {
|
||||||
|
retSlice[i] = v
|
||||||
|
}
|
||||||
|
mh.lock.Unlock()
|
||||||
|
return retSlice
|
||||||
|
}
|
||||||
|
|
||||||
|
func rotateAndAccumulate(array []float64, newVal float64, acc MonitorAccumulation) float64 {
|
||||||
|
total := float64(0.0)
|
||||||
|
for i := len(array) - 1; i > 0; i-- {
|
||||||
|
array[i] = array[i-1]
|
||||||
|
total += array[i]
|
||||||
|
}
|
||||||
|
array[0] = newVal
|
||||||
|
total += newVal
|
||||||
|
if acc == Cumulative {
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
return total / float64(len(array))
|
||||||
|
}
|
||||||
|
func accumulate(array []float64, acc MonitorAccumulation) float64 {
|
||||||
|
total := float64(0)
|
||||||
|
for _, x := range array {
|
||||||
|
total += x
|
||||||
|
}
|
||||||
|
if acc == Cumulative {
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
return total / float64(len(array))
|
||||||
|
}
|
||||||
|
|
||||||
|
// monitorThread is the goroutine in a monitorHistory that does per minute monitoring and rotation
|
||||||
|
func (mh *monitorHistory) monitorThread() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-time.After(time.Minute):
|
||||||
|
mh.lock.Lock()
|
||||||
|
|
||||||
|
minuteAvg := rotateAndAccumulate(mh.perMinutePerHour[:], mh.monitor(), mh.monitorAccumulation)
|
||||||
|
|
||||||
|
if time.Now().Sub(mh.timeLastHourRotate) > time.Hour {
|
||||||
|
rotateAndAccumulate(mh.perHourForDay[:], minuteAvg, mh.monitorAccumulation)
|
||||||
|
mh.timeLastHourRotate = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
if time.Now().Sub(mh.timeLastDayRotate) > time.Hour*24 {
|
||||||
|
rotateAndAccumulate(mh.perDayForWeek[:], accumulate(mh.perHourForDay[:], mh.monitorAccumulation), mh.monitorAccumulation)
|
||||||
|
mh.timeLastDayRotate = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
if time.Now().Sub(mh.timeLastWeekRotate) > time.Hour*24*7 {
|
||||||
|
rotateAndAccumulate(mh.perWeekForMonth[:], accumulate(mh.perDayForWeek[:], mh.monitorAccumulation), mh.monitorAccumulation)
|
||||||
|
mh.timeLastWeekRotate = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
if time.Now().Sub(mh.timeLastMonthRotate) > time.Hour*24*7*4 {
|
||||||
|
rotateAndAccumulate(mh.perMonthForYear[:], accumulate(mh.perWeekForMonth[:], mh.monitorAccumulation), mh.monitorAccumulation)
|
||||||
|
mh.timeLastMonthRotate = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
mh.lock.Unlock()
|
||||||
|
|
||||||
|
case <-mh.breakChannel:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,37 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCounter(t *testing.T) {
|
||||||
|
starttime := time.Now()
|
||||||
|
c := NewCounter()
|
||||||
|
|
||||||
|
max := 100
|
||||||
|
done := make(chan bool, max)
|
||||||
|
|
||||||
|
// slightly stress test atomic nature of metric by flooding with threads Add()ing
|
||||||
|
for i := 0; i < max; i++ {
|
||||||
|
go func() {
|
||||||
|
c.Add(1)
|
||||||
|
done <- true
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < max; i++ {
|
||||||
|
<-done
|
||||||
|
}
|
||||||
|
|
||||||
|
val := c.Count()
|
||||||
|
if val != 100 {
|
||||||
|
t.Errorf("counter count was not 100")
|
||||||
|
}
|
||||||
|
|
||||||
|
counterStart := c.GetStarttime()
|
||||||
|
|
||||||
|
if counterStart.Sub(starttime) > time.Millisecond {
|
||||||
|
t.Errorf("counter's starttime was innaccurate %v", counterStart.Sub(starttime))
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,119 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"github.com/struCoder/pidusage"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// reportFile is the filename (under configDir) the periodic report is written to.
	reportFile = "serverMonitorReport.txt"
)

// Monitors is a package of metrics for a Cwtch Server including message count, CPU, Mem, and conns
type Monitors struct {
	MessageCounter      Counter        // messages since the last minute sample
	TotalMessageCounter Counter        // running total across the session
	Messages            MonitorHistory // per-minute message counts, summed over time
	CPU                 MonitorHistory // process CPU usage (percent, averaged)
	Memory              MonitorHistory // process memory usage (bytes, averaged)
	ClientConns         MonitorHistory // connection counts (averaged)
	starttime           time.Time
	breakChannel        chan bool // stops the run() reporting loop
	log                 bool      // when true, a report file is written every minute
	configDir           string
}
|
||||||
|
|
||||||
|
// Start initializes a Monitors's monitors.
// NOTE(review): the `log` parameter shadows the imported log package inside
// this function — it only controls whether periodic file reporting runs.
func (mp *Monitors) Start(ts tapir.Service, configDir string, log bool) {
	mp.log = log
	mp.configDir = configDir
	mp.starttime = time.Now()
	mp.breakChannel = make(chan bool)
	mp.MessageCounter = NewCounter()

	// Maintain a count of total messages
	mp.TotalMessageCounter = NewCounter()
	// Each minute: fold the per-minute count into the total, then reset the
	// per-minute counter so the next sample starts from zero.
	mp.Messages = NewMonitorHistory(Count, Cumulative, func() (c float64) {
		c = float64(mp.MessageCounter.Count())
		mp.TotalMessageCounter.Add(int(c))
		mp.MessageCounter.Reset()
		return
	})

	// pidusage calls are serialized between the CPU and Memory samplers.
	var pidUsageLock sync.Mutex
	mp.CPU = NewMonitorHistory(Percent, Average, func() float64 {
		pidUsageLock.Lock()
		defer pidUsageLock.Unlock()
		// error deliberately ignored; a failed stat reports 0 for this sample
		sysInfo, _ := pidusage.GetStat(os.Getpid())
		return float64(sysInfo.CPU)
	})
	mp.Memory = NewMonitorHistory(MegaBytes, Average, func() float64 {
		pidUsageLock.Lock()
		defer pidUsageLock.Unlock()
		sysInfo, _ := pidusage.GetStat(os.Getpid())
		return float64(sysInfo.Memory)
	})

	// TODO: replace with ts.
	mp.ClientConns = NewMonitorHistory(Count, Average, func() float64 { return float64(ts.Metrics().ConnectionCount) })

	if mp.log {
		go mp.run()
	}
}
|
||||||
|
|
||||||
|
// run writes a report file once per minute until Stop() signals breakChannel.
func (mp *Monitors) run() {
	for {
		select {
		case <-time.After(time.Minute):
			mp.report()
		case <-mp.breakChannel:
			return
		}
	}
}
|
||||||
|
|
||||||
|
func (mp *Monitors) report() {
|
||||||
|
f, err := os.Create(path.Join(mp.configDir, reportFile))
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not open monitor reporting file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
w := bufio.NewWriter(f)
|
||||||
|
|
||||||
|
fmt.Fprintf(w, "Uptime: %v\n\n", time.Now().Sub(mp.starttime))
|
||||||
|
|
||||||
|
fmt.Fprintln(w, "messages:")
|
||||||
|
mp.Messages.Report(w)
|
||||||
|
|
||||||
|
fmt.Fprintln(w, "\nClient Connections:")
|
||||||
|
mp.ClientConns.Report(w)
|
||||||
|
|
||||||
|
fmt.Fprintln(w, "\nCPU:")
|
||||||
|
mp.CPU.Report(w)
|
||||||
|
|
||||||
|
fmt.Fprintln(w, "\nMemory:")
|
||||||
|
mp.Memory.Report(w)
|
||||||
|
|
||||||
|
w.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops all the monitors in a Monitors.
// The breakChannel send is only performed when file logging was enabled,
// because run() is only started in that case (see Start).
func (mp *Monitors) Stop() {
	if mp.log {
		mp.breakChannel <- true
	}
	mp.Messages.Stop()
	mp.CPU.Stop()
	mp.Memory.Stop()
	mp.ClientConns.Stop()
}
|
|
@ -0,0 +1,161 @@
|
||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/ed25519"
|
||||||
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/server/metrics"
|
||||||
|
"cwtch.im/cwtch/server/storage"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||||
|
tor2 "git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/persistence"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity"
|
||||||
|
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server encapsulates a complete, compliant Cwtch server.
type Server struct {
	service             tapir.Service // main onion service hosting the token board
	config              Config
	metricsPack         metrics.Monitors
	tokenTapirService   tapir.Service // secondary onion service for token (privacypass) issuance
	tokenServer         *privacypass.TokenServer
	tokenService        primitives.Identity
	tokenServicePrivKey ed25519.PrivateKey
	tokenServiceStopped bool // set by the token-service Listen goroutine on exit
	onionServiceStopped bool // set by the main-service Listen goroutine on exit
	running             bool
	existingMessageCount int // messages already on disk at startup (metrics are per-session)
	lock                sync.RWMutex
}
|
||||||
|
|
||||||
|
// Setup initializes a server from a given configuration, opening the token
// database and deriving the token-service identity. Must be called before Run.
func (s *Server) Setup(serverConfig Config) {
	s.config = serverConfig
	bs := new(persistence.BoltPersistence)
	// NOTE(review): the error from Open is ignored here — a failed open
	// presumably surfaces later via the token server; confirm.
	bs.Open(path.Join(serverConfig.ConfigDir, "tokens.db"))
	s.tokenServer = privacypass.NewTokenServerFromStore(&serverConfig.TokenServiceK, bs)
	log.Infof("Y: %v", s.tokenServer.Y)
	s.tokenService = s.config.TokenServiceIdentity()
	s.tokenServicePrivKey = s.config.TokenServerPrivateKey
}
|
||||||
|
|
||||||
|
// Identity returns the main onion identity of the server
func (s *Server) Identity() primitives.Identity {
	return s.config.Identity()
}
|
||||||
|
|
||||||
|
// Run starts a server with the given privateKey
|
||||||
|
func (s *Server) Run(acn connectivity.ACN) error {
|
||||||
|
addressIdentity := tor.GetTorV3Hostname(s.config.PublicKey)
|
||||||
|
identity := primitives.InitializeIdentity("", &s.config.PrivateKey, &s.config.PublicKey)
|
||||||
|
var service tapir.Service
|
||||||
|
service = new(tor2.BaseOnionService)
|
||||||
|
service.Init(acn, s.config.PrivateKey, &identity)
|
||||||
|
s.service = service
|
||||||
|
log.Infof("cwtch server running on cwtch:%s\n", addressIdentity+".onion:")
|
||||||
|
s.metricsPack.Start(service, s.config.ConfigDir, s.config.ServerReporting.LogMetricsToFile)
|
||||||
|
|
||||||
|
ms := new(storage.MessageStore)
|
||||||
|
err := ms.Init(s.config.ConfigDir, s.config.MaxBufferLines, s.metricsPack.MessageCounter)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Needed because we only collect metrics on a per-session basis
|
||||||
|
// TODO fix metrics so they persist across sessions?
|
||||||
|
s.existingMessageCount = len(ms.FetchMessages())
|
||||||
|
|
||||||
|
s.tokenTapirService = new(tor2.BaseOnionService)
|
||||||
|
s.tokenTapirService.Init(acn, s.tokenServicePrivKey, &s.tokenService)
|
||||||
|
tokenApplication := new(applications.TokenApplication)
|
||||||
|
tokenApplication.TokenService = s.tokenServer
|
||||||
|
powTokenApp := new(applications.ApplicationChain).
|
||||||
|
ChainApplication(new(applications.ProofOfWorkApplication), applications.SuccessfulProofOfWorkCapability).
|
||||||
|
ChainApplication(tokenApplication, applications.HasTokensCapability)
|
||||||
|
go func() {
|
||||||
|
s.tokenTapirService.Listen(powTokenApp)
|
||||||
|
s.tokenServiceStopped = true
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
s.service.Listen(NewTokenBoardServer(ms, s.tokenServer))
|
||||||
|
s.onionServiceStopped = true
|
||||||
|
}()
|
||||||
|
|
||||||
|
s.lock.Lock()
|
||||||
|
s.running = true
|
||||||
|
s.lock.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyBundle provides the signed keybundle of the server: the main onion
// hostname, the token-service onion hostname and the privacypass public
// value Y, signed with the server's main identity.
func (s *Server) KeyBundle() *model.KeyBundle {
	kb := model.NewKeyBundle()
	identity := s.config.Identity()
	kb.Keys[model.KeyTypeServerOnion] = model.Key(identity.Hostname())
	kb.Keys[model.KeyTypeTokenOnion] = model.Key(s.tokenService.Hostname())
	kb.Keys[model.KeyTypePrivacyPass] = model.Key(s.tokenServer.Y.String())
	kb.Sign(identity)
	return kb
}
|
||||||
|
|
||||||
|
// CheckStatus returns true if the server is running and/or an error if any part of the server needs to be restarted.
|
||||||
|
func (s *Server) CheckStatus() (bool, error) {
|
||||||
|
s.lock.RLock()
|
||||||
|
defer s.lock.RUnlock()
|
||||||
|
if s.onionServiceStopped == true || s.tokenServiceStopped == true {
|
||||||
|
return s.running, fmt.Errorf("one of more server components are down: onion:%v token service: %v", s.onionServiceStopped, s.tokenServiceStopped)
|
||||||
|
}
|
||||||
|
return s.running, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown kills the app closing all connections and freeing all goroutines
|
||||||
|
func (s *Server) Shutdown() {
|
||||||
|
s.lock.Lock()
|
||||||
|
defer s.lock.Unlock()
|
||||||
|
s.service.Shutdown()
|
||||||
|
s.tokenTapirService.Shutdown()
|
||||||
|
s.metricsPack.Stop()
|
||||||
|
s.running = true
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Statistics is an encapsulation of information about the server that an operator might want to know at a glance.
type Statistics struct {
	// TotalMessages is the on-disk count at startup plus messages received this session.
	TotalMessages int
}
|
||||||
|
|
||||||
|
// GetStatistics is a stub method for providing some high level information about
// the server operation to bundling applications (e.g. the UI)
func (s *Server) GetStatistics() Statistics {
	// TODO Statistics from Metrics is very awkward. Metrics needs an overhaul to make safe
	total := s.existingMessageCount
	// nil check: the counter only exists once metricsPack.Start has run (see Run)
	if s.metricsPack.TotalMessageCounter != nil {
		total += s.metricsPack.TotalMessageCounter.Count()
	}

	return Statistics{
		TotalMessages: total,
	}
}
|
||||||
|
|
||||||
|
// ConfigureAutostart sets whether this server should autostart (in the Cwtch UI/bundling application)
// and persists the updated config to disk immediately.
func (s *Server) ConfigureAutostart(autostart bool) {
	s.config.AutoStart = autostart
	s.config.Save(s.config.ConfigDir, s.config.FilePath)
}
|
||||||
|
|
||||||
|
// Close shuts down the cwtch server in a safe way, closing the token server's
// database. Distinct from Shutdown, which stops the network services.
func (s *Server) Close() {
	log.Infof("Shutting down server")
	s.lock.Lock()
	defer s.lock.Unlock()
	log.Infof("Closing Token Server Database...")
	s.tokenServer.Close()
}
|
|
@ -0,0 +1,98 @@
|
||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/json"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"github.com/gtank/ristretto255"
|
||||||
|
"golang.org/x/crypto/ed25519"
|
||||||
|
"io/ioutil"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reporting is a struct for storing a the config a server needs to be a peer, and connect to a group to report
type Reporting struct {
	LogMetricsToFile    bool   `json:"logMetricsToFile"`
	ReportingGroupID    string `json:"reportingGroupId"`
	ReportingServerAddr string `json:"reportingServerAddr"`
}

// Config is a struct for storing basic server configuration
type Config struct {
	// ConfigDir and FilePath are runtime-only and never serialized.
	ConfigDir      string `json:"-"`
	FilePath       string `json:"-"`
	MaxBufferLines int    `json:"maxBufferLines"`

	// Main onion service keypair.
	PublicKey  ed25519.PublicKey  `json:"publicKey"`
	PrivateKey ed25519.PrivateKey `json:"privateKey"`

	// Token (privacypass) onion service keypair.
	TokenServerPublicKey  ed25519.PublicKey  `json:"tokenServerPublicKey"`
	TokenServerPrivateKey ed25519.PrivateKey `json:"tokenServerPrivateKey"`

	// TokenServiceK is the privacypass token-signing scalar.
	TokenServiceK ristretto255.Scalar `json:"tokenServiceK"`

	ServerReporting Reporting `json:"serverReporting"`
	AutoStart       bool      `json:"autostart"`
}
|
||||||
|
|
||||||
|
// Identity returns an encapsulation of the servers keys
func (config *Config) Identity() primitives.Identity {
	return primitives.InitializeIdentity("", &config.PrivateKey, &config.PublicKey)
}

// TokenServiceIdentity returns an encapsulation of the servers token server (experimental)
func (config *Config) TokenServiceIdentity() primitives.Identity {
	return primitives.InitializeIdentity("", &config.TokenServerPrivateKey, &config.TokenServerPublicKey)
}
|
||||||
|
|
||||||
|
// Save dumps the latest version of the config to a json file given by filename
|
||||||
|
func (config *Config) Save(dir, filename string) {
|
||||||
|
log.Infof("Saving config to %s\n", path.Join(dir, filename))
|
||||||
|
bytes, _ := json.MarshalIndent(config, "", "\t")
|
||||||
|
ioutil.WriteFile(path.Join(dir, filename), bytes, 0600)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadConfig loads a Config from a json file specified by filename.
// Defaults (fresh keys, a fresh token scalar, buffer size, reporting settings)
// are populated first and then overwritten by whatever the on-disk file
// contains, so first-run generates a full config and upgrades gain new fields.
// The result is always re-saved before returning.
func LoadConfig(configDir, filename string) Config {
	log.Infof("Loading config from %s\n", path.Join(configDir, filename))
	config := Config{}

	// Generate fresh identities as defaults; kept only if the file has none.
	id, pk := primitives.InitializeEphemeralIdentity()
	tid, tpk := primitives.InitializeEphemeralIdentity()
	config.PrivateKey = pk
	config.PublicKey = id.PublicKey()
	config.TokenServerPrivateKey = tpk
	config.TokenServerPublicKey = tid.PublicKey()
	config.MaxBufferLines = 100000
	config.ServerReporting = Reporting{
		LogMetricsToFile:    true,
		ReportingGroupID:    "",
		ReportingServerAddr: "",
	}
	config.AutoStart = false
	config.ConfigDir = configDir
	config.FilePath = filename

	// Default privacypass scalar from 64 bytes of CSPRNG output.
	k := new(ristretto255.Scalar)
	b := make([]byte, 64)
	_, err := rand.Read(b)
	if err != nil {
		// unable to generate secure random numbers
		panic("unable to generate secure random numbers")
	}
	k.FromUniformBytes(b)
	config.TokenServiceK = *k

	// Overlay the on-disk config (if readable) over the defaults.
	raw, err := ioutil.ReadFile(path.Join(configDir, filename))
	if err == nil {
		err = json.Unmarshal(raw, &config)

		if err != nil {
			log.Errorf("reading config: %v", err)
		}
	}
	// Always save (first time generation, new version with new variables populated)
	config.Save(configDir, filename)
	return config
}
|
|
@ -0,0 +1,118 @@
|
||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
|
"cwtch.im/cwtch/server/storage"
|
||||||
|
"encoding/json"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||||
|
"git.openprivacy.ca/cwtch.im/tapir/primitives/privacypass"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewTokenBoardServer generates new Server for Token Board
|
||||||
|
func NewTokenBoardServer(store storage.MessageStoreInterface, tokenService *privacypass.TokenServer) tapir.Application {
|
||||||
|
tba := new(TokenboardServer)
|
||||||
|
tba.TokenService = tokenService
|
||||||
|
tba.LegacyMessageStore = store
|
||||||
|
return tba
|
||||||
|
}
|
||||||
|
|
||||||
|
// TokenboardServer defines the token board server
type TokenboardServer struct {
	applications.AuthApp                          // provides connection authentication
	connection           tapir.Connection         // the authenticated client connection (set in Init)
	TokenService         *privacypass.TokenServer // spends tokens submitted with posts
	LegacyMessageStore   storage.MessageStoreInterface
}
|
||||||
|
|
||||||
|
// NewInstance creates a new TokenBoardApp
|
||||||
|
func (ta *TokenboardServer) NewInstance() tapir.Application {
|
||||||
|
tba := new(TokenboardServer)
|
||||||
|
tba.TokenService = ta.TokenService
|
||||||
|
tba.LegacyMessageStore = ta.LegacyMessageStore
|
||||||
|
return tba
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the cryptographic TokenBoardApp: after the embedded
// AuthApp handshake, an authenticated connection starts the Listen loop and
// an unauthenticated one is closed immediately.
func (ta *TokenboardServer) Init(connection tapir.Connection) {
	ta.AuthApp.Init(connection)
	if connection.HasCapability(applications.AuthCapability) {
		ta.connection = connection
		go ta.Listen()
	} else {
		connection.Close()
	}
}
|
||||||
|
|
||||||
|
// Listen processes the messages for this application: a blocking per-connection
// loop handling PostRequest (token-gated post) and ReplayRequest (full history
// sync) messages. Any empty read, malformed packet, or message missing its
// expected payload closes the connection and ends the loop.
func (ta *TokenboardServer) Listen() {
	for {
		data := ta.connection.Expect()
		if len(data) == 0 {
			log.Debugf("Server Closing Connection")
			ta.connection.Close()
			return // connection is closed
		}

		var message groups.Message
		if err := json.Unmarshal(data, &message); err != nil {
			log.Debugf("Server Closing Connection Because of Malformed Client Packet %v", err)
			ta.connection.Close()
			return // connection is closed
		}

		switch message.MessageType {
		case groups.PostRequestMessage:
			if message.PostRequest != nil {
				postrequest := *message.PostRequest
				log.Debugf("Received a Post Message Request: %v", ta.connection.Hostname())
				ta.postMessageRequest(postrequest)
			} else {
				log.Debugf("Server Closing Connection Because of PostRequestMessage Client Packet")
				ta.connection.Close()
				return // connection is closed
			}
		case groups.ReplayRequestMessage:
			if message.ReplayRequest != nil {
				log.Debugf("Received Replay Request %v", message.ReplayRequest)
				// Snapshot the store, announce how many messages will follow,
				// then stream each one to the client.
				messages := ta.LegacyMessageStore.FetchMessages()
				response, _ := json.Marshal(groups.Message{MessageType: groups.ReplayResultMessage, ReplayResult: &groups.ReplayResult{NumMessages: len(messages)}})
				log.Debugf("Sending Replay Response %v", groups.ReplayResult{NumMessages: len(messages)})
				ta.connection.Send(response)
				for _, message := range messages {
					data, _ = json.Marshal(message)
					ta.connection.Send(data)
				}
				// Set sync and then send any new messages that might have happened while we were syncing
				ta.connection.SetCapability(groups.CwtchServerSyncedCapability)
				newMessages := ta.LegacyMessageStore.FetchMessages()
				if len(newMessages) > len(messages) {
					for _, message := range newMessages[len(messages):] {
						data, _ = json.Marshal(groups.Message{MessageType: groups.NewMessageMessage, NewMessage: &groups.NewMessage{EGM: *message}})
						ta.connection.Send(data)
					}
				}
			} else {
				log.Debugf("Server Closing Connection Because of Malformed ReplayRequestMessage Packet")
				ta.connection.Close()
				return // connection is closed
			}
		}
	}
}
|
||||||
|
|
||||||
|
// postMessageRequest validates and applies a single post request: the token is
// spent against the message bytes bound to this connection's hostname (so a
// token cannot be replayed from another connection), the message is stored,
// a PostResult is returned to the sender, and the new message is broadcast to
// all synced connections. An invalid token only yields a failure PostResult.
func (ta *TokenboardServer) postMessageRequest(pr groups.PostRequest) {
	if err := ta.TokenService.SpendToken(pr.Token, append(pr.EGM.ToBytes(), ta.connection.ID().Hostname()...)); err == nil {
		log.Debugf("Token is valid")
		ta.LegacyMessageStore.AddMessage(pr.EGM)
		data, _ := json.Marshal(groups.Message{MessageType: groups.PostResultMessage, PostResult: &groups.PostResult{Success: true}})
		ta.connection.Send(data)
		data, _ = json.Marshal(groups.Message{MessageType: groups.NewMessageMessage, NewMessage: &groups.NewMessage{EGM: pr.EGM}})
		ta.connection.Broadcast(data, groups.CwtchServerSyncedCapability)
	} else {
		log.Debugf("Attempt to spend an invalid token: %v", err)
		data, _ := json.Marshal(groups.Message{MessageType: groups.PostResultMessage, PostResult: &groups.PostResult{Success: false}})
		ta.connection.Send(data)
	}
}
|
|
@ -0,0 +1,152 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
|
"cwtch.im/cwtch/server/metrics"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// fileStorePartitions is how many rotated log files back the store, each
	// holding maxBufferLines/fileStorePartitions messages.
	fileStorePartitions = 10
	fileStoreFilename   = "cwtch.messages"
	directory           = "messages"
)

// MessageStoreInterface defines an interface to interact with a store of cwtch messages.
type MessageStoreInterface interface {
	AddMessage(groups.EncryptedGroupMessage)
	FetchMessages() []*groups.EncryptedGroupMessage
}

// MessageStore is a file-backed implementation of MessageStoreInterface
type MessageStore struct {
	activeLogFile  *os.File // partition 0, the file new messages are appended to
	filePos        int      // lines written to the active partition
	storeDirectory string
	lock           sync.Mutex
	messages       []*groups.EncryptedGroupMessage // in-memory ring of the newest messages
	messageCounter metrics.Counter
	maxBufferLines int
	bufferPos      int  // next write index into messages
	bufferRotated  bool // true once the in-memory ring has wrapped at least once
}
|
||||||
|
|
||||||
|
// Close closes the message store and underlying resources, dropping the
// in-memory buffer and closing the active log file.
func (ms *MessageStore) Close() {
	ms.lock.Lock()
	ms.messages = nil
	ms.activeLogFile.Close()
	ms.lock.Unlock()
}
|
||||||
|
|
||||||
|
// updateBuffer inserts gm into the in-memory ring buffer, wrapping to the
// start (and recording the wrap) once maxBufferLines entries have been
// written. Caller must hold ms.lock.
func (ms *MessageStore) updateBuffer(gm *groups.EncryptedGroupMessage) {
	ms.messages[ms.bufferPos] = gm
	ms.bufferPos++
	if ms.bufferPos == ms.maxBufferLines {
		ms.bufferPos = 0
		ms.bufferRotated = true
	}
}
|
||||||
|
|
||||||
|
func (ms *MessageStore) initAndLoadFiles() error {
|
||||||
|
ms.activeLogFile = nil
|
||||||
|
for i := fileStorePartitions - 1; i >= 0; i-- {
|
||||||
|
ms.filePos = 0
|
||||||
|
filename := path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, i))
|
||||||
|
f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("MessageStore could not open: %v: %v", filename, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ms.activeLogFile = f
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(f)
|
||||||
|
for scanner.Scan() {
|
||||||
|
gms := scanner.Text()
|
||||||
|
ms.filePos++
|
||||||
|
gm := &groups.EncryptedGroupMessage{}
|
||||||
|
err := json.Unmarshal([]byte(gms), gm)
|
||||||
|
if err == nil {
|
||||||
|
ms.updateBuffer(gm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ms.activeLogFile == nil {
|
||||||
|
return fmt.Errorf("Could not create log file to write to in %s", ms.storeDirectory)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ms *MessageStore) updateFile(gm *groups.EncryptedGroupMessage) {
|
||||||
|
s, err := json.Marshal(gm)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Failed to unmarshal group message %v\n", err)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(ms.activeLogFile, "%s\n", s)
|
||||||
|
ms.filePos++
|
||||||
|
if ms.filePos >= ms.maxBufferLines/fileStorePartitions {
|
||||||
|
ms.rotateFileStore()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ms *MessageStore) rotateFileStore() {
|
||||||
|
ms.activeLogFile.Close()
|
||||||
|
os.Remove(path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, fileStorePartitions-1)))
|
||||||
|
|
||||||
|
for i := fileStorePartitions - 2; i >= 0; i-- {
|
||||||
|
os.Rename(path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, i)), path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, i+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile(path.Join(ms.storeDirectory, fmt.Sprintf("%s.%d", fileStoreFilename, 0)), os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not open new message store file in: %s", ms.storeDirectory)
|
||||||
|
}
|
||||||
|
ms.filePos = 0
|
||||||
|
ms.activeLogFile = f
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init sets up a MessageStore of size maxBufferLines (# of messages) backed by filename
|
||||||
|
func (ms *MessageStore) Init(appDirectory string, maxBufferLines int, messageCounter metrics.Counter) error {
|
||||||
|
ms.storeDirectory = path.Join(appDirectory, directory)
|
||||||
|
os.Mkdir(ms.storeDirectory, 0700)
|
||||||
|
|
||||||
|
ms.bufferPos = 0
|
||||||
|
ms.maxBufferLines = maxBufferLines
|
||||||
|
ms.messages = make([]*groups.EncryptedGroupMessage, maxBufferLines)
|
||||||
|
ms.bufferRotated = false
|
||||||
|
ms.messageCounter = messageCounter
|
||||||
|
|
||||||
|
err := ms.initAndLoadFiles()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchMessages returns all messages from the backing file.
|
||||||
|
func (ms *MessageStore) FetchMessages() (messages []*groups.EncryptedGroupMessage) {
|
||||||
|
ms.lock.Lock()
|
||||||
|
if !ms.bufferRotated {
|
||||||
|
messages = make([]*groups.EncryptedGroupMessage, ms.bufferPos)
|
||||||
|
copy(messages, ms.messages[0:ms.bufferPos])
|
||||||
|
} else {
|
||||||
|
messages = make([]*groups.EncryptedGroupMessage, ms.maxBufferLines)
|
||||||
|
copy(messages, ms.messages[ms.bufferPos:ms.maxBufferLines])
|
||||||
|
copy(messages[ms.bufferPos:], ms.messages[0:ms.bufferPos])
|
||||||
|
}
|
||||||
|
ms.lock.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMessage adds a GroupMessage to the store
|
||||||
|
func (ms *MessageStore) AddMessage(gm groups.EncryptedGroupMessage) {
|
||||||
|
ms.messageCounter.Add(1)
|
||||||
|
ms.lock.Lock()
|
||||||
|
ms.updateBuffer(&gm)
|
||||||
|
ms.updateFile(&gm)
|
||||||
|
|
||||||
|
ms.lock.Unlock()
|
||||||
|
}
|
|
@ -0,0 +1,54 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/protocol/groups"
|
||||||
|
"cwtch.im/cwtch/server/metrics"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMessageStore(t *testing.T) {
|
||||||
|
os.Remove("ms.test")
|
||||||
|
ms := new(MessageStore)
|
||||||
|
counter := metrics.NewCounter()
|
||||||
|
ms.Init("./", 1000, counter)
|
||||||
|
for i := 0; i < 499; i++ {
|
||||||
|
gm := groups.EncryptedGroupMessage{
|
||||||
|
Ciphertext: []byte("Hello this is a fairly average length message that we are writing here. " + strconv.Itoa(i)),
|
||||||
|
}
|
||||||
|
ms.AddMessage(gm)
|
||||||
|
}
|
||||||
|
if counter.Count() != 499 {
|
||||||
|
t.Errorf("Counter should be at 499 was %v", counter.Count())
|
||||||
|
}
|
||||||
|
ms.Close()
|
||||||
|
ms.Init("./", 1000, counter)
|
||||||
|
m := ms.FetchMessages()
|
||||||
|
if len(m) != 499 {
|
||||||
|
t.Errorf("Should have been 499 was %v", len(m))
|
||||||
|
}
|
||||||
|
|
||||||
|
counter.Reset()
|
||||||
|
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
gm := groups.EncryptedGroupMessage{
|
||||||
|
Ciphertext: []byte("Hello this is a fairly average length message that we are writing here. " + strconv.Itoa(i)),
|
||||||
|
}
|
||||||
|
ms.AddMessage(gm)
|
||||||
|
}
|
||||||
|
|
||||||
|
m = ms.FetchMessages()
|
||||||
|
if len(m) != 1000 {
|
||||||
|
t.Errorf("Should have been 1000 was %v", len(m))
|
||||||
|
}
|
||||||
|
ms.Close()
|
||||||
|
ms.Init("./", 1000, counter)
|
||||||
|
m = ms.FetchMessages()
|
||||||
|
if len(m) != 999 {
|
||||||
|
t.Errorf("Should have been 999 was %v", len(m))
|
||||||
|
}
|
||||||
|
ms.Close()
|
||||||
|
|
||||||
|
os.RemoveAll("./messages")
|
||||||
|
}
|
|
@ -1,160 +0,0 @@
|
||||||
package settings
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cwtch.im/cwtch/event"
|
|
||||||
"cwtch.im/cwtch/model/constants"
|
|
||||||
"cwtch.im/cwtch/storage/v1"
|
|
||||||
"encoding/json"
|
|
||||||
"git.openprivacy.ca/openprivacy/log"
|
|
||||||
"os"
|
|
||||||
path "path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
CwtchStarted = event.Type("CwtchStarted")
|
|
||||||
CwtchStartError = event.Type("CwtchStartError")
|
|
||||||
UpdateGlobalSettings = event.Type("UpdateGlobalSettings")
|
|
||||||
)
|
|
||||||
|
|
||||||
const GlobalSettingsFilename = "ui.globals"
|
|
||||||
const saltFile = "SALT"
|
|
||||||
|
|
||||||
type NotificationPolicy string
|
|
||||||
|
|
||||||
const (
|
|
||||||
NotificationPolicyMute = NotificationPolicy("NotificationPolicy.Mute")
|
|
||||||
NotificationPolicyOptIn = NotificationPolicy("NotificationPolicy.OptIn")
|
|
||||||
NotificationPolicyDefaultAll = NotificationPolicy("NotificationPolicy.DefaultAll")
|
|
||||||
)
|
|
||||||
|
|
||||||
type GlobalSettingsFile struct {
|
|
||||||
v1.FileStore
|
|
||||||
}
|
|
||||||
|
|
||||||
type GlobalSettings struct {
|
|
||||||
Locale string
|
|
||||||
Theme string
|
|
||||||
ThemeMode string
|
|
||||||
ThemeImages bool
|
|
||||||
PreviousPid int64
|
|
||||||
ExperimentsEnabled bool
|
|
||||||
Experiments map[string]bool
|
|
||||||
BlockUnknownConnections bool
|
|
||||||
NotificationPolicy NotificationPolicy
|
|
||||||
NotificationContent string
|
|
||||||
StreamerMode bool
|
|
||||||
StateRootPane int
|
|
||||||
FirstTime bool
|
|
||||||
UIColumnModePortrait string
|
|
||||||
UIColumnModeLandscape string
|
|
||||||
DownloadPath string
|
|
||||||
AllowAdvancedTorConfig bool
|
|
||||||
CustomTorrc string
|
|
||||||
UseCustomTorrc bool
|
|
||||||
UseExternalTor bool
|
|
||||||
CustomSocksPort int
|
|
||||||
CustomControlPort int
|
|
||||||
UseTorCache bool
|
|
||||||
TorCacheDir string
|
|
||||||
BlodeuweddPath string
|
|
||||||
FontScaling float64
|
|
||||||
DefaultSaveHistory bool
|
|
||||||
}
|
|
||||||
|
|
||||||
var DefaultGlobalSettings = GlobalSettings{
|
|
||||||
Locale: "en",
|
|
||||||
Theme: "cwtch",
|
|
||||||
ThemeMode: "dark",
|
|
||||||
ThemeImages: false,
|
|
||||||
PreviousPid: -1,
|
|
||||||
ExperimentsEnabled: false,
|
|
||||||
Experiments: map[string]bool{constants.MessageFormattingExperiment: true},
|
|
||||||
StateRootPane: 0,
|
|
||||||
FirstTime: true,
|
|
||||||
BlockUnknownConnections: false,
|
|
||||||
StreamerMode: false,
|
|
||||||
UIColumnModePortrait: "DualpaneMode.Single",
|
|
||||||
UIColumnModeLandscape: "DualpaneMode.CopyPortrait",
|
|
||||||
NotificationPolicy: "NotificationPolicy.Mute",
|
|
||||||
NotificationContent: "NotificationContent.SimpleEvent",
|
|
||||||
DownloadPath: "",
|
|
||||||
AllowAdvancedTorConfig: false,
|
|
||||||
CustomTorrc: "",
|
|
||||||
UseCustomTorrc: false,
|
|
||||||
CustomSocksPort: -1,
|
|
||||||
CustomControlPort: -1,
|
|
||||||
UseTorCache: false,
|
|
||||||
TorCacheDir: "",
|
|
||||||
BlodeuweddPath: "",
|
|
||||||
FontScaling: 1.0, // use the system pixel scaling default
|
|
||||||
DefaultSaveHistory: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
func InitGlobalSettingsFile(directory string, password string) (*GlobalSettingsFile, error) {
|
|
||||||
var key [32]byte
|
|
||||||
salt, err := os.ReadFile(path.Join(directory, saltFile))
|
|
||||||
if err != nil {
|
|
||||||
log.Infof("Could not find salt file: %v (creating a new settings file)", err)
|
|
||||||
var newSalt [128]byte
|
|
||||||
key, newSalt, err = v1.CreateKeySalt(password)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("Could not initialize salt: %v", err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err := os.MkdirAll(directory, 0700)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = os.WriteFile(path.Join(directory, saltFile), newSalt[:], 0600)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("Could not write salt file: %v", err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
key = v1.CreateKey(password, salt)
|
|
||||||
}
|
|
||||||
|
|
||||||
gsFile := v1.NewFileStore(directory, GlobalSettingsFilename, key)
|
|
||||||
log.Infof("initialized global settings file: %v", gsFile)
|
|
||||||
globalSettingsFile := GlobalSettingsFile{
|
|
||||||
gsFile,
|
|
||||||
}
|
|
||||||
return &globalSettingsFile, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (globalSettingsFile *GlobalSettingsFile) ReadGlobalSettings() GlobalSettings {
|
|
||||||
settings := DefaultGlobalSettings
|
|
||||||
|
|
||||||
if globalSettingsFile == nil {
|
|
||||||
log.Errorf("Global Settings File was not Initialized Properly")
|
|
||||||
return settings
|
|
||||||
}
|
|
||||||
|
|
||||||
settingsBytes, err := globalSettingsFile.Read()
|
|
||||||
if err != nil {
|
|
||||||
log.Infof("Could not read global ui settings: %v (assuming this is a first time app deployment...)", err)
|
|
||||||
return settings //firstTime = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// note: by giving json.Unmarshal settings we are providing it defacto defaults
|
|
||||||
// from DefaultGlobalSettings
|
|
||||||
err = json.Unmarshal(settingsBytes, &settings)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("Could not parse global ui settings: %v\n", err)
|
|
||||||
// TODO if settings is corrupted, we probably want to alert the UI.
|
|
||||||
return settings //firstTime = true
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Settings: %#v", settings)
|
|
||||||
return settings
|
|
||||||
}
|
|
||||||
|
|
||||||
func (globalSettingsFile *GlobalSettingsFile) WriteGlobalSettings(globalSettings GlobalSettings) {
|
|
||||||
bytes, _ := json.Marshal(globalSettings)
|
|
||||||
// override first time setting
|
|
||||||
globalSettings.FirstTime = true
|
|
||||||
err := globalSettingsFile.Write(bytes)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("Could not write global ui settings: %v\n", err)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,17 +1,93 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
"cwtch.im/cwtch/model"
|
"cwtch.im/cwtch/model"
|
||||||
|
"cwtch.im/cwtch/storage/v0"
|
||||||
"cwtch.im/cwtch/storage/v1"
|
"cwtch.im/cwtch/storage/v1"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"io/ioutil"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const profileFilename = "profile"
|
||||||
|
const versionFile = "VERSION"
|
||||||
|
const currentVersion = 1
|
||||||
|
|
||||||
// ProfileStore is an interface to managing the storage of Cwtch Profiles
|
// ProfileStore is an interface to managing the storage of Cwtch Profiles
|
||||||
type ProfileStore interface {
|
type ProfileStore interface {
|
||||||
|
Shutdown()
|
||||||
|
Delete()
|
||||||
GetProfileCopy(timeline bool) *model.Profile
|
GetProfileCopy(timeline bool) *model.Profile
|
||||||
|
GetNewPeerMessage() *event.Event
|
||||||
|
GetStatusMessages() []*event.Event
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateProfileWriterStore creates a profile store backed by a filestore listening for events and saving them
|
||||||
|
// directory should be $appDir/profiles/$rand
|
||||||
|
func CreateProfileWriterStore(eventManager event.Manager, directory, password string, profile *model.Profile) ProfileStore {
|
||||||
|
return v1.CreateProfileWriterStore(eventManager, directory, password, profile)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadProfileWriterStore loads a profile store from filestore listening for events and saving them
|
// LoadProfileWriterStore loads a profile store from filestore listening for events and saving them
|
||||||
// directory should be $appDir/profiles/$rand
|
// directory should be $appDir/profiles/$rand
|
||||||
func LoadProfileWriterStore(directory, password string) (ProfileStore, error) {
|
func LoadProfileWriterStore(eventManager event.Manager, directory, password string) (ProfileStore, error) {
|
||||||
return v1.LoadProfileWriterStore(directory, password)
|
versionCheckUpgrade(directory, password)
|
||||||
|
|
||||||
|
return v1.LoadProfileWriterStore(eventManager, directory, password)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadProfile reads a profile from storage and returns the profile
|
||||||
|
// Should only be called for cache refresh of the profile after a ProfileWriterStore has opened
|
||||||
|
// (and upgraded) the store, and thus supplied the key/salt
|
||||||
|
func ReadProfile(directory string, key [32]byte, salt [128]byte) (*model.Profile, error) {
|
||||||
|
return v1.ReadProfile(directory, key, salt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProfile creates a new profile for use in the profile store.
|
||||||
|
func NewProfile(name string) *model.Profile {
|
||||||
|
profile := model.GenerateNewProfile(name)
|
||||||
|
return profile
|
||||||
|
}
|
||||||
|
|
||||||
|
// ********* Versioning and upgrade **********
|
||||||
|
|
||||||
|
func detectVersion(directory string) int {
|
||||||
|
vnumberStr, err := ioutil.ReadFile(path.Join(directory, versionFile))
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
vnumber, err := strconv.Atoi(string(vnumberStr))
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Could not parse VERSION file contents: '%v' - %v\n", vnumber, err)
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return vnumber
|
||||||
|
}
|
||||||
|
|
||||||
|
func upgradeV0ToV1(directory, password string) error {
|
||||||
|
log.Debugln("Attempting storage v0 to v1: Reading v0 profile...")
|
||||||
|
profile, err := v0.ReadProfile(directory, password)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugln("Attempting storage v0 to v1: Writing v1 profile...")
|
||||||
|
return v1.UpgradeV0Profile(profile, directory, password)
|
||||||
|
}
|
||||||
|
|
||||||
|
func versionCheckUpgrade(directory, password string) {
|
||||||
|
version := detectVersion(directory)
|
||||||
|
log.Debugf("versionCheck: %v\n", version)
|
||||||
|
if version == -1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if version == 0 {
|
||||||
|
err := upgradeV0ToV1(directory, password)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
//version = 1
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,73 @@
|
||||||
|
// Known race issue with event bus channel closure
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cwtch.im/cwtch/event"
|
||||||
|
"cwtch.im/cwtch/storage/v0"
|
||||||
|
"fmt"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const testingDir = "./testing"
|
||||||
|
const filenameBase = "testStream"
|
||||||
|
const password = "asdfqwer"
|
||||||
|
const line1 = "Hello from storage!"
|
||||||
|
const testProfileName = "Alice"
|
||||||
|
const testKey = "key"
|
||||||
|
const testVal = "value"
|
||||||
|
const testInitialMessage = "howdy"
|
||||||
|
const testMessage = "Hello from storage"
|
||||||
|
|
||||||
|
func TestProfileStoreUpgradeV0toV1(t *testing.T) {
|
||||||
|
log.SetLevel(log.LevelDebug)
|
||||||
|
os.RemoveAll(testingDir)
|
||||||
|
eventBus := event.NewEventManager()
|
||||||
|
|
||||||
|
queue := event.NewQueue()
|
||||||
|
eventBus.Subscribe(event.ChangePasswordSuccess, queue)
|
||||||
|
|
||||||
|
fmt.Println("Creating and initializing v0 profile and store...")
|
||||||
|
profile := NewProfile(testProfileName)
|
||||||
|
ps1 := v0.NewProfileWriterStore(eventBus, testingDir, password, profile)
|
||||||
|
|
||||||
|
groupid, invite, err := profile.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Creating group: %v\n", err)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Creating group invite: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ps1.AddGroup(invite)
|
||||||
|
|
||||||
|
fmt.Println("Sending 200 messages...")
|
||||||
|
|
||||||
|
for i := 0; i < 200; i++ {
|
||||||
|
ps1.AddGroupMessage(groupid, time.Now().Format(time.RFC3339Nano), time.Now().Format(time.RFC3339Nano), profile.Onion, testMessage)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("Shutdown v0 profile store...")
|
||||||
|
ps1.Shutdown()
|
||||||
|
|
||||||
|
fmt.Println("New v1 Profile store...")
|
||||||
|
ps2, err := LoadProfileWriterStore(eventBus, testingDir, password)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error createing new profileStore with new password: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
profile2 := ps2.GetProfileCopy(true)
|
||||||
|
|
||||||
|
if profile2.Groups[groupid] == nil {
|
||||||
|
t.Errorf("Failed to load group %v\n", groupid)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(profile2.Groups[groupid].Timeline.Messages) != 200 {
|
||||||
|
t.Errorf("Failed to load group's 200 messages, instead got %v\n", len(profile2.Groups[groupid].Timeline.Messages))
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,70 @@
|
||||||
|
package v0
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"errors"
|
||||||
|
"git.openprivacy.ca/openprivacy/log"
|
||||||
|
"golang.org/x/crypto/nacl/secretbox"
|
||||||
|
"golang.org/x/crypto/pbkdf2"
|
||||||
|
"golang.org/x/crypto/sha3"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
// createKey derives a key from a password
|
||||||
|
func createKey(password string) ([32]byte, [128]byte, error) {
|
||||||
|
var salt [128]byte
|
||||||
|
if _, err := io.ReadFull(rand.Reader, salt[:]); err != nil {
|
||||||
|
log.Errorf("Cannot read from random: %v\n", err)
|
||||||
|
return [32]byte{}, salt, err
|
||||||
|
}
|
||||||
|
dk := pbkdf2.Key([]byte(password), salt[:], 4096, 32, sha3.New512)
|
||||||
|
|
||||||
|
var dkr [32]byte
|
||||||
|
copy(dkr[:], dk)
|
||||||
|
return dkr, salt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//encryptFileData encrypts the cwtchPeer via the specified key.
|
||||||
|
func encryptFileData(data []byte, key [32]byte) ([]byte, error) {
|
||||||
|
var nonce [24]byte
|
||||||
|
|
||||||
|
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||||
|
log.Errorf("Cannot read from random: %v\n", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
encrypted := secretbox.Seal(nonce[:], data, &nonce, &key)
|
||||||
|
return encrypted, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//decryptFile decrypts the passed ciphertext into a cwtchPeer via the specified key.
|
||||||
|
func decryptFile(ciphertext []byte, key [32]byte) ([]byte, error) {
|
||||||
|
var decryptNonce [24]byte
|
||||||
|
copy(decryptNonce[:], ciphertext[:24])
|
||||||
|
decrypted, ok := secretbox.Open(nil, ciphertext[24:], &decryptNonce, &key)
|
||||||
|
if ok {
|
||||||
|
return decrypted, nil
|
||||||
|
}
|
||||||
|
return nil, errors.New("Failed to decrypt")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load instantiates a cwtchPeer from the file store
|
||||||
|
func readEncryptedFile(directory, filename, password string) ([]byte, error) {
|
||||||
|
encryptedbytes, err := ioutil.ReadFile(path.Join(directory, filename))
|
||||||
|
if err == nil && len(encryptedbytes) > 128 {
|
||||||
|
var dkr [32]byte
|
||||||
|
//Separate the salt from the encrypted bytes, then generate the derived key
|
||||||
|
salt, encryptedbytes := encryptedbytes[0:128], encryptedbytes[128:]
|
||||||
|
dk := pbkdf2.Key([]byte(password), salt, 4096, 32, sha3.New512)
|
||||||
|
copy(dkr[:], dk)
|
||||||
|
|
||||||
|
data, err := decryptFile(encryptedbytes, dkr)
|
||||||
|
if err == nil {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue