diff --git a/.gitignore b/.gitignore index 9308c6b..b074e0f 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,7 @@ testing/cwtch.out.png.manifest testing/tordir/ tokens-bak.db tokens.db -tokens1.db \ No newline at end of file +tokens1.db +arch/ +testing/encryptedstorage/encrypted_storage_profiles +testing/encryptedstorage/tordir \ No newline at end of file diff --git a/app/app.go b/app/app.go index 51d38a9..7c487fc 100644 --- a/app/app.go +++ b/app/app.go @@ -10,7 +10,6 @@ import ( "cwtch.im/cwtch/protocol/connections" "cwtch.im/cwtch/storage" "fmt" - "git.openprivacy.ca/cwtch.im/tapir/primitives" "git.openprivacy.ca/openprivacy/connectivity" "git.openprivacy.ca/openprivacy/log" "io/ioutil" @@ -32,7 +31,6 @@ type application struct { appletPeers appletACN appletPlugins - storage map[string]storage.ProfileStore engines map[string]connections.Engine appBus event.Manager appmutex sync.Mutex @@ -41,7 +39,6 @@ type application struct { // Application is a full cwtch peer application. It allows management, usage and storage of multiple peers type Application interface { LoadProfiles(password string) - CreatePeer(name string, password string) CreateTaggedPeer(name string, password string, tag string) DeletePeer(onion string, currentPassword string) AddPeerPlugin(onion string, pluginID plugins.PluginID) @@ -61,7 +58,7 @@ type Application interface { } // LoadProfileFn is the function signature for a function in an app that loads a profile -type LoadProfileFn func(profile *model.Profile, store storage.ProfileStore) +type LoadProfileFn func(profile peer.CwtchPeer) func newAppCore(appDirectory string) *applicationCore { appCore := &applicationCore{eventBuses: make(map[string]event.Manager), directory: appDirectory} @@ -72,33 +69,13 @@ func newAppCore(appDirectory string) *applicationCore { // NewApp creates a new app with some environment awareness and initializes a Tor Manager func NewApp(acn connectivity.ACN, appDirectory string) Application { log.Debugf("NewApp(%v)\n", appDirectory) - app := &application{storage: make(map[string]storage.ProfileStore), engines: make(map[string]connections.Engine), applicationCore: *newAppCore(appDirectory), appBus: event.NewEventManager()} + app := &application{engines: make(map[string]connections.Engine), applicationCore: *newAppCore(appDirectory), appBus: event.NewEventManager()} app.appletPeers.init() app.appletACN.init(acn, app.getACNStatusHandler()) return app } -// CreatePeer creates a new Peer with a given name and core required accessories (eventbus) -func (ac *applicationCore) CreatePeer(name string) (*model.Profile, error) { - log.Debugf("CreatePeer(%v)\n", name) - - profile := storage.NewProfile(name) - - ac.coremutex.Lock() - defer ac.coremutex.Unlock() - - _, exists := ac.eventBuses[profile.Onion] - if exists { - return nil, fmt.Errorf("error: profile for onion %v already exists", profile.Onion) - } - - eventBus := event.NewEventManager() - ac.eventBuses[profile.Onion] = eventBus - - return profile, nil -} - func (ac *applicationCore) DeletePeer(onion string) { ac.coremutex.Lock() defer ac.coremutex.Unlock() @@ -108,37 +85,29 @@ func (ac *applicationCore) DeletePeer(onion string) { } func (app *application) CreateTaggedPeer(name string, password string, tag string) { - profile, err := app.applicationCore.CreatePeer(name) + app.appmutex.Lock() + defer app.appmutex.Unlock() + + profileDirectory := path.Join(app.directory, "profiles", model.GenerateRandomID()) + + profile, err := peer.CreateEncryptedStorePeer(profileDirectory, name, password) if err 
!= nil { + log.Errorf("Error Creating Peer: %v", err) app.appBus.Publish(event.NewEventList(event.PeerError, event.Error, err.Error())) return } - profileStore := storage.CreateProfileWriterStore(app.eventBuses[profile.Onion], path.Join(app.directory, "profiles", profile.LocalID), password, profile) - app.storage[profile.Onion] = profileStore - - pc := app.storage[profile.Onion].GetProfileCopy(true) - p := peer.FromProfile(pc) - p.Init(app.eventBuses[profile.Onion]) - - peerAuthorizations := profile.ContactsAuthorizations() - // TODO: Would be nice if ProtocolEngine did not need to explicitly be given the Private Key. - identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey) - engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, app.acn, app.eventBuses[profile.Onion], peerAuthorizations) - - app.peers[profile.Onion] = p - app.engines[profile.Onion] = engine + eventBus := event.NewEventManager() + app.eventBuses[profile.GetOnion()] = eventBus + profile.Init(app.eventBuses[profile.GetOnion()]) + app.peers[profile.GetOnion()] = profile + app.engines[profile.GetOnion()], _ = profile.GenerateProtocolEngine(app.acn, app.eventBuses[profile.GetOnion()]) if tag != "" { - p.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Tag, tag) + profile.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Tag, tag) } - app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion, event.Created: event.True})) -} - -// CreatePeer creates a new Peer with the given name and required accessories (eventbus, storage, protocol engine) -func (app *application) CreatePeer(name string, password string) { - app.CreateTaggedPeer(name, password, "") + app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.True})) } func (app *application) DeletePeer(onion string, password string) { @@ -146,23 +115,20 @@ func (app *application) DeletePeer(onion string, password string) { app.appmutex.Lock() defer app.appmutex.Unlock() - if app.storage[onion].CheckPassword(password) { + if app.peers[onion].CheckPassword(password) { app.appletPlugins.ShutdownPeer(onion) app.plugins.Delete(onion) - app.peers[onion].Shutdown() - delete(app.peers, onion) - + // Shutdown and Remove the Engine app.engines[onion].Shutdown() delete(app.engines, onion) - app.storage[onion].Shutdown() - app.storage[onion].Delete() - delete(app.storage, onion) - + app.peers[onion].Shutdown() + app.peers[onion].Delete() + delete(app.peers, onion) app.eventBuses[onion].Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion)) - app.applicationCore.DeletePeer(onion) + log.Debugf("Delete peer for %v Done\n", onion) app.appBus.Publish(event.NewEventList(event.PeerDeleted, event.Identity, onion)) return @@ -186,27 +152,28 @@ func (ac *applicationCore) LoadProfiles(password string, timeline bool, loadProf } for _, file := range files { - eventBus := event.NewEventManager() - profileStore, err := storage.LoadProfileWriterStore(eventBus, path.Join(ac.directory, "profiles", file.Name()), password) - if err != nil { - continue + // Attempt to load an encrypted database + profileDirectory := path.Join(ac.directory, "profiles", file.Name()) + profile, err := peer.FromEncryptedDatabase(profileDirectory, password) + if err == nil { + // return the load the profile... 
+ log.Infof("loading profile from new-type storage database...") + loadProfileFn(profile) + } else { // On failure attempt to load a legacy profile + profileStore, err := storage.LoadProfileWriterStore(profileDirectory, password) + if err != nil { + continue + } + log.Infof("found legacy profile. importing to new database structure...") + legacyProfile := profileStore.GetProfileCopy(timeline) + + cps, err := peer.CreateEncryptedStore(profileDirectory, password) + if err != nil { + log.Errorf("error creating encrypted store: %v", err) + } + profile := peer.ImportLegacyProfile(legacyProfile, cps) + loadProfileFn(profile) } - - profile := profileStore.GetProfileCopy(timeline) - - _, exists := ac.eventBuses[profile.Onion] - if exists { - profileStore.Shutdown() - eventBus.Shutdown() - log.Errorf("profile for onion %v already exists", profile.Onion) - continue - } - - ac.coremutex.Lock() - ac.eventBuses[profile.Onion] = eventBus - ac.coremutex.Unlock() - - loadProfileFn(profile, profileStore) } return nil } @@ -214,20 +181,22 @@ func (ac *applicationCore) LoadProfiles(password string, timeline bool, loadProf // LoadProfiles takes a password and attempts to load any profiles it can from storage with it and create Peers for them func (app *application) LoadProfiles(password string) { count := 0 - app.applicationCore.LoadProfiles(password, true, func(profile *model.Profile, profileStore storage.ProfileStore) { - peer := peer.FromProfile(profile) - peer.Init(app.eventBuses[profile.Onion]) - - peerAuthorizations := profile.ContactsAuthorizations() - identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey) - engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, app.acn, app.eventBuses[profile.Onion], peerAuthorizations) + app.applicationCore.LoadProfiles(password, true, func(profile peer.CwtchPeer) { app.appmutex.Lock() - app.peers[profile.Onion] = peer - app.storage[profile.Onion] = profileStore - app.engines[profile.Onion] = engine + // Only attempt to finalize the profile if we don't have one loaded... 
+ if app.peers[profile.GetOnion()] == nil { + eventBus := event.NewEventManager() + app.eventBuses[profile.GetOnion()] = eventBus + profile.Init(app.eventBuses[profile.GetOnion()]) + app.peers[profile.GetOnion()] = profile + app.engines[profile.GetOnion()], _ = profile.GenerateProtocolEngine(app.acn, app.eventBuses[profile.GetOnion()]) + app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.GetOnion(), event.Created: event.False})) + count++ + } else { + // Otherwise shutdown the connections + profile.Shutdown() + } app.appmutex.Unlock() - app.appBus.Publish(event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion, event.Created: event.False})) - count++ }) if count == 0 { message := event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0) @@ -251,12 +220,12 @@ func (ac *applicationCore) GetEventBus(onion string) event.Manager { func (app *application) getACNStatusHandler() func(int, string) { return func(progress int, status string) { progStr := strconv.Itoa(progress) - app.peerLock.Lock() + app.appmutex.Lock() app.appBus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status)) for _, bus := range app.eventBuses { bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status)) } - app.peerLock.Unlock() + app.appmutex.Unlock() } } @@ -280,8 +249,6 @@ func (app *application) ShutdownPeer(onion string) { delete(app.peers, onion) app.engines[onion].Shutdown() delete(app.engines, onion) - app.storage[onion].Shutdown() - delete(app.storage, onion) app.appletPlugins.Shutdown() } @@ -293,8 +260,6 @@ func (app *application) Shutdown() { app.appletPlugins.ShutdownPeer(id) log.Debugf("Shutting Down Engines for %v", id) app.engines[id].Shutdown() - log.Debugf("Shutting Down Storage for %v", id) - app.storage[id].Shutdown() log.Debugf("Shutting Down Bus for %v", id) app.eventBuses[id].Shutdown() } diff --git a/app/appBridge.go b/app/appBridge.go deleted file mode 100644 index 2e661c6..0000000 --- a/app/appBridge.go +++ /dev/null @@ -1,39 +0,0 @@ -package app - -import "cwtch.im/cwtch/event" -import "git.openprivacy.ca/openprivacy/log" - -const ( - // DestApp should be used as a destination for IPC messages that are for the application itself an not a peer - DestApp = "app" -) - -type applicationBridge struct { - applicationCore - - bridge event.IPCBridge - handle func(*event.Event) -} - -func (ab *applicationBridge) listen() { - log.Infoln("ab.listen()") - for { - ipcMessage, ok := ab.bridge.Read() - log.Debugf("listen() got %v for %v\n", ipcMessage.Message.EventType, ipcMessage.Dest) - if !ok { - log.Debugln("exiting appBridge.listen()") - return - } - - if ipcMessage.Dest == DestApp { - ab.handle(&ipcMessage.Message) - } else { - if eventBus, exists := ab.eventBuses[ipcMessage.Dest]; exists { - eventBus.PublishLocal(ipcMessage.Message) - } - } - } -} - -func (ab *applicationBridge) Shutdown() { -} diff --git a/app/appClient.go b/app/appClient.go deleted file mode 100644 index 9dd5c1b..0000000 --- a/app/appClient.go +++ /dev/null @@ -1,177 +0,0 @@ -package app - -import ( - "cwtch.im/cwtch/app/plugins" - "cwtch.im/cwtch/event" - "cwtch.im/cwtch/peer" - "cwtch.im/cwtch/storage" - "fmt" - "git.openprivacy.ca/openprivacy/log" - "path" - "strconv" - "sync" -) - -type applicationClient struct { - applicationBridge - appletPeers - - appBus event.Manager - acmutex sync.Mutex -} - -// NewAppClient returns an Application that acts as a client to a AppService, 
connected by the IPCBridge supplied -func NewAppClient(appDirectory string, bridge event.IPCBridge) Application { - appClient := &applicationClient{appletPeers: appletPeers{peers: make(map[string]peer.CwtchPeer)}, applicationBridge: applicationBridge{applicationCore: *newAppCore(appDirectory), bridge: bridge}, appBus: event.NewEventManager()} - appClient.handle = appClient.handleEvent - - go appClient.listen() - - appClient.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadClient)}) - - log.Infoln("Created new App Client") - return appClient -} - -// GetPrimaryBus returns the bus the Application uses for events that aren't peer specific -func (ac *applicationClient) GetPrimaryBus() event.Manager { - return ac.appBus -} - -func (ac *applicationClient) handleEvent(ev *event.Event) { - switch ev.EventType { - case event.NewPeer: - localID := ev.Data[event.Identity] - key := ev.Data[event.Key] - salt := ev.Data[event.Salt] - reload := ev.Data[event.Status] == event.StorageRunning - created := ev.Data[event.Created] - ac.newPeer(localID, key, salt, reload, created) - case event.PeerDeleted: - onion := ev.Data[event.Identity] - ac.handleDeletedPeer(onion) - case event.PeerError: - ac.appBus.Publish(*ev) - case event.AppError: - ac.appBus.Publish(*ev) - case event.ACNStatus: - ac.appBus.Publish(*ev) - case event.ACNVersion: - ac.appBus.Publish(*ev) - case event.ReloadDone: - ac.appBus.Publish(*ev) - } -} - -func (ac *applicationClient) newPeer(localID, key, salt string, reload bool, created string) { - var keyBytes [32]byte - var saltBytes [128]byte - copy(keyBytes[:], key) - copy(saltBytes[:], salt) - profile, err := storage.ReadProfile(path.Join(ac.directory, "profiles", localID), keyBytes, saltBytes) - if err != nil { - log.Errorf("Could not read profile for NewPeer event: %v\n", err) - ac.appBus.Publish(event.NewEventList(event.PeerError, event.Error, fmt.Sprintf("Could not read profile for NewPeer event: %v\n", err))) - return - } - - _, exists := ac.peers[profile.Onion] - if exists { - log.Errorf("profile for onion %v already exists", profile.Onion) - ac.appBus.Publish(event.NewEventList(event.PeerError, event.Error, fmt.Sprintf("profile for onion %v already exists", profile.Onion))) - return - } - - eventBus := event.NewIPCEventManager(ac.bridge, profile.Onion) - peer := peer.FromProfile(profile) - peer.Init(eventBus) - - ac.peerLock.Lock() - defer ac.peerLock.Unlock() - ac.peers[profile.Onion] = peer - ac.eventBuses[profile.Onion] = eventBus - npEvent := event.NewEvent(event.NewPeer, map[event.Field]string{event.Identity: profile.Onion, event.Created: created}) - if reload { - npEvent.Data[event.Status] = event.StorageRunning - } - ac.appBus.Publish(npEvent) - - if reload { - ac.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadPeer, event.Identity, profile.Onion)}) - } -} - -// CreatePeer messages the service to create a new Peer with the given name -func (ac *applicationClient) CreatePeer(name string, password string) { - ac.CreateTaggedPeer(name, password, "") -} - -func (ac *applicationClient) CreateTaggedPeer(name, password, tag string) { - log.Infof("appClient CreatePeer %v\n", name) - message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.CreatePeer, map[event.Field]string{event.ProfileName: name, event.Password: password, event.Data: tag})} - ac.bridge.Write(&message) -} - -// DeletePeer messages the service to delete a peer -func (ac *applicationClient) DeletePeer(onion string, password 
string) { - message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.DeletePeer, map[event.Field]string{event.Identity: onion, event.Password: password})} - ac.bridge.Write(&message) -} - -func (ac *applicationClient) ChangePeerPassword(onion, oldpass, newpass string) { - message := event.IPCMessage{Dest: onion, Message: event.NewEventList(event.ChangePassword, event.Password, oldpass, event.NewPassword, newpass)} - ac.bridge.Write(&message) -} - -func (ac *applicationClient) handleDeletedPeer(onion string) { - ac.acmutex.Lock() - defer ac.acmutex.Unlock() - ac.peers[onion].Shutdown() - delete(ac.peers, onion) - ac.eventBuses[onion].Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion)) - - ac.applicationCore.DeletePeer(onion) - ac.appBus.Publish(event.NewEventList(event.PeerDeleted, event.Identity, onion)) -} - -func (ac *applicationClient) AddPeerPlugin(onion string, pluginID plugins.PluginID) { - message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.AddPeerPlugin, map[event.Field]string{event.Identity: onion, event.Data: strconv.Itoa(int(pluginID))})} - ac.bridge.Write(&message) -} - -// LoadProfiles messages the service to load any profiles for the given password -func (ac *applicationClient) LoadProfiles(password string) { - message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.LoadProfiles, map[event.Field]string{event.Password: password})} - ac.bridge.Write(&message) -} - -func (ac *applicationClient) QueryACNStatus() { - message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.GetACNStatus, map[event.Field]string{})} - ac.bridge.Write(&message) -} - -func (ac *applicationClient) QueryACNVersion() { - message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.GetACNVersion, map[event.Field]string{})} - ac.bridge.Write(&message) -} - -// ShutdownPeer shuts down a peer and removes it from the app's management -func (ac *applicationClient) ShutdownPeer(onion string) { - ac.acmutex.Lock() - defer ac.acmutex.Unlock() - ac.eventBuses[onion].Shutdown() - delete(ac.eventBuses, onion) - ac.peers[onion].Shutdown() - delete(ac.peers, onion) - message := event.IPCMessage{Dest: DestApp, Message: event.NewEvent(event.ShutdownPeer, map[event.Field]string{event.Identity: onion})} - ac.bridge.Write(&message) -} - -// Shutdown shuts down the application client and all front end peer components -func (ac *applicationClient) Shutdown() { - for id := range ac.peers { - ac.ShutdownPeer(id) - } - ac.applicationBridge.Shutdown() - ac.appBus.Shutdown() -} diff --git a/app/appService.go b/app/appService.go deleted file mode 100644 index 6744202..0000000 --- a/app/appService.go +++ /dev/null @@ -1,209 +0,0 @@ -package app - -import ( - "cwtch.im/cwtch/app/plugins" - "cwtch.im/cwtch/event" - "cwtch.im/cwtch/model" - "cwtch.im/cwtch/protocol/connections" - "cwtch.im/cwtch/storage" - "git.openprivacy.ca/cwtch.im/tapir/primitives" - "git.openprivacy.ca/openprivacy/connectivity" - "git.openprivacy.ca/openprivacy/log" - path "path/filepath" - "strconv" - "sync" -) - -type applicationService struct { - applicationBridge - appletACN - appletPlugins - - storage map[string]storage.ProfileStore - engines map[string]connections.Engine - asmutex sync.Mutex -} - -// ApplicationService is the back end of an application that manages engines and writing storage and communicates to an ApplicationClient by an IPCBridge -type ApplicationService interface { - Shutdown() -} - -// NewAppService returns an ApplicationService that runs 
the backend of an app and communicates with a client by the supplied IPCBridge -func NewAppService(acn connectivity.ACN, appDirectory string, bridge event.IPCBridge) ApplicationService { - appService := &applicationService{storage: make(map[string]storage.ProfileStore), engines: make(map[string]connections.Engine), applicationBridge: applicationBridge{applicationCore: *newAppCore(appDirectory), bridge: bridge}} - - appService.appletACN.init(acn, appService.getACNStatusHandler()) - appService.handle = appService.handleEvent - - go appService.listen() - - log.Infoln("Created new App Service") - return appService -} - -func (as *applicationService) handleEvent(ev *event.Event) { - log.Infof("app Service handleEvent %v\n", ev.EventType) - switch ev.EventType { - case event.CreatePeer: - profileName := ev.Data[event.ProfileName] - password := ev.Data[event.Password] - tag := ev.Data[event.Data] - as.createPeer(profileName, password, tag) - case event.DeletePeer: - onion := ev.Data[event.Identity] - password := ev.Data[event.Password] - as.deletePeer(onion, password) - - message := event.IPCMessage{Dest: DestApp, Message: *ev} - as.bridge.Write(&message) - case event.AddPeerPlugin: - onion := ev.Data[event.Identity] - pluginID, _ := strconv.Atoi(ev.Data[event.Data]) - as.AddPlugin(onion, plugins.PluginID(pluginID), as.eventBuses[onion], as.acn) - case event.LoadProfiles: - password := ev.Data[event.Password] - as.loadProfiles(password) - case event.ReloadClient: - for _, storage := range as.storage { - peerMsg := *storage.GetNewPeerMessage() - peerMsg.Data[event.Status] = event.StorageRunning - peerMsg.Data[event.Created] = event.False - message := event.IPCMessage{Dest: DestApp, Message: peerMsg} - as.bridge.Write(&message) - } - - message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ReloadDone)} - as.bridge.Write(&message) - case event.ReloadPeer: - onion := ev.Data[event.Identity] - events := as.storage[onion].GetStatusMessages() - - for _, ev := range events { - message := event.IPCMessage{Dest: onion, Message: *ev} - as.bridge.Write(&message) - } - case event.GetACNStatus: - prog, status := as.acn.GetBootstrapStatus() - as.getACNStatusHandler()(prog, status) - case event.GetACNVersion: - version := as.acn.GetVersion() - as.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ACNVersion, event.Data, version)}) - case event.ShutdownPeer: - onion := ev.Data[event.Identity] - as.ShutdownPeer(onion) - } -} - -func (as *applicationService) createPeer(name, password, tag string) { - log.Infof("app Service create peer %v %v\n", name, password) - profile, err := as.applicationCore.CreatePeer(name) - as.eventBuses[profile.Onion] = event.IPCEventManagerFrom(as.bridge, profile.Onion, as.eventBuses[profile.Onion]) - if err != nil { - log.Errorf("Could not create Peer: %v\n", err) - message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.PeerError, event.Error, err.Error())} - as.bridge.Write(&message) - return - } - - profileStore := storage.CreateProfileWriterStore(as.eventBuses[profile.Onion], path.Join(as.directory, "profiles", profile.LocalID), password, profile) - - peerAuthorizations := profile.ContactsAuthorizations() - // TODO: Would be nice if ProtocolEngine did not need to explicitly be given the Private Key. 
- identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey) - engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, as.acn, as.eventBuses[profile.Onion], peerAuthorizations) - - as.storage[profile.Onion] = profileStore - as.engines[profile.Onion] = engine - - peerMsg := *profileStore.GetNewPeerMessage() - peerMsg.Data[event.Created] = event.True - peerMsg.Data[event.Status] = event.StorageNew - message := event.IPCMessage{Dest: DestApp, Message: peerMsg} - as.bridge.Write(&message) -} - -func (as *applicationService) loadProfiles(password string) { - count := 0 - as.applicationCore.LoadProfiles(password, false, func(profile *model.Profile, profileStore storage.ProfileStore) { - as.eventBuses[profile.Onion] = event.IPCEventManagerFrom(as.bridge, profile.Onion, as.eventBuses[profile.Onion]) - - peerAuthorizations := profile.ContactsAuthorizations() - identity := primitives.InitializeIdentity(profile.Name, &profile.Ed25519PrivateKey, &profile.Ed25519PublicKey) - engine := connections.NewProtocolEngine(identity, profile.Ed25519PrivateKey, as.acn, as.eventBuses[profile.Onion], peerAuthorizations) - as.asmutex.Lock() - as.storage[profile.Onion] = profileStore - as.engines[profile.Onion] = engine - as.asmutex.Unlock() - - peerMsg := *profileStore.GetNewPeerMessage() - peerMsg.Data[event.Created] = event.False - peerMsg.Data[event.Status] = event.StorageNew - message := event.IPCMessage{Dest: DestApp, Message: peerMsg} - as.bridge.Write(&message) - count++ - }) - if count == 0 { - message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.AppError, event.Error, event.AppErrLoaded0)} - as.bridge.Write(&message) - } -} - -func (as *applicationService) getACNStatusHandler() func(int, string) { - return func(progress int, status string) { - progStr := strconv.Itoa(progress) - as.bridge.Write(&event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status)}) - as.applicationCore.coremutex.Lock() - defer as.applicationCore.coremutex.Unlock() - for _, bus := range as.eventBuses { - bus.Publish(event.NewEventList(event.ACNStatus, event.Progress, progStr, event.Status, status)) - } - } -} - -func (as *applicationService) deletePeer(onion, password string) { - as.asmutex.Lock() - defer as.asmutex.Unlock() - - if as.storage[onion].CheckPassword(password) { - as.appletPlugins.ShutdownPeer(onion) - as.plugins.Delete(onion) - - as.engines[onion].Shutdown() - delete(as.engines, onion) - - as.storage[onion].Shutdown() - as.storage[onion].Delete() - delete(as.storage, onion) - - as.eventBuses[onion].Publish(event.NewEventList(event.ShutdownPeer, event.Identity, onion)) - - as.applicationCore.DeletePeer(onion) - log.Debugf("Delete peer for %v Done\n", onion) - - message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.PeerDeleted, event.Identity, onion)} - as.bridge.Write(&message) - return - } - message := event.IPCMessage{Dest: DestApp, Message: event.NewEventList(event.AppError, event.Error, event.PasswordMatchError, event.Identity, onion)} - as.bridge.Write(&message) -} - -func (as *applicationService) ShutdownPeer(onion string) { - as.engines[onion].Shutdown() - delete(as.engines, onion) - as.storage[onion].Shutdown() - delete(as.storage, onion) - as.eventBuses[onion].Shutdown() - delete(as.eventBuses, onion) -} - -// Shutdown shuts down the application Service and all peer related backend parts -func (as *applicationService) Shutdown() { 
- log.Debugf("shutting down application service...") - as.appletPlugins.Shutdown() - for id := range as.engines { - log.Debugf("shutting down application service peer engine %v", id) - as.ShutdownPeer(id) - } -} diff --git a/event/bridge/goChanBridge.go b/event/bridge/goChanBridge.go deleted file mode 100644 index ce9f67e..0000000 --- a/event/bridge/goChanBridge.go +++ /dev/null @@ -1,57 +0,0 @@ -package bridge - -import ( - "cwtch.im/cwtch/event" - "sync" -) - -type goChanBridge struct { - in chan event.IPCMessage - out chan event.IPCMessage - closedChan chan bool - closed bool - lock sync.Mutex -} - -// MakeGoChanBridge returns a simple testing IPCBridge made from inprocess go channels -func MakeGoChanBridge() (b1, b2 event.IPCBridge) { - chan1 := make(chan event.IPCMessage) - chan2 := make(chan event.IPCMessage) - closed := make(chan bool) - - a := &goChanBridge{in: chan1, out: chan2, closedChan: closed, closed: false} - b := &goChanBridge{in: chan2, out: chan1, closedChan: closed, closed: false} - - go monitor(a, b) - - return a, b -} - -func monitor(a, b *goChanBridge) { - <-a.closedChan - a.closed = true - b.closed = true - a.closedChan <- true -} - -func (pb *goChanBridge) Read() (*event.IPCMessage, bool) { - message, ok := <-pb.in - return &message, ok -} - -func (pb *goChanBridge) Write(message *event.IPCMessage) { - pb.lock.Lock() - defer pb.lock.Unlock() - if !pb.closed { - pb.out <- *message - } -} - -func (pb *goChanBridge) Shutdown() { - if !pb.closed { - close(pb.in) - close(pb.out) - pb.closedChan <- true - <-pb.closedChan - } -} diff --git a/event/bridge/infinite_chan.go b/event/bridge/infinite_chan.go deleted file mode 100644 index 688d7f6..0000000 --- a/event/bridge/infinite_chan.go +++ /dev/null @@ -1,72 +0,0 @@ -package bridge - -/* Todo: When go generics ships, refactor this and event.infiniteChannel into one */ - -// InfiniteChannel implements the Channel interface with an infinite buffer between the input and the output. 
-type InfiniteChannel struct { - input, output chan interface{} - length chan int - buffer *InfiniteQueue -} - -func newInfiniteChannel() *InfiniteChannel { - ch := &InfiniteChannel{ - input: make(chan interface{}), - output: make(chan interface{}), - length: make(chan int), - buffer: newInfiniteQueue(), - } - go ch.infiniteBuffer() - return ch -} - -// In returns the input channel -func (ch *InfiniteChannel) In() chan<- interface{} { - return ch.input -} - -// Out returns the output channel -func (ch *InfiniteChannel) Out() <-chan interface{} { - return ch.output -} - -// Len returns the length of items in queue -func (ch *InfiniteChannel) Len() int { - return <-ch.length -} - -// Close closes the InfiniteChanel -func (ch *InfiniteChannel) Close() { - close(ch.input) -} - -func (ch *InfiniteChannel) infiniteBuffer() { - var input, output chan interface{} - var next interface{} - input = ch.input - - for input != nil || output != nil { - select { - case elem, open := <-input: - if open { - ch.buffer.Add(elem) - } else { - input = nil - } - case output <- next: - ch.buffer.Remove() - case ch.length <- ch.buffer.Length(): - } - - if ch.buffer.Length() > 0 { - output = ch.output - next = ch.buffer.Peek() - } else { - output = nil - next = nil - } - } - - close(ch.output) - close(ch.length) -} diff --git a/event/bridge/infinite_queue.go b/event/bridge/infinite_queue.go deleted file mode 100644 index 5ce1289..0000000 --- a/event/bridge/infinite_queue.go +++ /dev/null @@ -1,105 +0,0 @@ -package bridge - -/* Todo: When go generics ships, refactor this and event.infinitQueue channel into one */ - -/* -Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. -*/ - -// minQueueLen is smallest capacity that queue may have. -// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). -const minQueueLen = 16 - -// InfiniteQueue represents a single instance of the queue data structure. -type InfiniteQueue struct { - buf []interface{} - head, tail, count int -} - -// New constructs and returns a new Queue. -func newInfiniteQueue() *InfiniteQueue { - return &InfiniteQueue{ - buf: make([]interface{}, minQueueLen), - } -} - -// Length returns the number of elements currently stored in the queue. -func (q *InfiniteQueue) Length() int { - return q.count -} - -// resizes the queue to fit exactly twice its current contents -// this can result in shrinking if the queue is less than half-full -func (q *InfiniteQueue) resize() { - newBuf := make([]interface{}, q.count<<1) - - if q.tail > q.head { - copy(newBuf, q.buf[q.head:q.tail]) - } else { - n := copy(newBuf, q.buf[q.head:]) - copy(newBuf[n:], q.buf[:q.tail]) - } - - q.head = 0 - q.tail = q.count - q.buf = newBuf -} - -// Add puts an element on the end of the queue. -func (q *InfiniteQueue) Add(elem interface{}) { - if q.count == len(q.buf) { - q.resize() - } - - q.buf[q.tail] = elem - // bitwise modulus - q.tail = (q.tail + 1) & (len(q.buf) - 1) - q.count++ -} - -// Peek returns the element at the head of the queue. This call panics -// if the queue is empty. -func (q *InfiniteQueue) Peek() interface{} { - if q.count <= 0 { - panic("queue: Peek() called on empty queue") - } - return q.buf[q.head] -} - -// Get returns the element at index i in the queue. 
If the index is -// invalid, the call will panic. This method accepts both positive and -// negative index values. Index 0 refers to the first element, and -// index -1 refers to the last. -func (q *InfiniteQueue) Get(i int) interface{} { - // If indexing backwards, convert to positive index. - if i < 0 { - i += q.count - } - if i < 0 || i >= q.count { - panic("queue: Get() called with index out of range") - } - // bitwise modulus - return q.buf[(q.head+i)&(len(q.buf)-1)] -} - -// Remove removes and returns the element from the front of the queue. If the -// queue is empty, the call will panic. -func (q *InfiniteQueue) Remove() interface{} { - if q.count <= 0 { - panic("queue: Remove() called on empty queue") - } - ret := q.buf[q.head] - q.buf[q.head] = nil - // bitwise modulus - q.head = (q.head + 1) & (len(q.buf) - 1) - q.count-- - // Resize down if buffer 1/4 full. - if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { - q.resize() - } - return ret -} diff --git a/event/bridge/pipeBridge-windows.go b/event/bridge/pipeBridge-windows.go deleted file mode 100644 index 1671ad9..0000000 --- a/event/bridge/pipeBridge-windows.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build windows - -package bridge - -import ( - "cwtch.im/cwtch/event" - "log" -) - -func NewPipeBridgeClient(inFilename, outFilename string) event.IPCBridge { - log.Fatal("Not supported on windows") - return nil -} - -// NewPipeBridgeService returns a pipe backed IPCBridge for a service -func NewPipeBridgeService(inFilename, outFilename string) event.IPCBridge { - log.Fatal("Not supported on windows") - return nil -} diff --git a/event/bridge/pipeBridge.go b/event/bridge/pipeBridge.go deleted file mode 100644 index c7fdc9f..0000000 --- a/event/bridge/pipeBridge.go +++ /dev/null @@ -1,357 +0,0 @@ -// +build !windows - -package bridge - -import ( - "cwtch.im/cwtch/event" - "cwtch.im/cwtch/protocol/connections" - "encoding/base64" - "encoding/binary" - "encoding/json" - "git.openprivacy.ca/openprivacy/log" - "os" - "sync" - "syscall" - "time" -) - -/* pipeBridge creates a pair of named pipes - Needs a call to new client and service to fully successfully open -*/ - -const maxBufferSize = 1000 - -const serviceName = "service" -const clientName = "client" - -const syn = "SYN" -const synack = "SYNACK" -const ack = "ACK" - -type pipeBridge struct { - infile, outfile string - in, out *os.File - read chan event.IPCMessage - write *InfiniteChannel - closedChan chan bool - state connections.ConnectionState - lock sync.Mutex - threeShake func() bool - - // For logging / debugging purposes - name string -} - -func newPipeBridge(inFilename, outFilename string) *pipeBridge { - syscall.Mkfifo(inFilename, 0600) - syscall.Mkfifo(outFilename, 0600) - pb := &pipeBridge{infile: inFilename, outfile: outFilename, state: connections.DISCONNECTED} - pb.read = make(chan event.IPCMessage, maxBufferSize) - pb.write = newInfiniteChannel() //make(chan event.IPCMessage, maxBufferSize) - return pb -} - -// NewPipeBridgeClient returns a pipe backed IPCBridge for a client -func NewPipeBridgeClient(inFilename, outFilename string) event.IPCBridge { - log.Debugf("Making new PipeBridge Client...\n") - pb := newPipeBridge(inFilename, outFilename) - pb.name = clientName - pb.threeShake = pb.threeShakeClient - go pb.connectionManager() - - return pb -} - -// NewPipeBridgeService returns a pipe backed IPCBridge for a service -func NewPipeBridgeService(inFilename, outFilename string) event.IPCBridge { - log.Debugf("Making new PipeBridge Service...\n") - pb := 
newPipeBridge(inFilename, outFilename) - pb.name = serviceName - pb.threeShake = pb.threeShakeService - - go pb.connectionManager() - - log.Debugf("Successfully created new PipeBridge Service!\n") - return pb -} - -func (pb *pipeBridge) setState(state connections.ConnectionState) { - pb.lock.Lock() - defer pb.lock.Unlock() - - pb.state = state -} - -func (pb *pipeBridge) getState() connections.ConnectionState { - pb.lock.Lock() - defer pb.lock.Unlock() - - return pb.state -} - -func (pb *pipeBridge) connectionManager() { - for pb.getState() != connections.KILLED { - log.Debugf("clientConnManager loop start init\n") - pb.setState(connections.CONNECTING) - - var err error - log.Debugf("%v open file infile\n", pb.name) - pb.in, err = os.OpenFile(pb.infile, os.O_RDWR, 0600) - if err != nil { - pb.setState(connections.DISCONNECTED) - continue - } - - log.Debugf("%v open file outfile\n", pb.name) - pb.out, err = os.OpenFile(pb.outfile, os.O_RDWR, 0600) - if err != nil { - pb.setState(connections.DISCONNECTED) - continue - } - - log.Debugf("Successfully connected PipeBridge %v!\n", pb.name) - - pb.handleConns() - } - log.Debugf("exiting %v ConnectionManager\n", pb.name) - -} - -// threeShake performs a 3way handshake sync up -func (pb *pipeBridge) threeShakeService() bool { - synacked := false - - for { - resp, err := pb.readString() - if err != nil { - return false - } - - if string(resp) == syn { - if !synacked { - err = pb.writeString([]byte(synack)) - if err != nil { - return false - } - synacked = true - } - } else if string(resp) == ack { - return true - } - } -} - -func (pb *pipeBridge) synLoop(stop chan bool) { - delay := time.Duration(0) - for { - select { - case <-time.After(delay): - err := pb.writeString([]byte(syn)) - if err != nil { - return - } - delay = time.Second - case <-stop: - return - } - } -} - -func (pb *pipeBridge) threeShakeClient() bool { - stop := make(chan bool) - go pb.synLoop(stop) - for { - resp, err := pb.readString() - if err != nil { - return false - } - - if string(resp) == synack { - stop <- true - err := pb.writeString([]byte(ack)) - return err == nil - } - } -} - -func (pb *pipeBridge) handleConns() { - - if !pb.threeShake() { - pb.setState(connections.FAILED) - pb.closeReset() - return - } - - pb.setState(connections.AUTHENTICATED) - - pb.closedChan = make(chan bool, 5) - - log.Debugf("handleConns authed, %v 2xgo\n", pb.name) - - go pb.handleRead() - go pb.handleWrite() - - <-pb.closedChan - log.Debugf("handleConns <-closedChan (%v)\n", pb.name) - if pb.getState() != connections.KILLED { - pb.setState(connections.FAILED) - } - pb.closeReset() - log.Debugf("handleConns done for %v, exit\n", pb.name) -} - -func (pb *pipeBridge) closeReset() { - pb.in.Close() - pb.out.Close() - close(pb.read) - pb.write.Close() - - if pb.getState() != connections.KILLED { - pb.read = make(chan event.IPCMessage, maxBufferSize) - pb.write = newInfiniteChannel() - } -} - -func (pb *pipeBridge) handleWrite() { - log.Debugf("handleWrite() %v\n", pb.name) - defer log.Debugf("exiting handleWrite() %v\n", pb.name) - - for { - select { - case messageInf := <-pb.write.output: - if messageInf == nil { - pb.closedChan <- true - return - } - message := messageInf.(event.IPCMessage) - if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup { - log.Debugf("handleWrite <- message: %v %v ...\n", message.Dest, message.Message.EventType) - } else { - log.Debugf("handleWrite <- 
message: %v\n", message) - } - if pb.getState() == connections.AUTHENTICATED { - encMessage := &event.IPCMessage{Dest: message.Dest, Message: event.Event{EventType: message.Message.EventType, EventID: message.Message.EventID, Data: make(map[event.Field]string)}} - for k, v := range message.Message.Data { - encMessage.Message.Data[k] = base64.StdEncoding.EncodeToString([]byte(v)) - } - - messageJSON, _ := json.Marshal(encMessage) - err := pb.writeString(messageJSON) - if err != nil { - pb.closedChan <- true - return - } - } else { - return - } - } - } -} - -func (pb *pipeBridge) handleRead() { - log.Debugf("handleRead() %v\n", pb.name) - defer log.Debugf("exiting handleRead() %v", pb.name) - - for { - log.Debugf("Waiting to handleRead()...\n") - - buffer, err := pb.readString() - if err != nil { - pb.closedChan <- true - return - } - - var message event.IPCMessage - err = json.Unmarshal(buffer, &message) - if err != nil { - log.Errorf("Read error: '%v', value: '%v'", err, buffer) - pb.closedChan <- true - return // probably new connection trying to initialize - } - for k, v := range message.Message.Data { - val, _ := base64.StdEncoding.DecodeString(v) - message.Message.Data[k] = string(val) - } - if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup { - log.Debugf("handleRead read<-: %v %v ...\n", message.Dest, message.Message.EventType) - } else { - log.Debugf("handleRead read<-: %v\n", message) - } - pb.read <- message - log.Debugf("handleRead wrote\n") - } -} - -func (pb *pipeBridge) Read() (*event.IPCMessage, bool) { - log.Debugf("Read() %v...\n", pb.name) - var ok = false - var message event.IPCMessage - for !ok && pb.getState() != connections.KILLED { - message, ok = <-pb.read - if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup { - log.Debugf("Read %v: %v %v ...\n", pb.name, message.Dest, message.Message.EventType) - } else { - log.Debugf("Read %v: %v\n", pb.name, message) - } - } - return &message, pb.getState() != connections.KILLED -} - -func (pb *pipeBridge) Write(message *event.IPCMessage) { - if message.Message.EventType == event.EncryptedGroupMessage || message.Message.EventType == event.SendMessageToGroup || message.Message.EventType == event.NewMessageFromGroup { - log.Debugf("Write %v: %v %v ...\n", pb.name, message.Dest, message.Message.EventType) - } else { - log.Debugf("Write %v: %v\n", pb.name, message) - } - pb.write.input <- *message - log.Debugf("Wrote\n") -} - -func (pb *pipeBridge) Shutdown() { - log.Debugf("pb.Shutdown() for %v currently in state: %v\n", pb.name, connections.ConnectionStateName[pb.getState()]) - pb.state = connections.KILLED - pb.closedChan <- true - log.Debugf("Done Shutdown for %v\n", pb.name) -} - -func (pb *pipeBridge) writeString(message []byte) error { - size := make([]byte, 2) - binary.LittleEndian.PutUint16(size, uint16(len(message))) - pb.out.Write(size) - - for pos := 0; pos < len(message); { - n, err := pb.out.Write(message[pos:]) - if err != nil { - log.Errorf("Writing out on pipeBridge: %v\n", err) - return err - } - pos += n - } - return nil -} - -func (pb *pipeBridge) readString() ([]byte, error) { - var n int - size := make([]byte, 2) - var err error - - n, err = pb.in.Read(size) - if err != nil || n != 2 { - log.Errorf("Could not read len int from stream: %v\n", err) - return nil, err - } - 
- n = int(binary.LittleEndian.Uint16(size)) - pos := 0 - buffer := make([]byte, n) - for n > 0 { - m, err := pb.in.Read(buffer[pos:]) - if err != nil { - log.Errorf("Reading into buffer from pipe: %v\n", err) - return nil, err - } - n -= m - pos += m - } - return buffer, nil -} diff --git a/event/bridge/pipeBridge_test.go b/event/bridge/pipeBridge_test.go deleted file mode 100644 index 637f3a6..0000000 --- a/event/bridge/pipeBridge_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package bridge - -import ( - "cwtch.im/cwtch/event" - "git.openprivacy.ca/openprivacy/log" - "os" - "testing" - "time" -) - -var ( - clientPipe = "./client" - servicePipe = "./service" -) - -func clientHelper(t *testing.T, in, out string, messageOrig *event.IPCMessage, done chan bool) { - client := NewPipeBridgeClient(in, out) - - messageAfter, ok := client.Read() - if !ok { - t.Errorf("Reading from client IPCBridge failed") - done <- true - return - } - - if messageOrig.Dest != messageAfter.Dest { - t.Errorf("Dest's value differs expected: %v actaul: %v", messageOrig.Dest, messageAfter.Dest) - } - - if messageOrig.Message.EventType != messageAfter.Message.EventType { - t.Errorf("EventTypes's value differs expected: %v actaul: %v", messageOrig.Message.EventType, messageAfter.Message.EventType) - } - - if messageOrig.Message.Data[event.Identity] != messageAfter.Message.Data[event.Identity] { - t.Errorf("Data[Identity]'s value differs expected: %v actaul: %v", messageOrig.Message.Data[event.Identity], messageAfter.Message.Data[event.Identity]) - } - - done <- true -} - -func serviceHelper(t *testing.T, in, out string, messageOrig *event.IPCMessage, done chan bool) { - service := NewPipeBridgeService(in, out) - - service.Write(messageOrig) - - done <- true -} - -func TestPipeBridge(t *testing.T) { - os.Remove(servicePipe) - os.Remove(clientPipe) - - messageOrig := &event.IPCMessage{Dest: "ABC", Message: event.NewEventList(event.NewPeer, event.Identity, "It is I")} - serviceDone := make(chan bool) - clientDone := make(chan bool) - - go clientHelper(t, clientPipe, servicePipe, messageOrig, clientDone) - go serviceHelper(t, servicePipe, clientPipe, messageOrig, serviceDone) - - <-serviceDone - <-clientDone -} - -func restartingClient(t *testing.T, in, out string, done chan bool) { - client := NewPipeBridgeClient(in, out) - - message1 := &event.IPCMessage{Dest: "ABC", Message: event.NewEventList(event.NewPeer)} - log.Infoln("client writing message 1") - client.Write(message1) - - time.Sleep(100 * time.Millisecond) - log.Infoln("client shutdown") - client.Shutdown() - - log.Infoln("client new client") - client = NewPipeBridgeClient(in, out) - message2 := &event.IPCMessage{Dest: "ABC", Message: event.NewEventList(event.DeleteContact)} - log.Infoln("client2 write message2") - client.Write(message2) - - done <- true -} - -func stableService(t *testing.T, in, out string, done chan bool) { - service := NewPipeBridgeService(in, out) - - log.Infoln("service wait read 1") - message1, ok := service.Read() - log.Infof("service read 1 %v ok:%v\n", message1, ok) - if !ok { - t.Errorf("Reading from client IPCBridge 1st time failed") - done <- true - return - } - if message1.Message.EventType != event.NewPeer { - t.Errorf("Wrong message received, expected NewPeer\n") - done <- true - return - } - - log.Infoln("service wait read 2") - message2, ok := service.Read() - log.Infof("service read 2 got %v ok:%v\n", message2, ok) - if !ok { - t.Errorf("Reading from client IPCBridge 2nd time failed") - done <- true - return - } - if 
message2.Message.EventType != event.DeleteContact { - t.Errorf("Wrong message received, expected DeleteContact, got %v\n", message2) - done <- true - return - } - - done <- true -} - -func TestReconnect(t *testing.T) { - log.Infoln("TestReconnect") - os.Remove(servicePipe) - os.Remove(clientPipe) - - serviceDone := make(chan bool) - clientDone := make(chan bool) - - go restartingClient(t, clientPipe, servicePipe, clientDone) - go stableService(t, servicePipe, clientPipe, serviceDone) - - <-serviceDone - <-clientDone -} diff --git a/event/common.go b/event/common.go index cd51e1e..c682651 100644 --- a/event/common.go +++ b/event/common.go @@ -126,8 +126,7 @@ const ( // a peer contact has been added // attributes: // RemotePeer [eg ""] - // Authorization - PeerCreated = Type("PeerCreated") + ContactCreated = Type("ContactCreated") // Password, NewPassword ChangePassword = Type("ChangePassword") @@ -253,6 +252,9 @@ const ( FileDownloadProgressUpdate = Type("FileDownloadProgressUpdate") FileDownloaded = Type("FileDownloaded") FileVerificationFailed = Type("FileVerificationFailed") + + // Profile Attribute Event + UpdatedProfileAttribute = Type("UpdatedProfileAttribute") ) // Field defines common event attributes @@ -273,6 +275,7 @@ const ( Identity = Field("Identity") + ConversationID = Field("ConversationID") GroupID = Field("GroupID") GroupServer = Field("GroupServer") ServerTokenY = Field("ServerTokenY") diff --git a/event/eventmanager.go b/event/eventmanager.go index 2575f2e..04c09a2 100644 --- a/event/eventmanager.go +++ b/event/eventmanager.go @@ -62,11 +62,9 @@ type manager struct { } // Manager is an interface for an event bus -// FIXME this interface lends itself to race conditions around channels type Manager interface { Subscribe(Type, Queue) Publish(Event) - PublishLocal(Event) Shutdown() } @@ -123,11 +121,6 @@ func (em *manager) Publish(event Event) { } } -// Publish an event only locally, not going over an IPC bridge if there is one -func (em *manager) PublishLocal(event Event) { - em.Publish(event) -} - // eventBus is an internal function that is used to distribute events to all subscribers func (em *manager) eventBus() { for { diff --git a/event/eventmanageripc.go b/event/eventmanageripc.go deleted file mode 100644 index 4dddcd1..0000000 --- a/event/eventmanageripc.go +++ /dev/null @@ -1,38 +0,0 @@ -package event - -type ipcManager struct { - manager Manager - - onion string - ipcBridge IPCBridge -} - -// NewIPCEventManager returns an EvenetManager that also pipes events over and supplied IPCBridge -func NewIPCEventManager(bridge IPCBridge, onion string) Manager { - em := &ipcManager{onion: onion, ipcBridge: bridge, manager: NewEventManager()} - return em -} - -// IPCEventManagerFrom returns an IPCEventManger from the supplied manager and IPCBridge -func IPCEventManagerFrom(bridge IPCBridge, onion string, manager Manager) Manager { - em := &ipcManager{onion: onion, ipcBridge: bridge, manager: manager} - return em -} - -func (ipcm *ipcManager) Publish(ev Event) { - ipcm.manager.Publish(ev) - message := &IPCMessage{Dest: ipcm.onion, Message: ev} - ipcm.ipcBridge.Write(message) -} - -func (ipcm *ipcManager) PublishLocal(ev Event) { - ipcm.manager.Publish(ev) -} - -func (ipcm *ipcManager) Subscribe(eventType Type, queue Queue) { - ipcm.manager.Subscribe(eventType, queue) -} - -func (ipcm *ipcManager) Shutdown() { - ipcm.manager.Shutdown() -} diff --git a/event/ipc.go b/event/ipc.go deleted file mode 100644 index 8abae3e..0000000 --- a/event/ipc.go +++ /dev/null @@ -1,14 +0,0 @@ 
-package event - -// IPCMessage is a wrapper for a regular eventMessage with a destination (onion|AppDest) so the other side of the bridge can route appropriately -type IPCMessage struct { - Dest string - Message Event -} - -// IPCBridge is an interface to a IPC construct used to communicate IPCMessages -type IPCBridge interface { - Read() (*IPCMessage, bool) - Write(message *IPCMessage) - Shutdown() -} diff --git a/functionality/filesharing/filesharing_functionality.go b/functionality/filesharing/filesharing_functionality.go index 0dae38a..c767352 100644 --- a/functionality/filesharing/filesharing_functionality.go +++ b/functionality/filesharing/filesharing_functionality.go @@ -23,9 +23,10 @@ import ( type Functionality struct { } -// FunctionalityGate returns contact.Functionality always +// FunctionalityGate returns filesharing if enabled in the given experiment map +// Note: Experiment maps are currently in libcwtch-go func FunctionalityGate(experimentMap map[string]bool) (*Functionality, error) { - if experimentMap["filesharing"] == true { + if experimentMap["filesharing"] { return new(Functionality), nil } return nil, errors.New("filesharing is not enabled") @@ -42,7 +43,8 @@ type OverlayMessage struct { // DownloadFile given a profile, a conversation handle and a file sharing key, start off a download process // to downloadFilePath -func (f *Functionality) DownloadFile(profile peer.CwtchPeer, handle string, downloadFilePath string, manifestFilePath string, key string) { +func (f *Functionality) DownloadFile(profile peer.CwtchPeer, conversationID int, downloadFilePath string, manifestFilePath string, key string) { + // Store local.filesharing.filekey.manifest as the location of the manifest profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", key), manifestFilePath) @@ -50,12 +52,12 @@ func (f *Functionality) DownloadFile(profile peer.CwtchPeer, handle string, down profile.SetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.path", key), downloadFilePath) // Get the value of conversation.filesharing.filekey.manifest.size from `handle` - profile.SendScopedZonedGetValToContact(handle, attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key)) + profile.SendScopedZonedGetValToContact(conversationID, attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest.size", key)) } // ShareFile given a profile and a conversation handle, sets up a file sharing process to share the file // at filepath -func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer, handle string) error { +func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer, conversationID int) error { manifest, err := files.CreateManifest(filepath) if err != nil { return err @@ -97,7 +99,7 @@ func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer, handl profile.ShareFile(key, string(serializedManifest)) - profile.SendMessage(handle, string(wrapperJSON)) + profile.SendMessage(conversationID, string(wrapperJSON)) return nil } diff --git a/go.mod b/go.mod index 324ff8a..9340f4f 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( git.openprivacy.ca/openprivacy/connectivity v1.5.0 git.openprivacy.ca/openprivacy/log v1.0.3 github.com/gtank/ristretto255 v0.1.2 + github.com/mutecomm/go-sqlcipher/v4 v4.4.2 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007 // indirect diff --git a/go.sum b/go.sum index d628ee0..38aef9b 100644 --- a/go.sum +++ b/go.sum @@ -22,11 +22,14 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0= github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mutecomm/go-sqlcipher/v4 v4.4.2 h1:eM10bFtI4UvibIsKr10/QT7Yfz+NADfjZYh0GKrXUNc= +github.com/mutecomm/go-sqlcipher/v4 v4.4.2/go.mod h1:mF2UmIpBnzFeBdu/ypTDb/LdbS0nk0dfSN1WUsWTjMA= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= diff --git a/model/attr/zone.go b/model/attr/zone.go index 77d7753..3e3c667 100644 --- a/model/attr/zone.go +++ b/model/attr/zone.go @@ -17,9 +17,15 @@ const ( // ProfileZone for attributes related to profile details like name and profile image ProfileZone = Zone("profile") + // LegacyGroupZone for attributes related to legacy group experiment + LegacyGroupZone = Zone("legacygroup") + // FilesharingZone for attributes related to file sharing FilesharingZone = Zone("filesharing") + // ServerKeyZone for attributes related to Server Keys + ServerKeyZone = Zone("serverkey") + // UnknownZone is a catch all useful for error handling UnknownZone = Zone("unknown") ) @@ -44,8 +50,12 @@ func ParseZone(path string) (Zone, string) { switch Zone(parts[0]) { case ProfileZone: return ProfileZone, parts[1] + case LegacyGroupZone: + return LegacyGroupZone, parts[1] case FilesharingZone: return FilesharingZone, parts[1] + case ServerKeyZone: + return ServerKeyZone, parts[1] default: return UnknownZone, parts[1] } diff --git a/model/constants/attributes.go b/model/constants/attributes.go index 0eebcf6..d3385a6 100644 --- a/model/constants/attributes.go +++ b/model/constants/attributes.go @@ -3,6 +3,9 @@ package constants // Name refers to a Profile Name const Name = "name" +// Onion refers the Onion address of the profile +const Onion = "onion" + // Tag describes the type of a profile e.g. default password / encrypted etc. const Tag = "tag" @@ -11,3 +14,38 @@ const ProfileTypeV1DefaultPassword = "v1-defaultPassword" // ProfileTypeV1Password is a tag describing a profile encrypted derived from a user-provided password. const ProfileTypeV1Password = "v1-userPassword" + +// GroupID is the ID of a group +const GroupID = "groupid" + +// GroupServer identifies the Server the legacy group is hosted on +const GroupServer = "groupserver" + +// GroupKey is the name of the group key attribute... 
+const GroupKey = "groupkey" + +// True - true +const True = "true" + +// False - false +const False = "false" + +// AttrAuthor - conversation attribute for author of the message - referenced by pub key rather than conversation id because of groups. +const AttrAuthor = "author" + +// AttrAck - conversation attribute for acknowledgement status +const AttrAck = "ack" + +// AttrErr - conversation attribute for errored status +const AttrErr = "error" + +// AttrSentTimestamp - conversation attribute for the time the message was (nominally) sent +const AttrSentTimestamp = "sent" + +// Legacy MessageFlags + +// AttrRejected - conversation attribute for storing rejected prompts (for invites) +const AttrRejected = "rejected-invite" + +// AttrDownloaded - conversation attribute for storing downloaded prompts (for file downloads) +const AttrDownloaded = "file-downloaded" diff --git a/model/constants/bundles.go b/model/constants/bundles.go new file mode 100644 index 0000000..f2e0fd7 --- /dev/null +++ b/model/constants/bundles.go @@ -0,0 +1,13 @@ +package constants + +// ServerPrefix precedes a server import statement +const ServerPrefix = "server:" + +// TofuBundlePrefix precedes a server and a group import statement +const TofuBundlePrefix = "tofubundle:" + +// GroupPrefix precedes a group import statement +const GroupPrefix = "torv3" + +// ImportBundlePrefix is an error api constant for import bundle error messages +const ImportBundlePrefix = "importBundle" diff --git a/model/conversation.go b/model/conversation.go new file mode 100644 index 0000000..df7e151 --- /dev/null +++ b/model/conversation.go @@ -0,0 +1,96 @@ +package model + +import ( + "cwtch.im/cwtch/model/attr" + "cwtch.im/cwtch/model/constants" + "encoding/json" +) + +// AccessControl is a type determining client assigned authorization to a peer +type AccessControl struct { + Blocked bool // Any attempts from this handle to connect are blocked + Read bool // Allows a handle to access the conversation + Append bool // Allows a handle to append new messages to the conversation +} + +// DefaultP2PAccessControl - because in the year 2021, go does not support constant structs... +func DefaultP2PAccessControl() AccessControl { + return AccessControl{Read: true, Append: true, Blocked: false} +} + +// AccessControlList represents an access control list for a conversation. Mapping handles to conversation +// functions +type AccessControlList map[string]AccessControl + +// Serialize transforms the ACL into json. +func (acl *AccessControlList) Serialize() []byte { + data, _ := json.Marshal(acl) + return data +} + +// DeserializeAccessControlList takes in JSON and returns an AccessControlList +func DeserializeAccessControlList(data []byte) AccessControlList { + var acl AccessControlList + json.Unmarshal(data, &acl) + return acl +} + +// Attributes a type-driven encapsulation of an Attribute map. 
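The AccessControlList above replaces the old per-contact Authorization flags with explicit blocked/read/append capabilities per handle. A minimal, hedged sketch of how the new type round-trips through its JSON helpers, assuming only the cwtch.im/cwtch/model package as added in this diff; the handle string is a placeholder, not a real address:

package main

import (
	"fmt"

	"cwtch.im/cwtch/model"
)

func main() {
	// New peer-to-peer conversations start from the default access control:
	// read and append allowed, not blocked.
	acl := model.AccessControlList{"example-handle": model.DefaultP2PAccessControl()}

	// Serialize marshals the ACL to JSON so it can be persisted with the conversation...
	data := acl.Serialize()

	// ...and DeserializeAccessControlList restores it.
	restored := model.DeserializeAccessControlList(data)
	fmt.Printf("blocked=%v read=%v append=%v\n",
		restored["example-handle"].Blocked,
		restored["example-handle"].Read,
		restored["example-handle"].Append)
}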
+type Attributes map[string]string + +// Serialize transforms an Attributes map into a JSON struct +func (a *Attributes) Serialize() []byte { + data, _ := json.Marshal(a) + return data +} + +// DeserializeAttributes converts a JSON struct into an Attributes map +func DeserializeAttributes(data []byte) Attributes { + var attributes Attributes + json.Unmarshal(data, &attributes) + return attributes +} + +// Conversation encapsulates high-level information about a conversation, including the +// handle, any set attributes, the access control list associated with the message tree and the +// accepted status of the conversation (whether the user has consented into the conversation). +type Conversation struct { + ID int + Handle string + Attributes Attributes + ACL AccessControlList + Accepted bool +} + +// GetAttribute is a helper function that fetches a conversation attribute by scope, zone and key +func (ci *Conversation) GetAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool) { + if value, exists := ci.Attributes[scope.ConstructScopedZonedPath(zone.ConstructZonedPath(key)).ToString()]; exists { + return value, true + } + return "", false +} + +// IsGroup is a helper attribute that identifies whether a conversation is a legacy group +func (ci *Conversation) IsGroup() bool { + if _, exists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)).ToString()]; exists { + return true + } + return false +} + +// IsServer is a helper attribute that identifies whether a conversation is with a server +func (ci *Conversation) IsServer() bool { + if _, exists := ci.Attributes[attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(string(BundleType))).ToString()]; exists { + return true + } + return false +} + +// ConversationMessage bundles an instance of a conversation message row +type ConversationMessage struct { + ID int + Body string + Attr Attributes + Signature string + ContentHash string +} diff --git a/model/group.go b/model/group.go index ff01c4f..8e1d2d9 100644 --- a/model/group.go +++ b/model/group.go @@ -4,8 +4,6 @@ import ( "crypto/ed25519" "crypto/rand" "crypto/sha512" - "cwtch.im/cwtch/model/attr" - "cwtch.im/cwtch/model/constants" "cwtch.im/cwtch/protocol/groups" "encoding/base32" "encoding/base64" @@ -13,13 +11,13 @@ import ( "encoding/json" "errors" "fmt" + "git.openprivacy.ca/cwtch.im/tapir/primitives" "git.openprivacy.ca/openprivacy/connectivity/tor" "git.openprivacy.ca/openprivacy/log" "golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/pbkdf2" "io" "strings" - "sync" "time" ) @@ -33,25 +31,19 @@ const GroupInvitePrefix = "torv3" // tied to a server under a given group key. Each group has a set of Messages. 
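The IsGroup and IsServer helpers above infer a conversation's type from the presence of well-known attributes rather than a dedicated type field. A hedged sketch of that lookup, using only the model, attr and constants packages as they appear in this diff; the group id value is made up:

package main

import (
	"fmt"

	"cwtch.im/cwtch/model"
	"cwtch.im/cwtch/model/attr"
	"cwtch.im/cwtch/model/constants"
)

func main() {
	// Build the scope.zone.key path the same way Conversation.GetAttribute does internally.
	key := attr.LocalScope.ConstructScopedZonedPath(
		attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)).ToString()

	ci := &model.Conversation{
		ID:         1,
		Handle:     "examplegroupid", // placeholder, not a real group id
		Attributes: model.Attributes{key: "examplegroupid"},
		ACL:        model.AccessControlList{},
	}

	// The presence of the legacygroup groupid attribute is what marks this
	// conversation as a legacy group.
	fmt.Println(ci.IsGroup()) // true

	gid, ok := ci.GetAttribute(attr.LocalScope, attr.LegacyGroupZone, constants.GroupID)
	fmt.Println(gid, ok)
}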
type Group struct { // GroupID is now derived from the GroupKey and the GroupServer - GroupID string - GroupKey [32]byte - GroupServer string - Timeline Timeline `json:"-"` - Accepted bool - IsCompromised bool - Attributes map[string]string - lock sync.Mutex - LocalID string - State string `json:"-"` - Version int + GroupID string + GroupName string + GroupKey [32]byte + GroupServer string + Attributes map[string]string //legacy to not use + Version int + Timeline Timeline `json:"-"` + LocalID string } // NewGroup initializes a new group associated with a given CwtchServer func NewGroup(server string) (*Group, error) { group := new(Group) - group.Version = CurrentGroupVersion - group.LocalID = GenerateRandomID() - group.Accepted = true // we are starting a group, so we assume we want to connect to it... if !tor.IsValidHostname(server) { return nil, errors.New("server is not a valid v3 onion") } @@ -68,11 +60,6 @@ func NewGroup(server string) (*Group, error) { // Derive Group ID from the group key and the server public key. This binds the group to a particular server // and key. group.GroupID = deriveGroupID(groupKey[:], server) - - group.Attributes = make(map[string]string) - // By default we set the "name" of the group to a random string, we can override this later, but to simplify the - // codes around invite, we assume that this is always set. - group.Attributes[attr.GetLocalScope(constants.Name)] = group.GroupID return group, nil } @@ -89,17 +76,12 @@ func deriveGroupID(groupKey []byte, serverHostname string) string { return hex.EncodeToString(pbkdf2.Key(groupKey, pubkey, 4096, 16, sha512.New)) } -// Compromised should be called if we detect a groupkey leak -func (g *Group) Compromised() { - g.IsCompromised = true -} - // Invite generates a invitation that can be sent to a cwtch peer func (g *Group) Invite() (string, error) { gci := &groups.GroupInvite{ GroupID: g.GroupID, - GroupName: g.Attributes[attr.GetLocalScope(constants.Name)], + GroupName: g.GroupName, SharedKey: g.GroupKey[:], ServerHost: g.GroupServer, } @@ -109,75 +91,6 @@ func (g *Group) Invite() (string, error) { return serializedInvite, err } -// AddSentMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline -func (g *Group) AddSentMessage(message *groups.DecryptedGroupMessage, sig []byte) Message { - g.lock.Lock() - defer g.lock.Unlock() - timelineMessage := Message{ - Message: message.Text, - Timestamp: time.Unix(int64(message.Timestamp), 0), - Received: time.Unix(0, 0), - Signature: sig, - PeerID: message.Onion, - PreviousMessageSig: message.PreviousMessageSig, - ReceivedByServer: false, - } - g.Timeline.Insert(&timelineMessage) - return timelineMessage -} - -// ErrorSentMessage removes a sent message from the unacknowledged list and sets its error flag if found, otherwise returns false -func (g *Group) ErrorSentMessage(sig []byte, error string) bool { - g.lock.Lock() - defer g.lock.Unlock() - - return g.Timeline.SetSendError(sig, error) -} - -// GetMessage returns the message at index `index` if it exists. Otherwise returns false. -// This routine also returns the length of the timeline -// If go has an optional type this would return Option... 
-func (g *Group) GetMessage(index int) (bool, Message, int) {
- g.lock.Lock()
- defer g.lock.Unlock()
-
- length := len(g.Timeline.Messages)
-
- if length > index {
- return true, g.Timeline.Messages[index], length
- }
- return false, Message{}, length
-}
-
-// AddMessage takes a DecryptedGroupMessage and adds it to the Groups Timeline
-func (g *Group) AddMessage(message *groups.DecryptedGroupMessage, sig []byte) (*Message, int) {
-
- g.lock.Lock()
- defer g.lock.Unlock()
-
- timelineMessage := &Message{
- Message: message.Text,
- Timestamp: time.Unix(int64(message.Timestamp), 0),
- Received: time.Now(),
- Signature: sig,
- PeerID: message.Onion,
- PreviousMessageSig: message.PreviousMessageSig,
- ReceivedByServer: true,
- Error: "",
- Acknowledged: true,
- }
- index := g.Timeline.Insert(timelineMessage)
-
- return timelineMessage, index
-}
-
-// GetTimeline provides a safe copy of the timeline
-func (g *Group) GetTimeline() (timeline []Message) {
- g.lock.Lock()
- defer g.lock.Unlock()
- return g.Timeline.GetMessages()
-}
-
 //EncryptMessage takes a message and encrypts the message under the group key.
 func (g *Group) EncryptMessage(message *groups.DecryptedGroupMessage) ([]byte, error) {
 var nonce [24]byte
@@ -211,21 +124,6 @@ func (g *Group) DecryptMessage(ciphertext []byte) (bool, *groups.DecryptedGroupM
 return false, nil
 }
-// SetAttribute allows applications to store arbitrary configuration info at the group level.
-func (g *Group) SetAttribute(name string, value string) {
- g.lock.Lock()
- defer g.lock.Unlock()
- g.Attributes[name] = value
-}
-
-// GetAttribute returns the value of a value set with SetAttribute. If no such value has been set exists is set to false.
-func (g *Group) GetAttribute(name string) (value string, exists bool) {
- g.lock.Lock()
- defer g.lock.Unlock()
- value, exists = g.Attributes[name]
- return
-}
-
 // ValidateInvite takes in a serialized invite and returns the invite structure if it is cryptographically valid
 // and an error if it is not
 func ValidateInvite(invite string) (*groups.GroupInvite, error) {
@@ -263,3 +161,115 @@ func ValidateInvite(invite string) (*groups.GroupInvite, error) {
 }
 return nil, errors.New("invite has invalid structure")
 }
+
+// AttemptDecryption takes a ciphertext and signature and attempts to decrypt it under this group's key.
+// If successful, returns the decrypted group message
+func (g *Group) AttemptDecryption(ciphertext []byte, signature []byte) (bool, *groups.DecryptedGroupMessage) {
+ success, dgm := g.DecryptMessage(ciphertext)
+ if success {
+
+ // Attempt to serialize this message
+ serialized, err := json.Marshal(dgm)
+
+ // Someone sent a message that isn't a valid Decrypted Group Message. Since we require this struct in order
+ // to verify the message, we simply ignore it.
+ if err != nil {
+ return false, nil
+ }
+
+ // This now requires knowledge of the Sender, the Onion and the Specific Decrypted Group Message (which should only
+ // be derivable from the cryptographic key) which contains many unique elements such as the time and random padding
+ verified := g.VerifyGroupMessage(dgm.Onion, g.GroupID, base64.StdEncoding.EncodeToString(serialized), signature)
+
+ if !verified {
+ // An earlier version of this protocol mistakenly signed the ciphertext of the message
+ // instead of the serialized decrypted group message.
+ // This has 2 issues:
+ // 1. A server with knowledge of group members' public keys AND the Group ID would be able to detect valid messages
+ // 2.
It made the metadata-security of a group dependent on keeping the cryptographically derived Group ID secret.
+ // While not awful, it also isn't good. For Version 3 groups only we permit Cwtch to check this older signature
+ // structure in a backwards compatible way for the duration of the Groups Experiment.
+ // TODO: Delete this check when Groups are no longer Experimental
+ if g.Version == 3 {
+ verified = g.VerifyGroupMessage(dgm.Onion, g.GroupID, string(ciphertext), signature)
+ }
+ }
+
+ // So we have a message that has a valid group key, but the signature can't be verified.
+ // The most obvious explanation for this is that the group key has been compromised (or we are in an open group and the server is being malicious)
+ // Either way, someone who has the private key is being detectably bad so we are just going to throw this message away.
+ if !verified {
+ return false, nil
+ }
+ return true, dgm
+ }
+
+ // If we couldn't decrypt the message under this group's key we just return false. This is an expected case
+ return false, nil
+}
+
+// VerifyGroupMessage confirms the authenticity of a message given a sender onion, message and signature.
+// The goal of this function is 2-fold:
+// 1. We confirm that the sender referenced in the group text is the actual sender of the message (or at least
+// knows the sender's private key)
+// 2. We confirm that the sender sent the message to a particular group id on a specific server (it doesn't
+// matter if we actually received this message from the server or from a hybrid protocol, all that matters is
+// that the sender and receivers agree that this message was intended for the group)
+// The 2nd point is important as it prevents an attack documented in the original Cwtch paper (and later at
+// https://docs.openprivacy.ca/cwtch-security-handbook/groups.html) in which a malicious profile sets up 2 groups
+// on two different servers with the same key and then forwards messages between them to convince the parties in
+// each group that they are actually in one big group (with the intent to later censor and/or selectively send messages
+// to each group).
+func (g *Group) VerifyGroupMessage(onion string, groupID string, message string, signature []byte) bool {
+ // We construct the signed value from our group id, the known reference server and the message itself.
+ m := groupID + g.GroupServer + message
+
+ // We derive the public key from the sender's onion address and check the signature against it.
+ decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion))
+ if err == nil && len(decodedPub) >= 32 {
+ return ed25519.Verify(decodedPub[:32], []byte(m), signature)
+ }
+ return false
+}
+
+// EncryptMessageToGroup, when given a message and a group, encrypts and signs the message under the group and
+// profile
+func EncryptMessageToGroup(message string, author primitives.Identity, group *Group) ([]byte, []byte, *groups.DecryptedGroupMessage, error) {
+ if len(message) > MaxGroupMessageLength {
+ return nil, nil, nil, errors.New("group message is too long")
+ }
+ timestamp := time.Now().Unix()
+
+ // Select the latest message from the timeline as a reference point.
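+ // Each message embeds the signature of the message that came before it (PreviousMessageSig),
+ // forming a chain; the first message in a group has no predecessor, so the GroupID is used instead.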
+ var prevSig []byte + if len(group.Timeline.Messages) > 0 { + prevSig = group.Timeline.Messages[len(group.Timeline.Messages)-1].Signature + } else { + prevSig = []byte(group.GroupID) + } + + lenPadding := MaxGroupMessageLength - len(message) + padding := make([]byte, lenPadding) + getRandomness(&padding) + hexGroupID, err := hex.DecodeString(group.GroupID) + if err != nil { + return nil, nil, nil, err + } + + dm := &groups.DecryptedGroupMessage{ + Onion: author.Hostname(), + Text: message, + SignedGroupID: hexGroupID, + Timestamp: uint64(timestamp), + PreviousMessageSig: prevSig, + Padding: padding[:], + } + + ciphertext, err := group.EncryptMessage(dm) + if err != nil { + return nil, nil, nil, err + } + serialized, _ := json.Marshal(dm) + signature := author.Sign([]byte(group.GroupID + group.GroupServer + base64.StdEncoding.EncodeToString(serialized))) + return ciphertext, signature, dm, nil +} diff --git a/model/group_test.go b/model/group_test.go index 1de73e7..1d90ca6 100644 --- a/model/group_test.go +++ b/model/group_test.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "cwtch.im/cwtch/protocol/groups" "strings" - "sync" "testing" "time" ) @@ -42,11 +41,7 @@ func TestGroup(t *testing.T) { t.Errorf("group encryption was invalid, or returned wrong message decrypted:%v message:%v", ok, message) return } - g.SetAttribute("test", "test_value") - value, exists := g.GetAttribute("test") - if !exists || value != "test_value" { - t.Errorf("Custom Attribute Should have been set, instead %v %v", exists, value) - } + t.Logf("Got message %v", message) } @@ -61,17 +56,12 @@ func TestGroupErr(t *testing.T) { func TestGroupValidation(t *testing.T) { group := &Group{ - GroupID: "", - GroupKey: [32]byte{}, - GroupServer: "", - Timeline: Timeline{}, - Accepted: false, - IsCompromised: false, - Attributes: nil, - lock: sync.Mutex{}, - LocalID: "", - State: "", - Version: 0, + GroupID: "", + GroupKey: [32]byte{}, + GroupServer: "", + Timeline: Timeline{}, + LocalID: "", + Version: 0, } invite, _ := group.Invite() diff --git a/model/message_test.go b/model/message_test.go deleted file mode 100644 index be2859e..0000000 --- a/model/message_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package model - -import ( - "strconv" - "testing" - "time" -) - -func TestMessagePadding(t *testing.T) { - - // Setup the Group - sarah := GenerateNewProfile("Sarah") - alice := GenerateNewProfile("Alice") - sarah.AddContact(alice.Onion, &alice.PublicProfile) - alice.AddContact(sarah.Onion, &sarah.PublicProfile) - - gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - - sarah.ProcessInvite(invite) - - group := alice.GetGroup(gid) - - c1, s1, err := sarah.EncryptMessageToGroup("Hello World 1", group.GroupID) - t.Logf("Length of Encrypted Message: %v %v", len(c1), err) - alice.AttemptDecryption(c1, s1) - - c2, s2, _ := alice.EncryptMessageToGroup("Hello World 2", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c2)) - alice.AttemptDecryption(c2, s2) - - c3, s3, _ := alice.EncryptMessageToGroup("Hello World 3", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c3)) - alice.AttemptDecryption(c3, s3) - - c4, s4, _ := alice.EncryptMessageToGroup("Hello World this is a much longer message 3", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c4)) - alice.AttemptDecryption(c4, s4) - -} - -func TestTranscriptConsistency(t *testing.T) { - timeline := new(Timeline) - - // Setup the Group - sarah := GenerateNewProfile("Sarah") - alice := GenerateNewProfile("Alice") 
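The deleted model tests above exercised group messaging through the old Profile methods. For reference, a hedged sketch of the equivalent round trip using the package-level EncryptMessageToGroup and the Group.AttemptDecryption method added in model/group.go earlier in this diff; the identity name is illustrative and the server onion is the one used by the old tests:

package main

import (
	"crypto/rand"
	"fmt"

	"cwtch.im/cwtch/model"
	"git.openprivacy.ca/cwtch.im/tapir/primitives"
	"golang.org/x/crypto/ed25519"
)

func main() {
	// A stand-in author identity; in Cwtch proper the keys come from the
	// profile's encrypted storage.
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)
	author := primitives.InitializeIdentity("alice", &priv, &pub)

	group, err := model.NewGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd")
	if err != nil {
		panic(err)
	}

	// Encrypt and sign a message to the group...
	ct, sig, _, err := model.EncryptMessageToGroup("hello group", author, group)
	if err != nil {
		panic(err)
	}

	// ...then decrypt and verify it as a holder of the same group key.
	if ok, dgm := group.AttemptDecryption(ct, sig); ok {
		fmt.Println(dgm.Onion, dgm.Text)
	}
}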
- sarah.AddContact(alice.Onion, &alice.PublicProfile) - alice.AddContact(sarah.Onion, &sarah.PublicProfile) - - // The lightest weight server entry possible (usually we would import a key bundle...) - sarah.AddContact("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", &PublicProfile{Attributes: map[string]string{string(KeyTypeServerOnion): "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"}}) - - gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - - sarah.ProcessInvite(invite) - - group := alice.GetGroup(gid) - - t.Logf("group: %v, sarah %v", group, sarah) - - c1, s1, _ := alice.EncryptMessageToGroup("Hello World 1", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c1)) - alice.AttemptDecryption(c1, s1) - - c2, s2, _ := alice.EncryptMessageToGroup("Hello World 2", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c2)) - alice.AttemptDecryption(c2, s2) - - c3, s3, _ := alice.EncryptMessageToGroup("Hello World 3", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c3)) - alice.AttemptDecryption(c3, s3) - - time.Sleep(time.Second * 1) - - c4, s4, _ := alice.EncryptMessageToGroup("Hello World 4", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c4)) - alice.AttemptDecryption(c4, s4) - - c5, s5, _ := alice.EncryptMessageToGroup("Hello World 5", group.GroupID) - t.Logf("Length of Encrypted Message: %v", len(c5)) - - _, _, m1, _ := sarah.AttemptDecryption(c1, s1) - sarah.AttemptDecryption(c1, s1) // Try a duplicate - _, _, m2, _ := sarah.AttemptDecryption(c2, s2) - _, _, m3, _ := sarah.AttemptDecryption(c3, s3) - _, _, m4, _ := sarah.AttemptDecryption(c4, s4) - _, _, m5, _ := sarah.AttemptDecryption(c5, s5) - - // Now we simulate a client receiving these Messages completely out of order - timeline.Insert(m1) - timeline.Insert(m5) - timeline.Insert(m4) - timeline.Insert(m3) - timeline.Insert(m2) - - for i, m := range group.GetTimeline() { - if m.Message != "Hello World "+strconv.Itoa(i+1) { - t.Fatalf("Timeline Out of Order!: %v %v", i, m) - } - - t.Logf("Messages %v: %v %x %x", i, m.Message, m.Signature, m.PreviousMessageSig) - } - - // Test message by hash lookup... - hash := timeline.calculateHash(*m5) - - t.Logf("Looking up %v ", hash) - - for key, msgs := range timeline.hashCache { - t.Logf("%v %v", key, msgs) - } - - // check a real message.. - msgs, err := timeline.GetMessagesByHash(hash) - if err != nil || len(msgs) != 1 { - t.Fatalf("looking up message by hash %v should have not errored: %v", hash, err) - } else if msgs[0].Message.Message != m5.Message { - t.Fatalf("%v != %v", msgs[0].Message, m5.Message) - } - - // Check a non existed hash... error if there is no error - _, err = timeline.GetMessagesByHash("not a real hash") - if err == nil { - t.Fatalf("looking up message by hash %v should have errored: %v", hash, err) - } - -} diff --git a/model/message_utils.go b/model/message_utils.go new file mode 100644 index 0000000..ba57ce0 --- /dev/null +++ b/model/message_utils.go @@ -0,0 +1,14 @@ +package model + +import ( + "crypto/sha256" + "encoding/base64" +) + +// CalculateContentHash derives a hash using the author and the message body. 
It is intended to be +// globally referencable in the context of a single conversation +func CalculateContentHash(author string, messageBody string) string { + content := []byte(author + messageBody) + contentBasedHash := sha256.Sum256(content) + return base64.StdEncoding.EncodeToString(contentBasedHash[:]) +} diff --git a/model/profile.go b/model/profile.go index 03c5dc3..acf06e4 100644 --- a/model/profile.go +++ b/model/profile.go @@ -2,25 +2,16 @@ package model import ( "crypto/rand" - "cwtch.im/cwtch/model/attr" - "cwtch.im/cwtch/model/constants" - "cwtch.im/cwtch/protocol/groups" - "encoding/base32" - "encoding/base64" "encoding/hex" "encoding/json" - "errors" - "fmt" - "git.openprivacy.ca/openprivacy/connectivity/tor" "golang.org/x/crypto/ed25519" "io" - "path/filepath" - "strings" "sync" - "time" ) // Authorization is a type determining client assigned authorization to a peer +// Deprecated - Only used for Importing legacy profile formats +// Still used in some APIs in UI but will be replaced prior to full deprecation type Authorization string const ( @@ -33,6 +24,7 @@ const ( ) // PublicProfile is a local copy of a CwtchIdentity +// Deprecated - Only used for Importing legacy profile formats type PublicProfile struct { Name string Ed25519PublicKey ed25519.PublicKey @@ -48,6 +40,7 @@ type PublicProfile struct { } // Profile encapsulates all the attributes necessary to be a Cwtch Peer. +// Deprecated - Only used for Importing legacy profile formats type Profile struct { PublicProfile Contacts map[string]*PublicProfile @@ -59,418 +52,6 @@ type Profile struct { // TODO: Should this be per server? const MaxGroupMessageLength = 1800 -// GenerateRandomID generates a random 16 byte hex id code -func GenerateRandomID() string { - randBytes := make([]byte, 16) - rand.Read(randBytes) - return filepath.Join(hex.EncodeToString(randBytes)) -} - -func (p *PublicProfile) init() { - if p.Attributes == nil { - p.Attributes = make(map[string]string) - } - p.UnacknowledgedMessages = make(map[string]int) - p.LocalID = GenerateRandomID() -} - -// SetAttribute allows applications to store arbitrary configuration info at the profile level. -func (p *PublicProfile) SetAttribute(name string, value string) { - p.lock.Lock() - defer p.lock.Unlock() - p.Attributes[name] = value -} - -// IsServer returns true if the profile is associated with a server. -func (p *PublicProfile) IsServer() (isServer bool) { - _, isServer = p.GetAttribute(string(KeyTypeServerOnion)) - return -} - -// GetAttribute returns the value of a value set with SetCustomAttribute. If no such value has been set exists is set to false. -func (p *PublicProfile) GetAttribute(name string) (value string, exists bool) { - p.lock.Lock() - defer p.lock.Unlock() - value, exists = p.Attributes[name] - return -} - -// GenerateNewProfile creates a new profile, with new encryption and signing keys, and a profile name. 
-func GenerateNewProfile(name string) *Profile { - p := new(Profile) - p.init() - p.Name = name - pub, priv, _ := ed25519.GenerateKey(rand.Reader) - p.Ed25519PublicKey = pub - p.Ed25519PrivateKey = priv - p.Onion = tor.GetTorV3Hostname(pub) - - p.Contacts = make(map[string]*PublicProfile) - p.Contacts[p.Onion] = &p.PublicProfile - p.Groups = make(map[string]*Group) - return p -} - -// AddContact allows direct manipulation of cwtch contacts -func (p *Profile) AddContact(onion string, profile *PublicProfile) { - p.lock.Lock() - profile.init() - // We expect callers to verify addresses before we get to this point, so if this isn't a - // valid address this is a noop. - if tor.IsValidHostname(onion) { - decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion[:56])) - if err == nil { - profile.Ed25519PublicKey = ed25519.PublicKey(decodedPub[:32]) - p.Contacts[onion] = profile - } - } - p.lock.Unlock() -} - -// UpdateMessageFlags updates the flags stored with a message -func (p *Profile) UpdateMessageFlags(handle string, mIdx int, flags uint64) { - p.lock.Lock() - defer p.lock.Unlock() - if contact, exists := p.Contacts[handle]; exists { - if len(contact.Timeline.Messages) > mIdx { - contact.Timeline.Messages[mIdx].Flags = flags - } - } else if group, exists := p.Groups[handle]; exists { - if len(group.Timeline.Messages) > mIdx { - group.Timeline.Messages[mIdx].Flags = flags - } - } -} - -// DeleteContact deletes a peer contact -func (p *Profile) DeleteContact(onion string) { - p.lock.Lock() - defer p.lock.Unlock() - delete(p.Contacts, onion) -} - -// DeleteGroup deletes a group -func (p *Profile) DeleteGroup(groupID string) { - p.lock.Lock() - defer p.lock.Unlock() - delete(p.Groups, groupID) -} - -// RejectInvite rejects and removes a group invite -func (p *Profile) RejectInvite(groupID string) { - p.lock.Lock() - delete(p.Groups, groupID) - p.lock.Unlock() -} - -// AddSentMessageToContactTimeline allows the saving of a message sent via a direct connection chat to the profile. -func (p *Profile) AddSentMessageToContactTimeline(onion string, messageTxt string, sent time.Time, eventID string) *Message { - p.lock.Lock() - defer p.lock.Unlock() - - contact, ok := p.Contacts[onion] - if ok { - now := time.Now() - sig := p.SignMessage(onion + messageTxt + sent.String() + now.String()) - - message := &Message{PeerID: p.Onion, Message: messageTxt, Timestamp: sent, Received: now, Signature: sig, Acknowledged: false} - if contact.UnacknowledgedMessages == nil { - contact.UnacknowledgedMessages = make(map[string]int) - } - contact.Timeline.Insert(message) - contact.UnacknowledgedMessages[eventID] = contact.Timeline.Len() - 1 - return message - } - return nil -} - -// AddMessageToContactTimeline allows the saving of a message sent via a direct connection chat to the profile. 
-func (p *Profile) AddMessageToContactTimeline(onion string, messageTxt string, sent time.Time) (message *Message) { - p.lock.Lock() - defer p.lock.Unlock() - contact, ok := p.Contacts[onion] - - // We don't really need a Signature here, but we use it to maintain order - now := time.Now() - sig := p.SignMessage(onion + messageTxt + sent.String() + now.String()) - if ok { - message = &Message{PeerID: onion, Message: messageTxt, Timestamp: sent, Received: now, Signature: sig, Acknowledged: true} - contact.Timeline.Insert(message) - } - return -} - -// ErrorSentMessageToPeer sets a sent message's error message and removes it from the unacknowledged list -func (p *Profile) ErrorSentMessageToPeer(onion string, eventID string, error string) int { - p.lock.Lock() - defer p.lock.Unlock() - - contact, ok := p.Contacts[onion] - if ok { - mIdx, ok := contact.UnacknowledgedMessages[eventID] - if ok { - contact.Timeline.Messages[mIdx].Error = error - delete(contact.UnacknowledgedMessages, eventID) - return mIdx - } - } - return -1 -} - -// AckSentMessageToPeer sets mesage to a peer as acknowledged -func (p *Profile) AckSentMessageToPeer(onion string, eventID string) int { - p.lock.Lock() - defer p.lock.Unlock() - - contact, ok := p.Contacts[onion] - if ok { - mIdx, ok := contact.UnacknowledgedMessages[eventID] - if ok { - contact.Timeline.Messages[mIdx].Acknowledged = true - delete(contact.UnacknowledgedMessages, eventID) - return mIdx - } - } - - return -1 -} - -// AddGroupSentMessageError searches matching groups for the message by sig and marks it as an error -func (p *Profile) AddGroupSentMessageError(groupID string, signature []byte, error string) { - p.lock.Lock() - defer p.lock.Unlock() - group, exists := p.Groups[groupID] - if exists { - group.ErrorSentMessage(signature, error) - } -} - -// AcceptInvite accepts a group invite -func (p *Profile) AcceptInvite(groupID string) (err error) { - p.lock.Lock() - defer p.lock.Unlock() - group, ok := p.Groups[groupID] - if ok { - group.Accepted = true - } else { - err = errors.New("group does not exist") - } - return -} - -// GetGroups returns an unordered list of group IDs associated with this profile. -func (p *Profile) GetGroups() []string { - p.lock.Lock() - defer p.lock.Unlock() - var keys []string - for onion := range p.Groups { - keys = append(keys, onion) - } - return keys -} - -// GetContacts returns an unordered list of contact onions associated with this profile. 
-func (p *Profile) GetContacts() []string { - p.lock.Lock() - defer p.lock.Unlock() - var keys []string - for onion := range p.Contacts { - if onion != p.Onion { - keys = append(keys, onion) - } - } - return keys -} - -// SetContactAuthorization sets the authoirization level of a peer -func (p *Profile) SetContactAuthorization(onion string, auth Authorization) (err error) { - p.lock.Lock() - defer p.lock.Unlock() - contact, ok := p.Contacts[onion] - if ok { - contact.Authorization = auth - } else { - err = errors.New("peer does not exist") - } - return -} - -// GetContactAuthorization returns the contact's authorization level -func (p *Profile) GetContactAuthorization(onion string) Authorization { - p.lock.Lock() - defer p.lock.Unlock() - contact, ok := p.Contacts[onion] - if ok { - return contact.Authorization - } - return AuthUnknown -} - -// ContactsAuthorizations calculates a list of Peers who are at the supplied auth levels -func (p *Profile) ContactsAuthorizations(authorizationFilter ...Authorization) map[string]Authorization { - authorizations := map[string]Authorization{} - for _, contact := range p.GetContacts() { - c, _ := p.GetContact(contact) - authorizations[c.Onion] = c.Authorization - } - return authorizations -} - -// GetContact returns a contact if the profile has it -func (p *Profile) GetContact(onion string) (*PublicProfile, bool) { - p.lock.Lock() - defer p.lock.Unlock() - contact, ok := p.Contacts[onion] - return contact, ok -} - -// VerifyGroupMessage confirms the authenticity of a message given an sender onion, message and signature. -// The goal of this function is 2-fold: -// 1. We confirm that the sender referenced in the group text is the actual sender of the message (or at least -// knows the senders private key) -// 2. Secondly, we confirm that the sender sent the message to a particular group id on a specific server (it doesn't -// matter if we actually received this message from the server or from a hybrid protocol, all that matters is -// that the sender and receivers agree that this message was intended for the group -// The 2nd point is important as it prevents an attack documented in the original Cwtch paper (and later at -// https://docs.openprivacy.ca/cwtch-security-handbook/groups.html) in which a malicious profile sets up 2 groups -// on two different servers with the same key and then forwards messages between them to convince the parties in -// each group that they are actually in one big group (with the intent to later censor and/or selectively send messages -// to each group). -func (p *Profile) VerifyGroupMessage(onion string, groupID string, message string, signature []byte) bool { - - group := p.GetGroup(groupID) - if group == nil { - return false - } - - // We use our group id, a known reference server and the ciphertext of the message. - m := groupID + group.GroupServer + message - - // If the message is ostensibly from us then we check it against our public key... - if onion == p.Onion { - return ed25519.Verify(p.Ed25519PublicKey, []byte(m), signature) - } - - // Otherwise we derive the public key from the sender and check it against that. 
- decodedPub, err := base32.StdEncoding.DecodeString(strings.ToUpper(onion)) - if err == nil && len(decodedPub) >= 32 { - return ed25519.Verify(decodedPub[:32], []byte(m), signature) - } - return false -} - -// SignMessage takes a given message and returns an Ed21159 signature -func (p *Profile) SignMessage(message string) []byte { - sig := ed25519.Sign(p.Ed25519PrivateKey, []byte(message)) - return sig -} - -// StartGroup when given a server, creates a new Group under this profile and returns the group id an a precomputed -// invite which can be sent on the wire. -func (p *Profile) StartGroup(server string) (groupID string, invite string, err error) { - group, err := NewGroup(server) - if err != nil { - return "", "", err - } - groupID = group.GroupID - invite, err = group.Invite() - p.lock.Lock() - defer p.lock.Unlock() - p.Groups[group.GroupID] = group - return -} - -// GetGroup a pointer to a Group by the group Id, returns nil if no group found. -func (p *Profile) GetGroup(groupID string) (g *Group) { - p.lock.Lock() - defer p.lock.Unlock() - g = p.Groups[groupID] - return -} - -// ProcessInvite validates a group invite and adds a new group invite to the profile if it is valid. -// returns the new group ID on success, error on fail. -func (p *Profile) ProcessInvite(invite string) (string, error) { - gci, err := ValidateInvite(invite) - if err == nil { - if server, exists := p.GetContact(gci.ServerHost); !exists || !server.IsServer() { - return "", fmt.Errorf("unknown server. a server key bundle needs to be imported before this group can be verified") - } - group := new(Group) - group.Version = CurrentGroupVersion - group.GroupID = gci.GroupID - group.LocalID = GenerateRandomID() - copy(group.GroupKey[:], gci.SharedKey[:]) - group.GroupServer = gci.ServerHost - group.Accepted = false - group.Attributes = make(map[string]string) - group.Attributes[attr.GetLocalScope(constants.Name)] = gci.GroupName - p.AddGroup(group) - return gci.GroupID, nil - } - return "", err -} - -// AddGroup is a convenience method for adding a group to a profile. -func (p *Profile) AddGroup(group *Group) { - p.lock.Lock() - defer p.lock.Unlock() - _, exists := p.Groups[group.GroupID] - if !exists { - p.Groups[group.GroupID] = group - } -} - -// AttemptDecryption takes a ciphertext and signature and attempts to decrypt it under known groups. -// If successful, adds the message to the group's timeline -func (p *Profile) AttemptDecryption(ciphertext []byte, signature []byte) (bool, string, *Message, int) { - for _, group := range p.Groups { - success, dgm := group.DecryptMessage(ciphertext) - if success { - - // Attempt to serialize this message - serialized, err := json.Marshal(dgm) - - // Someone send a message that isn't a valid Decrypted Group Message. Since we require this struct in orer - // to verify the message, we simply ignore it. - if err != nil { - return false, group.GroupID, nil, -1 - } - - // This now requires knowledge of the Sender, the Onion and the Specific Decrypted Group Message (which should only - // be derivable from the cryptographic key) which contains many unique elements such as the time and random padding - verified := p.VerifyGroupMessage(dgm.Onion, group.GroupID, base64.StdEncoding.EncodeToString(serialized), signature) - - if !verified { - // An earlier version of this protocol mistakenly signed the ciphertext of the message - // instead of the serialized decrypted group message. - // This has 2 issues: - // 1. 
A server with knowledge of group members public keys AND the Group ID would be able to detect valid messages - // 2. It made the metadata-security of a group dependent on keeping the cryptographically derived Group ID secret. - // While not awful, it also isn't good. For Version 3 groups only we permit Cwtch to check this older signature - // structure in a backwards compatible way for the duration of the Groups Experiment. - // TODO: Delete this check when Groups are no long Experimental - if group.Version == 3 { - verified = p.VerifyGroupMessage(dgm.Onion, group.GroupID, string(ciphertext), signature) - } - } - - // So we have a message that has a valid group key, but the signature can't be verified. - // The most obvious explanation for this is that the group key has been compromised (or we are in an open group and the server is being malicious) - // Either way, someone who has the private key is being detectably bad so we are just going to throw this message away and mark the group as Compromised. - if !verified { - group.Compromised() - return false, group.GroupID, nil, -1 - } - message, index := group.AddMessage(dgm, signature) - return true, group.GroupID, message, index - } - } - - // If we couldn't find a group to decrypt the message with we just return false. This is an expected case - return false, "", nil, -1 -} - func getRandomness(arr *[]byte) { if _, err := io.ReadFull(rand.Reader, (*arr)[:]); err != nil { if err != nil { @@ -481,53 +62,11 @@ func getRandomness(arr *[]byte) { } } -// EncryptMessageToGroup when given a message and a group, encrypts and signs the message under the group and -// profile -func (p *Profile) EncryptMessageToGroup(message string, groupID string) ([]byte, []byte, error) { - - if len(message) > MaxGroupMessageLength { - return nil, nil, errors.New("group message is too long") - } - - group := p.GetGroup(groupID) - if group != nil { - timestamp := time.Now().Unix() - - // Select the latest message from the timeline as a reference point. 
- var prevSig []byte - if len(group.Timeline.Messages) > 0 { - prevSig = group.Timeline.Messages[len(group.Timeline.Messages)-1].Signature - } else { - prevSig = []byte(group.GroupID) - } - - lenPadding := MaxGroupMessageLength - len(message) - padding := make([]byte, lenPadding) - getRandomness(&padding) - hexGroupID, err := hex.DecodeString(group.GroupID) - if err != nil { - return nil, nil, err - } - - dm := &groups.DecryptedGroupMessage{ - Onion: p.Onion, - Text: message, - SignedGroupID: hexGroupID, - Timestamp: uint64(timestamp), - PreviousMessageSig: prevSig, - Padding: padding[:], - } - - ciphertext, err := group.EncryptMessage(dm) - if err != nil { - return nil, nil, err - } - serialized, _ := json.Marshal(dm) - signature := p.SignMessage(groupID + group.GroupServer + base64.StdEncoding.EncodeToString(serialized)) - group.AddSentMessage(dm, signature) - return ciphertext, signature, nil - } - return nil, nil, errors.New("group does not exist") +// GenerateRandomID generates a random 16 byte hex id code +func GenerateRandomID() string { + randBytes := make([]byte, 16) + rand.Read(randBytes) + return hex.EncodeToString(randBytes) } // GetCopy returns a full deep copy of the Profile struct and its members (timeline inclusion control by arg) diff --git a/model/profile_test.go b/model/profile_test.go deleted file mode 100644 index 2cd8ae5..0000000 --- a/model/profile_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package model - -import ( - "testing" -) - -func TestProfileIdentity(t *testing.T) { - sarah := GenerateNewProfile("Sarah") - alice := GenerateNewProfile("Alice") - - alice.AddContact(sarah.Onion, &sarah.PublicProfile) - if alice.Contacts[sarah.Onion].Name != "Sarah" { - t.Errorf("alice should have added sarah as a contact %v", alice.Contacts) - } - - if len(alice.GetContacts()) != 1 { - t.Errorf("alice should be only contact: %v", alice.GetContacts()) - } - - alice.SetAttribute("test", "hello world") - value, _ := alice.GetAttribute("test") - if value != "hello world" { - t.Errorf("value from custom attribute should have been 'hello world', instead was: %v", value) - } - - t.Logf("%v", alice) -} - -func TestTrustPeer(t *testing.T) { - sarah := GenerateNewProfile("Sarah") - alice := GenerateNewProfile("Alice") - sarah.AddContact(alice.Onion, &alice.PublicProfile) - alice.AddContact(sarah.Onion, &sarah.PublicProfile) - alice.SetContactAuthorization(sarah.Onion, AuthApproved) - if alice.GetContactAuthorization(sarah.Onion) != AuthApproved { - t.Errorf("peer should be approved") - } -} - -func TestBlockPeer(t *testing.T) { - sarah := GenerateNewProfile("Sarah") - alice := GenerateNewProfile("Alice") - sarah.AddContact(alice.Onion, &alice.PublicProfile) - alice.AddContact(sarah.Onion, &sarah.PublicProfile) - alice.SetContactAuthorization(sarah.Onion, AuthBlocked) - if alice.GetContactAuthorization(sarah.Onion) != AuthBlocked { - t.Errorf("peer should be blocked") - } - - if alice.SetContactAuthorization("", AuthUnknown) == nil { - t.Errorf("Seting Auth level of a non existent peer should error") - } -} - -func TestAcceptNonExistentGroup(t *testing.T) { - sarah := GenerateNewProfile("Sarah") - sarah.AcceptInvite("doesnotexist") -} - -func TestRejectGroupInvite(t *testing.T) { - sarah := GenerateNewProfile("Sarah") - alice := GenerateNewProfile("Alice") - sarah.AddContact(alice.Onion, &alice.PublicProfile) - alice.AddContact(sarah.Onion, &sarah.PublicProfile) - // The lightest weight server entry possible (usually we would import a key bundle...) 
- sarah.AddContact("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", &PublicProfile{Attributes: map[string]string{string(KeyTypeServerOnion): "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"}}) - - gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - sarah.ProcessInvite(invite) - group := alice.GetGroup(gid) - if len(sarah.Groups) == 1 { - if sarah.GetGroup(group.GroupID).Accepted { - t.Errorf("Group should not be accepted") - } - sarah.RejectInvite(group.GroupID) - if len(sarah.Groups) != 0 { - t.Errorf("Group %v should have been deleted", group.GroupID) - } - return - } - t.Errorf("Group should exist in map") -} - -func TestProfileGroup(t *testing.T) { - sarah := GenerateNewProfile("Sarah") - alice := GenerateNewProfile("Alice") - sarah.AddContact(alice.Onion, &alice.PublicProfile) - alice.AddContact(sarah.Onion, &sarah.PublicProfile) - - gid, invite, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - - // The lightest weight server entry possible (usually we would import a key bundle...) - sarah.AddContact("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", &PublicProfile{Attributes: map[string]string{string(KeyTypeServerOnion): "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"}}) - sarah.ProcessInvite(invite) - if len(sarah.GetGroups()) != 1 { - t.Errorf("sarah should only be in 1 group instead: %v", sarah.GetGroups()) - } - - group := alice.GetGroup(gid) - sarah.AcceptInvite(group.GroupID) - c, s1, _ := sarah.EncryptMessageToGroup("Hello World", group.GroupID) - alice.AttemptDecryption(c, s1) - - gid2, invite2, _ := alice.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - sarah.ProcessInvite(invite2) - group2 := alice.GetGroup(gid2) - c2, s2, _ := sarah.EncryptMessageToGroup("Hello World", group2.GroupID) - alice.AttemptDecryption(c2, s2) - - _, _, err := sarah.EncryptMessageToGroup(string(make([]byte, MaxGroupMessageLength*2)), group2.GroupID) - if err == nil { - t.Errorf("Overly long message should have returned an error") - } - - bob := GenerateNewProfile("bob") - bob.AddContact(alice.Onion, &alice.PublicProfile) - // The lightest weight server entry possible (usually we would import a key bundle...) 
- bob.AddContact("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", &PublicProfile{Attributes: map[string]string{string(KeyTypeServerOnion): "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"}}) - - bob.ProcessInvite(invite2) - c3, s3, err := bob.EncryptMessageToGroup("Bobs Message", group2.GroupID) - if err == nil { - ok, _, message, _ := alice.AttemptDecryption(c3, s3) - if !ok { - t.Errorf("Bobs message to the group should be decrypted %v %v", message, ok) - } - - eve := GenerateNewProfile("eve") - ok, _, _, _ = eve.AttemptDecryption(c3, s3) - if ok { - t.Errorf("Eves hould not be able to decrypt Messages!") - } - } else { - t.Errorf("Bob failed to encrypt a message to the group") - } -} diff --git a/peer/cwtch_peer.go b/peer/cwtch_peer.go index 862d6e1..2395af0 100644 --- a/peer/cwtch_peer.go +++ b/peer/cwtch_peer.go @@ -1,29 +1,33 @@ package peer import ( - "encoding/base32" + "crypto/rand" + "cwtch.im/cwtch/model/constants" + "cwtch.im/cwtch/protocol/groups" "encoding/base64" "encoding/json" "errors" "fmt" + "git.openprivacy.ca/cwtch.im/tapir/primitives" + "git.openprivacy.ca/openprivacy/connectivity" + "git.openprivacy.ca/openprivacy/connectivity/tor" + "golang.org/x/crypto/ed25519" "runtime" "strconv" "strings" "sync" "time" - "cwtch.im/cwtch/model/constants" - "cwtch.im/cwtch/event" "cwtch.im/cwtch/model" "cwtch.im/cwtch/model/attr" "cwtch.im/cwtch/protocol/connections" "cwtch.im/cwtch/protocol/files" - "git.openprivacy.ca/openprivacy/connectivity/tor" "git.openprivacy.ca/openprivacy/log" ) const lastKnownSignature = "LastKnowSignature" +const lastReceivedSignature = "LastReceivedSignature" var autoHandleableEvents = map[event.Type]bool{event.EncryptedGroupMessage: true, event.PeerStateChange: true, event.ServerStateChange: true, event.NewGroupInvite: true, event.NewMessageFromPeer: true, @@ -47,290 +51,336 @@ var DefaultEventsToHandle = []event.Type{ // cwtchPeer manages incoming and outgoing connections and all processing for a Cwtch cwtchPeer type cwtchPeer struct { - Profile *model.Profile mutex sync.Mutex shutdown bool listenStatus bool + storage *CwtchProfileStorage + + state map[string]connections.ConnectionState queue event.Queue eventBus event.Manager } -func (cp *cwtchPeer) SendScopedZonedGetValToContact(handle string, scope attr.Scope, zone attr.Zone, path string) { - ev := event.NewEventList(event.SendGetValMessageToPeer, event.RemotePeer, handle, event.Scope, string(scope), event.Path, string(zone.ConstructZonedPath(path))) - cp.eventBus.Publish(ev) +func (cp *cwtchPeer) Delete() { + cp.mutex.Lock() + defer cp.mutex.Unlock() + cp.storage.Delete() } +func (cp *cwtchPeer) CheckPassword(password string) bool { + cp.mutex.Lock() + defer cp.mutex.Unlock() + db, err := openEncryptedDatabase(cp.storage.ProfileDirectory, password, false) + if db == nil || err != nil { + return false + } + db.Close() + return true +} + +// GenerateProtocolEngine +// Status: New in 1.5 +func (cp *cwtchPeer) GenerateProtocolEngine(acn connectivity.ACN, bus event.Manager) (connections.Engine, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + conversations, _ := cp.storage.FetchConversations() + + authorizations := make(map[string]model.Authorization) + for _, conversation := range conversations { + if tor.IsValidHostname(conversation.Handle) { + if conversation.ACL[conversation.Handle].Blocked { + authorizations[conversation.Handle] = model.AuthBlocked + } else { + authorizations[conversation.Handle] = model.AuthApproved + } + } + } + + privateKey, err := 
cp.storage.LoadProfileKeyValue(TypePrivateKey, "Ed25519PrivateKey")
+ if err != nil {
+ log.Errorf("error loading private key from storage")
+ return nil, err
+ }
+
+ publicKey, err := cp.storage.LoadProfileKeyValue(TypePublicKey, "Ed25519PublicKey")
+ if err != nil {
+ log.Errorf("error loading public key from storage")
+ return nil, err
+ }
+
+ identity := primitives.InitializeIdentity("", (*ed25519.PrivateKey)(&privateKey), (*ed25519.PublicKey)(&publicKey))
+
+ return connections.NewProtocolEngine(identity, privateKey, acn, bus, authorizations), nil
+}
+
+// SendScopedZonedGetValToContact
+// Status: No change in 1.5
+func (cp *cwtchPeer) SendScopedZonedGetValToContact(conversationID int, scope attr.Scope, zone attr.Zone, path string) {
+ ci, err := cp.GetConversationInfo(conversationID)
+ if err == nil {
+ ev := event.NewEventList(event.SendGetValMessageToPeer, event.RemotePeer, ci.Handle, event.Scope, string(scope), event.Path, string(zone.ConstructZonedPath(path)))
+ cp.eventBus.Publish(ev)
+ } else {
+ log.Errorf("Error sending scoped zoned getval to contact %v %v", conversationID, err)
+ }
+}
+
+// GetScopedZonedAttribute
+// Status: Ready for 1.5
 func (cp *cwtchPeer) GetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool) {
 cp.mutex.Lock()
 defer cp.mutex.Unlock()
 scopedZonedKey := scope.ConstructScopedZonedPath(zone.ConstructZonedPath(key))
- log.Debugf("looking up attribute %v %v %v (%v)", scope, zone, key, scopedZonedKey)
+ value, err := cp.storage.LoadProfileKeyValue(TypeAttribute, scopedZonedKey.ToString())
- if val, exists := cp.Profile.GetAttribute(scopedZonedKey.ToString()); exists {
- return val, true
+ if err != nil {
+ return "", false
 }
- return "", false
+ return string(value), true
 }
+// SetScopedZonedAttribute
+// Status: Ready for 1.5
 func (cp *cwtchPeer) SetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string, value string) {
 cp.mutex.Lock()
- scopedZonedKey := scope.ConstructScopedZonedPath(zone.ConstructZonedPath(key))
- log.Debugf("storing attribute: %v = %v", scopedZonedKey, value)
- cp.Profile.SetAttribute(scopedZonedKey.ToString(), value)
 defer cp.mutex.Unlock()
- cp.eventBus.Publish(event.NewEvent(event.SetAttribute, map[event.Field]string{
- event.Key: scopedZonedKey.ToString(),
- event.Data: value,
- }))
+
+ scopedZonedKey := scope.ConstructScopedZonedPath(zone.ConstructZonedPath(key))
+
+ err := cp.storage.StoreProfileKeyValue(TypeAttribute, scopedZonedKey.ToString(), []byte(value))
+
+ if err != nil {
+ log.Errorf("error setting attribute %v", err)
+ return
+ }
+
+ // We always want to publish profile level attributes to the UI
+ // This should be low traffic.
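+ // Note: the event bus can legitimately be nil here; NewProfileWithEncryptedStorage stores the
+ // initial profile attributes before any event bus has been attached to the peer.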
+ if cp.eventBus != nil { + cp.eventBus.Publish(event.NewEvent(event.UpdatedProfileAttribute, map[event.Field]string{event.Key: scopedZonedKey.ToString(), event.Data: value})) + } } // SendMessage is a higher level that merges sending messages to contacts and group handles // If you try to send a message to a handle that doesn't exist, malformed or an incorrect type then // this function will error -func (cp *cwtchPeer) SendMessage(handle string, message string) error { +func (cp *cwtchPeer) SendMessage(conversation int, message string) error { cp.mutex.Lock() defer cp.mutex.Unlock() - var ev event.Event - // Group Handles are always 32 bytes in length, but we forgo any further testing here - // and delegate the group existence check to EncryptMessageToGroup - if len(handle) == 32 { - group := cp.Profile.GetGroup(handle) - if group == nil { - return errors.New("invalid group id") - } + // We assume we are sending to a Contact. + conversationInfo, err := cp.storage.GetConversation(conversation) + // If the contact exists replace the event id with the index of this message in the contacts timeline... + // Otherwise assume we don't log the message in the timeline... + if conversationInfo != nil && err == nil { - // Group adds it's own sent message to timeline - ct, sig, err := cp.Profile.EncryptMessageToGroup(message, handle) + if tor.IsValidHostname(conversationInfo.Handle) { + ev := event.NewEvent(event.SendMessageToPeer, map[event.Field]string{event.ConversationID: strconv.Itoa(conversationInfo.ID), event.RemotePeer: conversationInfo.Handle, event.Data: message}) + onion, _ := cp.storage.LoadProfileKeyValue(TypeAttribute, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Onion)).ToString()) - // Group does not exist or some other unrecoverable error... 
- if err != nil { - return err + // For p2p messages we store the event id of the message as the "signature" we can then look this up in the database later for acks + err := cp.storage.InsertMessage(conversationInfo.ID, 0, message, model.Attributes{constants.AttrAuthor: string(onion), constants.AttrAck: event.False, constants.AttrSentTimestamp: time.Now().Format(time.RFC3339Nano)}, ev.EventID, model.CalculateContentHash(string(onion), message)) + if err != nil { + return err + } + cp.eventBus.Publish(ev) + } else { + group, err := cp.constructGroupFromConversation(conversationInfo) + if err != nil { + log.Errorf("error constructing group") + return err + } + + privateKey, err := cp.storage.LoadProfileKeyValue(TypePrivateKey, "Ed25519PrivateKey") + if err != nil { + log.Errorf("error loading private key from storage") + return err + } + + publicKey, err := cp.storage.LoadProfileKeyValue(TypePublicKey, "Ed25519PublicKey") + if err != nil { + log.Errorf("error loading public key from storage") + return err + } + + identity := primitives.InitializeIdentity("", (*ed25519.PrivateKey)(&privateKey), (*ed25519.PublicKey)(&publicKey)) + + ct, sig, dm, err := model.EncryptMessageToGroup(message, identity, group) + if err != nil { + return err + } + + // Insert the Group Message + err = cp.storage.InsertMessage(conversationInfo.ID, 0, dm.Text, model.Attributes{constants.AttrAck: constants.False, "PreviousSignature": base64.StdEncoding.EncodeToString(dm.PreviousMessageSig), constants.AttrAuthor: dm.Onion, constants.AttrSentTimestamp: time.Now().Format(time.RFC3339Nano)}, base64.StdEncoding.EncodeToString(sig), model.CalculateContentHash(dm.Onion, dm.Text)) + if err == nil { + ev := event.NewEvent(event.SendMessageToGroup, map[event.Field]string{event.ConversationID: strconv.Itoa(conversationInfo.ID), event.GroupID: conversationInfo.Handle, event.GroupServer: group.GroupServer, event.Ciphertext: base64.StdEncoding.EncodeToString(ct), event.Signature: base64.StdEncoding.EncodeToString(sig)}) + cp.eventBus.Publish(ev) + } else { + return err + } } - ev = event.NewEvent(event.SendMessageToGroup, map[event.Field]string{event.GroupID: handle, event.GroupServer: group.GroupServer, event.Ciphertext: base64.StdEncoding.EncodeToString(ct), event.Signature: base64.StdEncoding.EncodeToString(sig)}) - } else if tor.IsValidHostname(handle) { - // We assume we are sending to a Contact. - // (Servers are technically Contacts) - contact, exists := cp.Profile.GetContact(handle) - ev = event.NewEvent(event.SendMessageToPeer, map[event.Field]string{event.RemotePeer: handle, event.Data: message}) - // If the contact exists replace the event id wih the index of this message in the contacts timeline... - // Otherwise assume we don't log the message in the timeline... - if exists { - ev.EventID = strconv.Itoa(contact.Timeline.Len()) - cp.Profile.AddSentMessageToContactTimeline(handle, message, time.Now(), ev.EventID) - } - // Regardless we publish the send message to peer event for the protocol engine to execute on... 
- // We assume this is always successful as it is always valid to attempt to - // Contact a valid hostname - } else { - return errors.New("malformed handle type") + return nil } - - cp.eventBus.Publish(ev) - return nil -} - -func (cp *cwtchPeer) UpdateMessageFlags(handle string, mIdx int, flags uint64) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - log.Debugf("Updating Flags for %v %v %v", handle, mIdx, flags) - cp.Profile.UpdateMessageFlags(handle, mIdx, flags) - cp.eventBus.Publish(event.NewEvent(event.UpdateMessageFlags, map[event.Field]string{event.Handle: handle, event.Index: strconv.Itoa(mIdx), event.Flags: strconv.FormatUint(flags, 2)})) + return fmt.Errorf("error sending message to conversation %v", err) } // BlockUnknownConnections will auto disconnect from connections if authentication doesn't resolve a hostname // known to peer. +// Status: Ready for 1.5 func (cp *cwtchPeer) BlockUnknownConnections() { cp.eventBus.Publish(event.NewEvent(event.BlockUnknownPeers, map[event.Field]string{})) } // AllowUnknownConnections will permit connections from unknown contacts. +// Status: Ready for 1.5 func (cp *cwtchPeer) AllowUnknownConnections() { cp.eventBus.Publish(event.NewEvent(event.AllowUnknownPeers, map[event.Field]string{})) } -// ReadContacts is a meta-interface intended to restrict callers to read-only access to contacts -type ReadContacts interface { - GetContacts() []string - GetContact(string) *model.PublicProfile - GetContactAttribute(string, string) (string, bool) -} - -// ModifyContacts is a meta-interface intended to restrict callers to modify-only access to contacts -type ModifyContacts interface { - AddContact(nick, onion string, authorization model.Authorization) - SetContactAuthorization(string, model.Authorization) error - SetContactAttribute(string, string, string) - DeleteContact(string) -} - -// AccessPeeringState provides access to functions relating to the underlying connections of a peer. -type AccessPeeringState interface { - GetPeerState(string) (connections.ConnectionState, bool) -} - -// ModifyPeeringState is a meta-interface intended to restrict callers to modify-only access to connection peers -type ModifyPeeringState interface { - BlockUnknownConnections() - AllowUnknownConnections() - PeerWithOnion(string) - JoinServer(string) error -} - -// ModifyContactsAndPeers is a meta-interface intended to restrict a call to reading and modifying contacts -// and peers. 
-type ModifyContactsAndPeers interface { - ReadContacts - ModifyContacts - ModifyPeeringState -} - -// ReadServers provides access to the servers -type ReadServers interface { - GetServers() []string -} - -// ReadGroups provides read-only access to group state -type ReadGroups interface { - GetGroup(string) *model.Group - GetGroupState(string) (connections.ConnectionState, bool) - GetGroups() []string - GetGroupAttribute(string, string) (string, bool) - ExportGroup(string) (string, error) -} - -// ModifyGroups provides write-only access add/edit/remove new groups -type ModifyGroups interface { - ImportGroup(string) (string, error) - StartGroup(string) (string, string, error) - AcceptInvite(string) error - RejectInvite(string) - DeleteGroup(string) - SetGroupAttribute(string, string, string) -} - -// ModifyServers provides write-only access to servers -type ModifyServers interface { - AddServer(string) (string, error) - ResyncServer(onion string) error -} - -// SendMessages enables a caller to sender messages to a contact -type SendMessages interface { - SendMessage(handle string, message string) error - - // Deprecated: is unsafe - SendGetValToPeer(string, string, string) - - SendScopedZonedGetValToContact(handle string, scope attr.Scope, zone attr.Zone, key string) - - // TODO - // Deprecated use overlays instead - InviteOnionToGroup(string, string) error -} - -// ModifyMessages enables a caller to modify the messages in a timeline -type ModifyMessages interface { - UpdateMessageFlags(string, int, uint64) -} - -// CwtchPeer provides us with a way of testing systems built on top of cwtch without having to -// directly implement a cwtchPeer. -type CwtchPeer interface { - - // Core Cwtch Peer Functions that should not be exposed to - // most functions - Init(event.Manager) - AutoHandleEvents(events []event.Type) - Listen() - StartPeersConnections() - StartServerConnections() - Shutdown() - - // GetOnion is deprecated. If you find yourself needing to rely on this method it is time - // to consider replacing this with a GetAddress(es) function that can fully expand cwtch beyond the boundaries - // of tor v3 onion services. - // Deprecated - GetOnion() string - - // SetScopedZonedAttribute allows the setting of an attribute by scope and zone - // scope.zone.key = value - SetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string, value string) - - // GetScopedZonedAttribute allows the retrieval of an attribute by scope and zone - // scope.zone.key = value - GetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool) - - ReadContacts - ModifyContacts - - AccessPeeringState - ModifyPeeringState - - ReadGroups - ModifyGroups - - ReadServers - ModifyServers - - SendMessages - ModifyMessages - - ShareFile(fileKey string, serializedManifest string) -} - -// NewCwtchPeer creates and returns a new cwtchPeer with the given name. 
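Although the monolithic CwtchPeer interface above is going away, the scope.zone.key attribute convention it documents still underpins the new storage-backed accessors; a small sketch of how those helpers are used (illustrative only; the example value and the rendered path are assumptions):

    // Sketch: set and read a scoped+zoned profile attribute.
    cp.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "alice")
    if name, exists := cp.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name); exists {
        log.Debugf("profile name: %v", name) // stored under a path like public.profile.name
    }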
-func NewCwtchPeer(name string) CwtchPeer { +// NewProfileWithEncryptedStorage instantiates a new Cwtch Profile from encrypted storage +func NewProfileWithEncryptedStorage(name string, cps *CwtchProfileStorage) CwtchPeer { cp := new(cwtchPeer) - cp.Profile = model.GenerateNewProfile(name) cp.shutdown = false + cp.storage = cps + cp.queue = event.NewQueue() + cp.state = make(map[string]connections.ConnectionState) + + pub, priv, _ := ed25519.GenerateKey(rand.Reader) + // Store all the Necessary Base Attributes In The Database + cp.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, name) + cp.storage.StoreProfileKeyValue(TypeAttribute, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Onion)).ToString(), []byte(tor.GetTorV3Hostname(pub))) + cp.storage.StoreProfileKeyValue(TypePrivateKey, "Ed25519PrivateKey", priv) + cp.storage.StoreProfileKeyValue(TypePublicKey, "Ed25519PublicKey", pub) + return cp } -// FromProfile generates a new peer from a profile. -func FromProfile(profile *model.Profile) CwtchPeer { +// FromEncryptedStorage loads an existing Profile from Encrypted Storage +func FromEncryptedStorage(cps *CwtchProfileStorage) CwtchPeer { cp := new(cwtchPeer) - cp.Profile = profile cp.shutdown = false + cp.storage = cps + cp.queue = event.NewQueue() + cp.state = make(map[string]connections.ConnectionState) + // At some point we may want to populate caches here, for now we will assume hitting the + // database directly is tolerable + // Clean up anything that wasn't cleaned up on shutdown + // TODO ideally this shouldn't need to be done but the UI sometimes doesn't shut down cleanly + cp.storage.PurgeNonSavedMessages() + return cp +} + +// ImportLegacyProfile generates a new peer from a profile. 
+// Deprecated - Only to be used for importing new profiles +func ImportLegacyProfile(profile *model.Profile, cps *CwtchProfileStorage) CwtchPeer { + cp := new(cwtchPeer) + cp.shutdown = false + cp.storage = cps + cp.eventBus = event.NewEventManager() + cp.queue = event.NewQueue() + cp.state = make(map[string]connections.ConnectionState) + // Store all the Necessary Base Attributes In The Database + cp.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, profile.Name) + cp.storage.StoreProfileKeyValue(TypeAttribute, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Onion)).ToString(), []byte(tor.GetTorV3Hostname(profile.Ed25519PublicKey))) + cp.storage.StoreProfileKeyValue(TypePrivateKey, "Ed25519PrivateKey", profile.Ed25519PrivateKey) + cp.storage.StoreProfileKeyValue(TypePublicKey, "Ed25519PublicKey", profile.Ed25519PublicKey) + + for k, v := range profile.Attributes { + parts := strings.SplitN(k, ".", 2) + if len(parts) == 2 { + scope := attr.IntoScope(parts[0]) + zone, path := attr.ParseZone(parts[1]) + cp.SetScopedZonedAttribute(scope, zone, path, v) + } else { + log.Errorf("could not import legacy style attribute %v", k) + } + } + + for _, contact := range profile.Contacts { + var conversationID int + var err error + if contact.Authorization == model.AuthApproved { + conversationID, err = cp.NewContactConversation(contact.Onion, model.DefaultP2PAccessControl(), true) + } else if contact.Authorization == model.AuthBlocked { + conversationID, err = cp.NewContactConversation(contact.Onion, model.AccessControl{Blocked: true, Read: false, Append: false}, true) + } else { + conversationID, err = cp.NewContactConversation(contact.Onion, model.DefaultP2PAccessControl(), false) + } + + if err == nil { + for key, value := range contact.Attributes { + switch key { + case event.SaveHistoryKey: + cp.SetConversationAttribute(conversationID, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(event.SaveHistoryKey)), value) + case string(model.BundleType): + cp.AddServer(value) + case string(model.KeyTypeTokenOnion): + //ignore + case string(model.KeyTypeServerOnion): + // ignore + case string(model.KeyTypePrivacyPass): + // ignore + case lastKnownSignature: + cp.SetConversationAttribute(conversationID, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(lastReceivedSignature)), value) + default: + log.Errorf("could not import conversation attribute %v", key) + } + } + + if name, exists := contact.Attributes["local.name"]; exists { + cp.SetConversationAttribute(conversationID, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)), name) + } + if name, exists := contact.Attributes["peer.name"]; exists { + cp.SetConversationAttribute(conversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)), name) + } + + for _, message := range contact.Timeline.GetMessages() { + // By definition anything stored in legacy timelines in acknowledged + attr := model.Attributes{constants.AttrAuthor: message.PeerID, constants.AttrAck: event.True, constants.AttrSentTimestamp: message.Timestamp.Format(time.RFC3339Nano)} + if message.Flags&0x01 == 0x01 { + attr[constants.AttrRejected] = event.True + } + if message.Flags&0x02 == 0x02 { + attr[constants.AttrDownloaded] = event.True + } + cp.storage.InsertMessage(conversationID, 0, message.Message, attr, model.GenerateRandomID(), model.CalculateContentHash(message.PeerID, 
message.Message)) + } + } + } + + for _, group := range profile.Groups { + group.GroupName = group.Attributes["local.name"] + invite, err := group.Invite() + if err == nil { + // Automatically grab all the important fields... + conversationID, err := cp.ImportGroup(invite) + if err == nil { + for _, message := range group.Timeline.GetMessages() { + // By definition anything stored in legacy timelines in acknowledged + attr := model.Attributes{constants.AttrAuthor: message.PeerID, constants.AttrAck: event.True, constants.AttrSentTimestamp: message.Timestamp.Format(time.RFC3339Nano)} + if message.Flags&0x01 == 0x01 { + attr[constants.AttrRejected] = event.True + } + if message.Flags&0x02 == 0x02 { + attr[constants.AttrDownloaded] = event.True + } + cp.storage.InsertMessage(conversationID, 0, message.Message, attr, base64.StdEncoding.EncodeToString(message.Signature), model.CalculateContentHash(message.PeerID, message.Message)) + } + } + } + } + cp.eventBus.Shutdown() // We disregard all events from profile... return cp } // Init instantiates a cwtchPeer +// Status: Ready for 1.5 func (cp *cwtchPeer) Init(eventBus event.Manager) { cp.InitForEvents(eventBus, DefaultEventsToHandle) - // Upgrade the Cwtch Peer if necessary - // It would be nice to do these checks in the storage engine itself, but it is easier to do them here - // rather than duplicating the logic to construct/reconstruct attributes in storage engine... - // TODO: Remove these checks after Cwtch ~1.5 storage engine is implemented - if _, exists := cp.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name); !exists { - // If public.profile.name does not exist, and we have an existing public.name then: - // set public.profile.name from public.name - // set local.profile.name from public.name - if name, exists := cp.Profile.GetAttribute(attr.GetPublicScope(constants.Name)); exists { - cp.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, name) - cp.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name, name) - } else { - // Otherwise check if local.name exists and set it from that - // If not, then check the very old unzoned, unscoped name. - // If not, then set directly from Profile.Name... - if name, exists := cp.Profile.GetAttribute(attr.GetLocalScope(constants.Name)); exists { - cp.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, name) - cp.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name, name) - } else if name, exists := cp.Profile.GetAttribute(constants.Name); exists { - cp.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, name) - cp.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name, name) - } else { - // Profile.Name is very deprecated at this point... 
- cp.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, cp.Profile.Name) - cp.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name, cp.Profile.Name) - } - } - } - // At this point we can safely assume that public.profile.name exists localName, _ := cp.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name) publicName, _ := cp.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name) @@ -338,25 +388,11 @@ func (cp *cwtchPeer) Init(eventBus event.Manager) { if localName != publicName { cp.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name, publicName) } - - // At this point we can safely assume that public.profile.name exists AND is consistent with - // local.profile.name - regardless of whatever Cwtch version we have upgraded from. This will - // be important after Cwtch 1.5 when we purge all previous references to local.profile.name and - // profile-> name - and remove all name processing code from libcwtch-go. - - // If local.profile.tag does not exist then set it from deprecated GetAttribute - if _, exists := cp.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Tag); !exists { - if tag, exists := cp.Profile.GetAttribute(constants.Tag); exists { - cp.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Tag, tag) - } else { - // Assume a default password, which will allow the older profile to have it's password reset by the UI - cp.SetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Tag, constants.ProfileTypeV1DefaultPassword) - } - } } +// InitForEvents +// Status: Ready for 1.5 func (cp *cwtchPeer) InitForEvents(eventBus event.Manager, toBeHandled []event.Type) { - cp.queue = event.NewQueue() go cp.eventHandler() cp.eventBus = eventBus @@ -364,6 +400,7 @@ func (cp *cwtchPeer) InitForEvents(eventBus event.Manager, toBeHandled []event.T } // AutoHandleEvents sets an event (if able) to be handled by this peer +// Status: Ready for 1.5 func (cp *cwtchPeer) AutoHandleEvents(events []event.Type) { for _, ev := range events { if _, exists := autoHandleableEvents[ev]; exists { @@ -375,85 +412,171 @@ func (cp *cwtchPeer) AutoHandleEvents(events []event.Type) { } // ImportGroup initializes a group from an imported source rather than a peer invite -func (cp *cwtchPeer) ImportGroup(exportedInvite string) (string, error) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - gid, err := cp.Profile.ProcessInvite(exportedInvite) - - if err == nil { - cp.eventBus.Publish(event.NewEvent(event.NewGroup, map[event.Field]string{event.GroupID: gid, event.GroupInvite: exportedInvite})) +// Status: TODO +func (cp *cwtchPeer) ImportGroup(exportedInvite string) (int, error) { + gci, err := model.ValidateInvite(exportedInvite) + if err != nil { + return -1, err } - - return gid, err + groupConversationID, err := cp.NewContactConversation(gci.GroupID, model.DefaultP2PAccessControl(), true) + if err == nil { + cp.SetConversationAttribute(groupConversationID, attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)), gci.GroupID) + cp.SetConversationAttribute(groupConversationID, attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupServer)), gci.ServerHost) + cp.SetConversationAttribute(groupConversationID, attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupKey)), base64.StdEncoding.EncodeToString(gci.SharedKey)) + 
cp.SetConversationAttribute(groupConversationID, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)), gci.GroupName) + cp.eventBus.Publish(event.NewEvent(event.NewGroup, map[event.Field]string{event.ConversationID: strconv.Itoa(groupConversationID), event.GroupServer: gci.ServerHost, event.GroupInvite: exportedInvite})) + } + return groupConversationID, err } -// ExportGroup serializes a group invite so it can be given offline -func (cp *cwtchPeer) ExportGroup(groupID string) (string, error) { +// NewContactConversation creates a new p2p conversation with the given acl applied to the handle. +func (cp *cwtchPeer) NewContactConversation(handle string, acl model.AccessControl, accepted bool) (int, error) { cp.mutex.Lock() defer cp.mutex.Unlock() - group := cp.Profile.GetGroup(groupID) - if group != nil { - return group.Invite() + conversationID, err := cp.storage.NewConversation(handle, model.Attributes{event.SaveHistoryKey: event.DeleteHistoryDefault}, model.AccessControlList{handle: acl}, accepted) + cp.eventBus.Publish(event.NewEvent(event.ContactCreated, map[event.Field]string{event.ConversationID: strconv.Itoa(conversationID), event.RemotePeer: handle})) + return conversationID, err +} + +// AcceptConversation looks up a conversation by id and sets its Accepted status to `true` +// This will cause Cwtch to auto connect to this conversation on start up +func (cp *cwtchPeer) AcceptConversation(id int) error { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.AcceptConversation(id) +} + +// BlockConversation looks up a conversation by id and sets its ACL to blocked +// This will prevent Cwtch from auto connecting to this conversation on start up +func (cp *cwtchPeer) BlockConversation(id int) error { + cp.mutex.Lock() + defer cp.mutex.Unlock() + ci, err := cp.storage.GetConversation(id) + if err != nil { + return err } - return "", errors.New("group id could not be found") + // p2p conversations have a single ACL referencing the remote peer. Set this to blocked... + ci.ACL[ci.Handle] = model.AccessControl{Blocked: true, Read: false, Append: false} + // Send an event in any case to block the protocol engine...
+ // TODO at some point in the future engine needs to understand ACLs not just legacy auth status + cp.eventBus.Publish(event.NewEvent(event.SetPeerAuthorization, map[event.Field]string{event.RemotePeer: ci.Handle, event.Authorization: string(model.AuthBlocked)})) + return cp.storage.SetConversationACL(id, ci.ACL) +} + +func (cp *cwtchPeer) FetchConversations() ([]*model.Conversation, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.FetchConversations() +} + +func (cp *cwtchPeer) GetConversationInfo(conversation int) (*model.Conversation, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.GetConversation(conversation) +} + +// FetchConversationInfo returns information about the given conversation referenced by the handle +func (cp *cwtchPeer) FetchConversationInfo(handle string) (*model.Conversation, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.GetConversationByHandle(handle) +} + +// DeleteConversation purges all data about the conversation, including message timelines, referenced by the given id +func (cp *cwtchPeer) DeleteConversation(id int) error { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.DeleteConversation(id) +} + +// SetConversationAttribute sets the conversation attribute at path to value +func (cp *cwtchPeer) SetConversationAttribute(id int, path attr.ScopedZonedPath, value string) error { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.SetConversationAttribute(id, path, value) +} + +// GetConversationAttribute is a shortcut method for retrieving the value of a given path +func (cp *cwtchPeer) GetConversationAttribute(id int, path attr.ScopedZonedPath) (string, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + ci, err := cp.storage.GetConversation(id) + if err != nil { + return "", err + } + val, exists := ci.Attributes[path.ToString()] + if !exists { + return "", fmt.Errorf("%v does not exist for conversation %v", path.ToString(), id) + } + return val, nil +} + +// GetChannelMessage returns a message from a conversation channel referenced by the absolute ID. +// Note: This should not be used to index a list as the ID is not expected to be tied to absolute position +// in the table (e.g. deleted messages, expired messages, etc.) +func (cp *cwtchPeer) GetChannelMessage(conversation int, channel int, id int) (string, model.Attributes, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.GetChannelMessage(conversation, channel, id) +} + +// GetChannelMessageCount returns the absolute number of messages in a given conversation channel +func (cp *cwtchPeer) GetChannelMessageCount(conversation int, channel int) (int, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.GetChannelMessageCount(conversation, channel) +} + +// GetMostRecentMessages returns a selection of messages, ordered by most recently inserted +func (cp *cwtchPeer) GetMostRecentMessages(conversation int, channel int, offset int, limit int) ([]model.ConversationMessage, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.GetMostRecentMessages(conversation, channel, offset, limit) +} + +// UpdateMessageAttribute sets a given key/value attribute on the message in the given conversation/channel +// errors if the message doesn't exist, or for underlying database issues.
+func (cp *cwtchPeer) UpdateMessageAttribute(conversation int, channel int, id int, key string, value string) error { + _, attr, err := cp.GetChannelMessage(conversation, channel, id) + if err == nil { + cp.mutex.Lock() + defer cp.mutex.Unlock() + attr[key] = value + return cp.storage.UpdateMessageAttributes(conversation, channel, id, attr) + } + return err } // StartGroup create a new group linked to the given server and returns the group ID, an invite or an error. -func (cp *cwtchPeer) StartGroup(server string) (string, string, error) { - cp.mutex.Lock() - groupID, invite, err := cp.Profile.StartGroup(server) - cp.mutex.Unlock() +// Status: TODO change server handle to conversation id...? +func (cp *cwtchPeer) StartGroup(name string, server string) (int, error) { + group, err := model.NewGroup(server) if err == nil { - group := cp.GetGroup(groupID) - jsobj, err := json.Marshal(group) - if err == nil { - cp.eventBus.Publish(event.NewEvent(event.GroupCreated, map[event.Field]string{ - event.GroupID: groupID, - event.GroupServer: group.GroupServer, - event.GroupInvite: invite, - // Needed for Storage Engine... - event.Data: string(jsobj), - })) + conversationID, err := cp.NewContactConversation(group.GroupID, model.DefaultP2PAccessControl(), true) + if err != nil { + return -1, err } - } else { - log.Errorf("error creating group: %v", err) + cp.SetConversationAttribute(conversationID, attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)), group.GroupID) + cp.SetConversationAttribute(conversationID, attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupServer)), group.GroupServer) + cp.SetConversationAttribute(conversationID, attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupKey)), base64.StdEncoding.EncodeToString(group.GroupKey[:])) + cp.SetConversationAttribute(conversationID, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)), name) + + cp.eventBus.Publish(event.NewEvent(event.GroupCreated, map[event.Field]string{ + event.ConversationID: strconv.Itoa(conversationID), + event.GroupID: group.GroupID, + event.GroupServer: group.GroupServer, + })) + return conversationID, nil } - return groupID, invite, err -} - -// GetGroups returns an unordered list of all group IDs. -func (cp *cwtchPeer) GetGroups() []string { - cp.mutex.Lock() - defer cp.mutex.Unlock() - return cp.Profile.GetGroups() -} - -// GetGroup returns a pointer to a specific group, nil if no group exists. 
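As a usage note for the conversation API introduced above, a caller typically creates a conversation and then accepts or blocks it; a minimal sketch (illustrative only, not part of the change set; onion is a placeholder tor v3 address):

    // Sketch: create a contact conversation, then accept (or block) it.
    id, err := cp.NewContactConversation(onion, model.DefaultP2PAccessControl(), false)
    if err == nil {
        _ = cp.AcceptConversation(id)
        // or, to refuse further contact: _ = cp.BlockConversation(id)
    }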
-func (cp *cwtchPeer) GetGroup(groupID string) *model.Group { - cp.mutex.Lock() - defer cp.mutex.Unlock() - return cp.Profile.GetGroup(groupID) -} - -func (cp *cwtchPeer) AddContact(nick, onion string, authorization model.Authorization) { - decodedPub, _ := base32.StdEncoding.DecodeString(strings.ToUpper(onion)) - pp := &model.PublicProfile{Name: nick, Ed25519PublicKey: decodedPub, Authorization: authorization, Onion: onion, Attributes: map[string]string{"nick": nick}} - cp.Profile.AddContact(onion, pp) - pd, _ := json.Marshal(pp) - cp.eventBus.Publish(event.NewEvent(event.PeerCreated, map[event.Field]string{ - event.Data: string(pd), - event.RemotePeer: onion, - })) - cp.eventBus.Publish(event.NewEventList(event.SetPeerAuthorization, event.RemotePeer, onion, event.Authorization, string(authorization))) - - // Default to Deleting Peer History - cp.eventBus.Publish(event.NewEventList(event.SetPeerAttribute, event.RemotePeer, onion, event.SaveHistoryKey, event.DeleteHistoryDefault)) + log.Errorf("error creating group: %v", err) + return -1, err } // AddServer takes in a serialized server specification (a bundle of related keys) and adds a contact for the // server assuming there are no errors and the contact doesn't already exist. // Returns the onion of the new server if added // TODO in the future this function should also integrate with a trust provider to validate the key bundle. +// Status: Ready for 1.5 func (cp *cwtchPeer) AddServer(serverSpecification string) (string, error) { // This confirms that the server did at least sign the bundle keyBundle, err := model.DeserializeAndVerify([]byte(serverSpecification)) @@ -462,7 +585,8 @@ func (cp *cwtchPeer) AddServer(serverSpecification string) (string, error) { } log.Debugf("Got new key bundle %v", keyBundle.Serialize()) - // TODO if the key bundle is incomplete then error out. In the future we may allow servers to attest to new + // if the key bundle is incomplete then error out. + // TODO In the future we may allow servers to attest to new // keys or subsets of keys, but for now they must commit only to a complete set of keys required for Cwtch Groups // (that way we can be assured that the keybundle we store is a valid one) if !keyBundle.HasKeyType(model.KeyTypeTokenOnion) || !keyBundle.HasKeyType(model.KeyTypeServerOnion) || !keyBundle.HasKeyType(model.KeyTypePrivacyPass) { @@ -474,243 +598,261 @@ func (cp *cwtchPeer) AddServer(serverSpecification string) (string, error) { onion := string(onionKey) // Add the contact if we don't already have it - if cp.GetContact(onion) == nil { - decodedPub, _ := base32.StdEncoding.DecodeString(strings.ToUpper(onion)) - ab := keyBundle.AttributeBundle() - pp := &model.PublicProfile{Name: onion, Ed25519PublicKey: decodedPub, Authorization: model.AuthUnknown, Onion: onion, Attributes: ab} - - // The only part of this function that actually modifies the profile... - cp.mutex.Lock() - cp.Profile.AddContact(onion, pp) - cp.mutex.Unlock() - - pd, _ := json.Marshal(pp) - - // Sync the Storage Engine - cp.eventBus.Publish(event.NewEvent(event.PeerCreated, map[event.Field]string{ - event.Data: string(pd), - event.RemotePeer: onion, - })) - } - - // At this point we know the server exists - server := cp.GetContact(onion) - ab := keyBundle.AttributeBundle() - - // Check server bundle for consistency if we have different keys stored than in the tofu bundle then we - // abort... - for k, v := range ab { - val, exists := server.GetAttribute(k) - if exists { - if val != v { - // this is inconsistent! 
- return "", model.InconsistentKeyBundleError - } + conversationInfo, _ := cp.FetchConversationInfo(onion) + if conversationInfo == nil { + _, err := cp.NewContactConversation(onion, model.DefaultP2PAccessControl(), true) + if err != nil { + return "", err } - // we haven't seen this key associated with the server before } - // Store the key bundle for the server so we can reconstruct a tofubundle invite - cp.SetContactAttribute(onion, string(model.BundleType), serverSpecification) + conversationInfo, err = cp.FetchConversationInfo(onion) + if conversationInfo != nil && err == nil { + ab := keyBundle.AttributeBundle() + for k, v := range ab { + val, exists := conversationInfo.Attributes[k] + if exists { + if val != v { + // this is inconsistent! + return "", model.InconsistentKeyBundleError + } + } + // we haven't seen this key associated with the server before + } - // If we have gotten to this point we can assume this is a safe key bundle signed by the - // server with no conflicting keys. So we are going to publish all the keys - for k, v := range ab { - log.Debugf("Server (%v) has %v key %v", onion, k, v) - cp.SetContactAttribute(onion, k, v) + // // If we have gotten to this point we can assume this is a safe key bundle signed by the + // // server with no conflicting keys. So we are going to save all the keys + for k, v := range ab { + cp.SetConversationAttribute(conversationInfo.ID, attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(k)), v) + } + cp.SetConversationAttribute(conversationInfo.ID, attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(string(model.BundleType))), serverSpecification) + return onion, err } - - return onion, nil + return "", err } - return "", err + return "", model.InconsistentKeyBundleError } -// GetContacts returns an unordered list of onions -func (cp *cwtchPeer) GetContacts() []string { - cp.mutex.Lock() - defer cp.mutex.Unlock() - return cp.Profile.GetContacts() -} - -// GetServers returns an unordered list of servers +// GetServers returns an unordered list of server handles func (cp *cwtchPeer) GetServers() []string { - contacts := cp.Profile.GetContacts() var servers []string - for _, contact := range contacts { - if cp.GetContact(contact).IsServer() { - servers = append(servers, contact) + conversations, err := cp.FetchConversations() + if err == nil { + for _, conversationInfo := range conversations { + if conversationInfo.IsServer() { + servers = append(servers, conversationInfo.Handle) + } } } return servers } -// GetContact returns a given contact, nil is no such contact exists -func (cp *cwtchPeer) GetContact(onion string) *model.PublicProfile { - cp.mutex.Lock() - defer cp.mutex.Unlock() - contact, _ := cp.Profile.GetContact(onion) - return contact -} - +// GetOnion +// Status: Deprecated in 1.5 func (cp *cwtchPeer) GetOnion() string { cp.mutex.Lock() defer cp.mutex.Unlock() - return cp.Profile.Onion -} -func (cp *cwtchPeer) GetPeerState(onion string) (connections.ConnectionState, bool) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - if peer, ok := cp.Profile.Contacts[onion]; ok { - return connections.ConnectionStateToType()[peer.State], true - } - return connections.DISCONNECTED, false + onion, _ := cp.storage.LoadProfileKeyValue(TypeAttribute, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Onion)).ToString()) + return string(onion) } -func (cp *cwtchPeer) GetGroupState(groupid string) (connections.ConnectionState, bool) { +// GetPeerState +// 
Status: Ready for 1.5 +func (cp *cwtchPeer) GetPeerState(handle string) connections.ConnectionState { cp.mutex.Lock() defer cp.mutex.Unlock() - if group, ok := cp.Profile.Groups[groupid]; ok { - return connections.ConnectionStateToType()[group.State], true + if state, ok := cp.state[handle]; ok { + return state } - return connections.DISCONNECTED, false + return connections.DISCONNECTED } -// PeerWithOnion is the entry point for cwtchPeer relationships +// PeerWithOnion initiates a request to the Protocol Engine to set up Cwtch Session with a given tor v3 onion +// address. func (cp *cwtchPeer) PeerWithOnion(onion string) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - if _, exists := cp.Profile.GetContact(onion); !exists { - cp.AddContact(onion, onion, model.AuthApproved) - } cp.eventBus.Publish(event.NewEvent(event.PeerRequest, map[event.Field]string{event.RemotePeer: onion})) } -// DeleteContact deletes a peer from the profile, storage, and handling -func (cp *cwtchPeer) DeleteContact(onion string) { - cp.mutex.Lock() - cp.Profile.DeleteContact(onion) - defer cp.mutex.Unlock() - cp.eventBus.Publish(event.NewEventList(event.DeleteContact, event.RemotePeer, onion)) +// SendInviteToConversation kicks off the invite process +func (cp *cwtchPeer) SendInviteToConversation(conversationID int, inviteConversationID int) error { + var invite model.MessageWrapper + + inviteConversationInfo, err := cp.GetConversationInfo(inviteConversationID) + + if inviteConversationInfo == nil || err != nil { + return err + } + + if tor.IsValidHostname(inviteConversationInfo.Handle) { + invite = model.MessageWrapper{Overlay: model.OverlayInviteContact, Data: inviteConversationInfo.Handle} + } else { + // Reconstruct Group + groupID, ok := inviteConversationInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupID)).ToString()] + if !ok { + return errors.New("group structure is malformed - no id") + } + groupServer, ok := inviteConversationInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupServer)).ToString()] + if !ok { + return errors.New("group structure is malformed - no server") + } + groupKeyBase64, ok := inviteConversationInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupKey)).ToString()] + if !ok { + return errors.New("group structure is malformed - no key") + } + groupName, ok := inviteConversationInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name)).ToString()] + if !ok { + return errors.New("group structure is malformed - no name") + } + + groupKey, err := base64.StdEncoding.DecodeString(groupKeyBase64) + if err != nil { + return errors.New("malformed group key") + } + + var groupKeyFixed = [32]byte{} + copy(groupKeyFixed[:], groupKey[:]) + + group := model.Group{ + GroupID: groupID, + GroupName: groupName, + GroupKey: groupKeyFixed, + GroupServer: groupServer, + } + + groupInvite, err := group.Invite() + if err != nil { + return errors.New("group invite is malformed") + } + + serverInfo, err := cp.FetchConversationInfo(groupServer) + if err != nil { + return errors.New("unknown server associated with group") + } + + bundle, exists := serverInfo.Attributes[attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(string(model.BundleType))).ToString()] + if !exists { + return errors.New("server bundle not found") + } + + invite = 
model.MessageWrapper{Overlay: model.OverlayInviteContact, Data: fmt.Sprintf("tofubundle:server:%s||%s", base64.StdEncoding.EncodeToString([]byte(bundle)), groupInvite)} + } + + inviteBytes, err := json.Marshal(invite) + if err != nil { + log.Errorf("malformed invite: %v", err) + return err + } + return cp.SendMessage(conversationID, string(inviteBytes)) } -// DeleteGroup deletes a Group from the profile, storage, and handling -func (cp *cwtchPeer) DeleteGroup(groupID string) { - cp.mutex.Lock() - cp.Profile.DeleteGroup(groupID) - defer cp.mutex.Unlock() - cp.eventBus.Publish(event.NewEventList(event.DeleteGroup, event.GroupID, groupID)) -} - -// InviteOnionToGroup kicks off the invite process -func (cp *cwtchPeer) InviteOnionToGroup(onion string, groupid string) error { - cp.mutex.Lock() - group := cp.Profile.GetGroup(groupid) - if group == nil { - cp.mutex.Unlock() - return errors.New("invalid group id") +func (cp *cwtchPeer) ImportBundle(importString string) error { + if tor.IsValidHostname(importString) { + _, err := cp.NewContactConversation(importString, model.DefaultP2PAccessControl(), true) + if err == nil { + return ConstructResponse(constants.ImportBundlePrefix, "success") + } + return ConstructResponse(constants.ImportBundlePrefix, err.Error()) + } else if strings.HasPrefix(importString, constants.TofuBundlePrefix) { + bundle := strings.Split(importString, "||") + if len(bundle) == 2 { + err := cp.ImportBundle(bundle[0][len(constants.TofuBundlePrefix):]) + // if the server import failed then abort the whole process.. + if err != nil && !strings.HasSuffix(err.Error(), "success") { + return ConstructResponse(constants.ImportBundlePrefix, err.Error()) + } + return cp.ImportBundle(bundle[1]) + } + } else if strings.HasPrefix(importString, constants.ServerPrefix) { + // Server Key Bundles are prefixed with + bundle, err := base64.StdEncoding.DecodeString(importString[len(constants.ServerPrefix):]) + if err == nil { + if _, err = cp.AddServer(string(bundle)); err != nil { + return ConstructResponse(constants.ImportBundlePrefix, err.Error()) + } + return ConstructResponse(constants.ImportBundlePrefix, "success") + } + return ConstructResponse(constants.ImportBundlePrefix, err.Error()) + } else if strings.HasPrefix(importString, constants.GroupPrefix) { + //eg: torv3JFDWkXExBsZLkjvfkkuAxHsiLGZBk0bvoeJID9ItYnU=EsEBCiBhOWJhZDU1OTQ0NWI3YmM2N2YxYTM5YjkzMTNmNTczNRIgpHeNaG+6jy750eDhwLO39UX4f2xs0irK/M3P6mDSYQIaOTJjM2ttb29ibnlnaGoyenc2cHd2N2Q1N3l6bGQ3NTNhdW8zdWdhdWV6enB2ZmFrM2FoYzRiZHlkCiJAdVSSVgsksceIfHe41OJu9ZFHO8Kwv3G6F5OK3Hw4qZ6hn6SiZjtmJlJezoBH0voZlCahOU7jCOg+dsENndZxAA== + if _, err := cp.ImportGroup(importString); err != nil { + return ConstructResponse(constants.ImportBundlePrefix, err.Error()) + } + return ConstructResponse(constants.ImportBundlePrefix, "success") } - invite, err := group.Invite() - cp.mutex.Unlock() - if err == nil { - err = cp.SendMessage(onion, invite) - } - return err + return ConstructResponse(constants.ImportBundlePrefix, "invalid_group_invite_prefix") } // JoinServer manages a new server connection with the given onion address func (cp *cwtchPeer) JoinServer(onion string) error { - if cp.GetContact(onion) != nil { - tokenY, yExists := cp.GetContact(onion).GetAttribute(string(model.KeyTypePrivacyPass)) - tokenOnion, onionExists := cp.GetContact(onion).GetAttribute(string(model.KeyTypeTokenOnion)) - if yExists && onionExists { - signature, exists := cp.GetContactAttribute(onion, lastKnownSignature) - if !exists { - signature = 
base64.StdEncoding.EncodeToString([]byte{}) - } - cp.eventBus.Publish(event.NewEvent(event.JoinServer, map[event.Field]string{event.GroupServer: onion, event.ServerTokenY: tokenY, event.ServerTokenOnion: tokenOnion, event.Signature: signature})) - return nil + ci, err := cp.FetchConversationInfo(onion) + if ci == nil || err != nil { + return errors.New("no keys found for server connection") + } + + //if cp.GetContact(onion) != nil { + tokenY, yExists := ci.Attributes[attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(string(model.KeyTypePrivacyPass))).ToString()] + tokenOnion, onionExists := ci.Attributes[attr.PublicScope.ConstructScopedZonedPath(attr.ServerKeyZone.ConstructZonedPath(string(model.KeyTypeTokenOnion))).ToString()] + if yExists && onionExists { + signature, exists := ci.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(lastReceivedSignature)).ToString()] + if !exists { + signature = base64.StdEncoding.EncodeToString([]byte{}) } + cp.eventBus.Publish(event.NewEvent(event.JoinServer, map[event.Field]string{event.GroupServer: onion, event.ServerTokenY: tokenY, event.ServerTokenOnion: tokenOnion, event.Signature: signature})) + return nil } return errors.New("no keys found for server connection") } // ResyncServer completely tears down and resyncs a new server connection with the given onion address func (cp *cwtchPeer) ResyncServer(onion string) error { - if cp.GetContact(onion) != nil { - tokenY, yExists := cp.GetContact(onion).GetAttribute(string(model.KeyTypePrivacyPass)) - tokenOnion, onionExists := cp.GetContact(onion).GetAttribute(string(model.KeyTypeTokenOnion)) - if yExists && onionExists { - signature := base64.StdEncoding.EncodeToString([]byte{}) - cp.eventBus.Publish(event.NewEvent(event.JoinServer, map[event.Field]string{event.GroupServer: onion, event.ServerTokenY: tokenY, event.ServerTokenOnion: tokenOnion, event.Signature: signature})) - return nil - } + ci, err := cp.FetchConversationInfo(onion) + if ci == nil || err != nil { + return errors.New("no keys found for server connection") } - return errors.New("no keys found for server connection") + cp.SetConversationAttribute(ci.ID, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(lastReceivedSignature)), base64.StdEncoding.EncodeToString([]byte{})) + return cp.JoinServer(onion) } +// SendGetValToPeer +// Status: Ready for 1.5 func (cp *cwtchPeer) SendGetValToPeer(onion string, scope string, path string) { ev := event.NewEventList(event.SendGetValMessageToPeer, event.RemotePeer, onion, event.Scope, scope, event.Path, path) cp.eventBus.Publish(ev) } -// BlockPeer blocks an existing peer relationship. 
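Because JoinServer and ResyncServer above only succeed once the server's key bundle has been stored, a typical call sequence imports the bundle first; a rough sketch, assuming serverBundle and serverOnion were obtained out of band (illustrative only, mirroring the "success" convention ImportBundle itself uses):

    // Sketch: import a server key bundle, then join the server.
    if err := cp.ImportBundle(serverBundle); err != nil && !strings.HasSuffix(err.Error(), "success") {
        log.Errorf("could not import server bundle: %v", err)
    } else if err := cp.JoinServer(serverOnion); err != nil {
        log.Errorf("could not join server: %v", err)
    }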
-func (cp *cwtchPeer) SetContactAuthorization(peer string, authorization model.Authorization) error { - cp.mutex.Lock() - err := cp.Profile.SetContactAuthorization(peer, authorization) - cp.mutex.Unlock() - cp.eventBus.Publish(event.NewEvent(event.SetPeerAuthorization, map[event.Field]string{event.RemotePeer: peer, event.Authorization: string(authorization)})) - return err -} - -// AcceptInvite accepts a given existing group invite -func (cp *cwtchPeer) AcceptInvite(groupID string) error { - cp.mutex.Lock() - err := cp.Profile.AcceptInvite(groupID) - cp.mutex.Unlock() - if err != nil { - return err - } - cp.eventBus.Publish(event.NewEvent(event.AcceptGroupInvite, map[event.Field]string{event.GroupID: groupID})) - err = cp.JoinServer(cp.Profile.Groups[groupID].GroupServer) - - return err -} - -// RejectInvite rejects a given group invite. -func (cp *cwtchPeer) RejectInvite(groupID string) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - cp.Profile.RejectInvite(groupID) - cp.eventBus.Publish(event.NewEvent(event.RejectGroupInvite, map[event.Field]string{event.GroupID: groupID})) -} - // Listen makes the peer open a listening port to accept incoming connections (and be detectably online) +// Status: Ready for 1.5 func (cp *cwtchPeer) Listen() { cp.mutex.Lock() defer cp.mutex.Unlock() if !cp.listenStatus { log.Infof("cwtchPeer Listen sending ProtocolEngineStartListen\n") cp.listenStatus = true - cp.eventBus.Publish(event.NewEvent(event.ProtocolEngineStartListen, map[event.Field]string{event.Onion: cp.Profile.Onion})) + onion, _ := cp.storage.LoadProfileKeyValue(TypeAttribute, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Onion)).ToString()) + cp.eventBus.Publish(event.NewEvent(event.ProtocolEngineStartListen, map[event.Field]string{event.Onion: string(onion)})) } // else protocol engine is already listening } // StartPeersConnections attempts to connect to peer connections +// Status: Ready for 1.5 func (cp *cwtchPeer) StartPeersConnections() { - for _, contact := range cp.GetContacts() { - if !cp.GetContact(contact).IsServer() { - cp.PeerWithOnion(contact) + conversations, _ := cp.FetchConversations() + for _, conversation := range conversations { + if conversation.Accepted && !conversation.IsGroup() && !conversation.IsServer() { + cp.PeerWithOnion(conversation.Handle) } } } // StartServerConnections attempts to connect to all server connections +// Status: Ready for 1.5 func (cp *cwtchPeer) StartServerConnections() { - for _, contact := range cp.GetContacts() { - if cp.GetContact(contact).IsServer() { - err := cp.JoinServer(contact) + conversations, _ := cp.FetchConversations() + for _, conversation := range conversations { + if conversation.IsServer() { + err := cp.JoinServer(conversation.Handle) if err != nil { // Almost certainly a programming error so print it.. 
log.Errorf("error joining server %v", err) @@ -719,75 +861,41 @@ func (cp *cwtchPeer) StartServerConnections() { } } -// SetContactAttribute sets an attribute for the indicated contact and emits an event -func (cp *cwtchPeer) SetContactAttribute(onion string, key string, val string) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - if contact, ok := cp.Profile.GetContact(onion); ok { - contact.SetAttribute(key, val) - cp.eventBus.Publish(event.NewEvent(event.SetPeerAttribute, map[event.Field]string{ - event.RemotePeer: onion, - event.Key: key, - event.Data: val, - })) - } -} - -// GetContactAttribute gets an attribute for the indicated contact -func (cp *cwtchPeer) GetContactAttribute(onion string, key string) (string, bool) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - if contact, ok := cp.Profile.GetContact(onion); ok { - if val, exists := contact.GetAttribute(key); exists { - return val, true - } - } - return "", false -} - -// SetGroupAttribute sets an attribute for the indicated group and emits an event -func (cp *cwtchPeer) SetGroupAttribute(gid string, key string, val string) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - if group := cp.Profile.GetGroup(gid); group != nil { - group.SetAttribute(key, val) - cp.eventBus.Publish(event.NewEvent(event.SetGroupAttribute, map[event.Field]string{ - event.GroupID: gid, - event.Key: key, - event.Data: val, - })) - } -} - -// GetGroupAttribute gets an attribute for the indicated group -func (cp *cwtchPeer) GetGroupAttribute(gid string, key string) (string, bool) { - cp.mutex.Lock() - defer cp.mutex.Unlock() - if group := cp.Profile.GetGroup(gid); group != nil { - if val, exists := group.GetAttribute(key); exists { - return val, true - } - } - return "", false -} - // Shutdown kills all connections and cleans up all goroutines for the peer +// Status: Ready for 1.5 func (cp *cwtchPeer) Shutdown() { cp.mutex.Lock() defer cp.mutex.Unlock() cp.shutdown = true cp.queue.Shutdown() + if cp.storage != nil { + cp.storage.Close() + } } -func (cp *cwtchPeer) storeMessage(onion string, messageTxt string, sent time.Time) { - if cp.GetContact(onion) == nil { - cp.AddContact(onion, onion, model.AuthUnknown) +func (cp *cwtchPeer) storeMessage(handle string, message string, sent time.Time) error { + // TODO maybe atomize this? 
+ ci, err := cp.FetchConversationInfo(handle) + if err != nil { + id, err := cp.NewContactConversation(handle, model.DefaultP2PAccessControl(), false) + if err != nil { + return err + } + ci, err = cp.GetConversationInfo(id) + if err != nil { + return err + } } cp.mutex.Lock() - cp.Profile.AddMessageToContactTimeline(onion, messageTxt, sent) - cp.mutex.Unlock() + defer cp.mutex.Unlock() + + // Generate a random number and use it as the signature + signature := event.GetRandNumber().String() + return cp.storage.InsertMessage(ci.ID, 0, message, model.Attributes{constants.AttrAuthor: handle, constants.AttrAck: event.True, constants.AttrSentTimestamp: sent.Format(time.RFC3339Nano)}, signature, model.CalculateContentHash(handle, message)) } +// ShareFile begins hosting the given serialized manifest +// Status: Ready for 1.5 func (cp *cwtchPeer) ShareFile(fileKey string, serializedManifest string) { tsStr, exists := cp.GetScopedZonedAttribute(attr.LocalScope, attr.FilesharingZone, fmt.Sprintf("%s.ts", fileKey)) if exists { @@ -808,60 +916,64 @@ func (cp *cwtchPeer) eventHandler() { case event.ProtocolEngineStopped: cp.mutex.Lock() cp.listenStatus = false - log.Infof("Protocol engine for %v has stopped listening", cp.Profile.Onion) + log.Infof("Protocol engine for %v has stopped listening", cp.GetOnion()) cp.mutex.Unlock() case event.EncryptedGroupMessage: - // If successful, a side effect is the message is added to the group's timeline + // If successful, a side effect is the message is added to the group's timeline ciphertext, _ := base64.StdEncoding.DecodeString(ev.Data[event.Ciphertext]) signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature]) - + log.Debugf("received encrypted group message: %x", ev.Data[event.Signature]) // SECURITY NOTE: A malicious server could insert posts such that everyone always has a different lastKnownSignature // However the server can always replace **all** messages in an attempt to track users // This is mitigated somewhat by resync events which do wipe things entire. // The security of cwtch groups are also not dependent on the servers inability to uniquely tag connections (as long as // it learns nothing else about each connection). 
// store the base64 encoded signature for later use - cp.SetContactAttribute(ev.Data[event.GroupServer], lastKnownSignature, ev.Data[event.Signature]) - cp.mutex.Lock() - ok, groupID, message, index := cp.Profile.AttemptDecryption(ciphertext, signature) - cp.mutex.Unlock() - if ok && index > -1 { - cp.eventBus.Publish(event.NewEvent(event.NewMessageFromGroup, map[event.Field]string{event.TimestampReceived: message.Received.Format(time.RFC3339Nano), event.TimestampSent: message.Timestamp.Format(time.RFC3339Nano), event.Data: message.Message, event.GroupID: groupID, event.Signature: base64.StdEncoding.EncodeToString(message.Signature), event.PreviousSignature: base64.StdEncoding.EncodeToString(message.PreviousMessageSig), event.RemotePeer: message.PeerID, event.Index: strconv.Itoa(index)})) + // TODO Server Connections should sent Connection ID + ci, err := cp.FetchConversationInfo(ev.Data[event.GroupServer]) + if ci == nil || err != nil { + log.Errorf("no server connection count") + return } - - // The group has been compromised - if !ok && groupID != "" { - if cp.Profile.GetGroup(groupID).IsCompromised { - cp.eventBus.Publish(event.NewEvent(event.GroupCompromised, map[event.Field]string{event.GroupID: groupID})) + cp.SetConversationAttribute(ci.ID, attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(lastReceivedSignature)), ev.Data[event.Signature]) + conversations, err := cp.FetchConversations() + if err == nil { + for _, conversationInfo := range conversations { + if !tor.IsValidHostname(conversationInfo.Handle) { + group, err := cp.constructGroupFromConversation(conversationInfo) + if err == nil { + success, dgm := group.AttemptDecryption(ciphertext, signature) + if success { + // Time to either acknowledge the message or insert a new message + cp.attemptInsertOrAcknowledgeLegacyGroupConversation(conversationInfo.ID, ev.Data[event.Signature], dgm) + break + } + } + } } } case event.NewMessageFromPeer: //event.TimestampReceived, event.RemotePeer, event.Data ts, _ := time.Parse(time.RFC3339Nano, ev.Data[event.TimestampReceived]) cp.storeMessage(ev.Data[event.RemotePeer], ev.Data[event.Data], ts) - case event.PeerAcknowledgement: - cp.mutex.Lock() - idx := cp.Profile.AckSentMessageToPeer(ev.Data[event.RemotePeer], ev.Data[event.EventID]) - edata := ev.Data - edata[event.Index] = strconv.Itoa(idx) - cp.eventBus.Publish(event.NewEvent(event.IndexedAcknowledgement, edata)) - cp.mutex.Unlock() - + err := cp.attemptAcknowledgeP2PConversation(ev.Data[event.RemotePeer], ev.Data[event.EventID]) + if err != nil { + // Note: This is not an Error because malicious peers can just send acks for random things + // There is no point in polluting error logs with that mess. 
+ log.Debugf("failed to acknowledge acknowledgement: %v", err) + } case event.SendMessageToGroupError: - cp.mutex.Lock() - signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature]) - cp.Profile.AddGroupSentMessageError(ev.Data[event.GroupID], signature, ev.Data[event.Error]) - cp.mutex.Unlock() - + err := cp.attemptErrorConversationMessage(ev.Data[event.GroupID], ev.Data[event.Signature], event.SendMessageToGroupError, ev.Data[event.Error]) + if err != nil { + log.Errorf("failed to error p2p message: %v", err) + } case event.SendMessageToPeerError: - cp.mutex.Lock() - idx := cp.Profile.ErrorSentMessageToPeer(ev.Data[event.RemotePeer], ev.Data[event.EventID], ev.Data[event.Error]) - edata := ev.Data - edata[event.Index] = strconv.Itoa(idx) - cp.eventBus.Publish(event.NewEvent(event.IndexedFailure, edata)) - cp.mutex.Unlock() + err := cp.attemptErrorConversationMessage(ev.Data[event.RemotePeer], ev.Data[event.EventID], event.SendMessageToPeerError, ev.Data[event.Error]) + if err != nil { + log.Errorf("failed to error p2p message: %v", err) + } case event.RetryServerRequest: // Automated Join Server Request triggered by a plugin. log.Debugf("profile received an automated retry event for %v", ev.Data[event.GroupServer]) @@ -876,8 +988,9 @@ func (cp *cwtchPeer) eventHandler() { log.Debugf("NewGetValMessageFromPeer for %v.%v from %v\n", scope, path, onion) - remotePeer := cp.GetContact(onion) - if remotePeer != nil && remotePeer.Authorization == model.AuthApproved { + conversationInfo, err := cp.FetchConversationInfo(onion) + log.Debugf("confo info lookup newgetval %v %v %v", onion, conversationInfo, err) + if conversationInfo != nil && conversationInfo.Accepted { scope := attr.IntoScope(scope) if scope.IsPublic() || scope.IsConversation() { zone, zpath := attr.ParseZone(path) @@ -888,15 +1001,7 @@ func (cp *cwtchPeer) eventHandler() { val, exists = cp.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name) } - if exists && zone == attr.FilesharingZone && strings.HasSuffix(zpath, ".manifest.size") { - fileKey := strings.TrimSuffix(zpath, ".manifest.size") - serializedManifest, exists2 := cp.GetScopedZonedAttribute(attr.ConversationScope, attr.FilesharingZone, fmt.Sprintf("%s.manifest", fileKey)) - if exists2 { - cp.ShareFile(fileKey, serializedManifest) - } - } - - resp := event.NewEvent(event.SendRetValMessageToPeer, map[event.Field]string{event.RemotePeer: onion, event.Exists: strconv.FormatBool(exists)}) + resp := event.NewEvent(event.SendRetValMessageToPeer, map[event.Field]string{event.ConversationID: strconv.Itoa(conversationInfo.ID), event.RemotePeer: onion, event.Exists: strconv.FormatBool(exists)}) resp.EventID = ev.EventID if exists { resp.Data[event.Data] = val @@ -963,7 +1068,7 @@ func (cp *cwtchPeer) eventHandler() { path := ev.Data[event.Path] val := ev.Data[event.Data] exists, _ := strconv.ParseBool(ev.Data[event.Exists]) - log.Debugf("NewRetValMessageFromPeer %v %v%v %v %v\n", onion, scope, path, exists, val) + log.Debugf("NewRetValMessageFromPeer %v %v %v %v %v\n", onion, scope, path, exists, val) if exists { // Handle File Sharing Metadata @@ -984,27 +1089,23 @@ func (cp *cwtchPeer) eventHandler() { // Allow public profile parameters to be added as peer specific attributes... 
if attr.Scope(scope).IsPublic() && zone == attr.ProfileZone { - cp.SetContactAttribute(onion, attr.GetPeerScope(path), val) + ci, err := cp.FetchConversationInfo(onion) + log.Debugf("fetch conversation info %v %v", ci, err) + if ci != nil && err == nil { + err := cp.SetConversationAttribute(ci.ID, attr.Scope(scope).ConstructScopedZonedPath(zone.ConstructZonedPath(path)), val) + if err != nil { + log.Errorf("error setting conversation attribute %v", err) + } + } } } case event.PeerStateChange: cp.mutex.Lock() - if _, exists := cp.Profile.Contacts[ev.Data[event.RemotePeer]]; exists { - cp.Profile.Contacts[ev.Data[event.RemotePeer]].State = ev.Data[event.ConnectionState] - } + cp.state[ev.Data[event.RemotePeer]] = connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] cp.mutex.Unlock() case event.ServerStateChange: cp.mutex.Lock() - // We update both the server contact status, as well as the groups the server belongs to - log.Debugf("Got Server State Change %v", ev) - cp.Profile.Contacts[ev.Data[event.GroupServer]].State = ev.Data[event.ConnectionState] - - // TODO deprecate this, the UI should consult the server contact entry instead (it's far more efficient) - for _, group := range cp.Profile.Groups { - if group.GroupServer == ev.Data[event.GroupServer] { - group.State = ev.Data[event.ConnectionState] - } - } + cp.state[ev.Data[event.GroupServer]] = connections.ConnectionStateToType()[ev.Data[event.ConnectionState]] cp.mutex.Unlock() default: @@ -1015,3 +1116,117 @@ func (cp *cwtchPeer) eventHandler() { } } } + +// attemptInsertOrAcknowledgeLegacyGroupConversation is a convenience method that looks up the conversation +// by the given handle and attempts to mark the message as acknowledged. returns error on failure +// to either find the contact or the associated message +func (cp *cwtchPeer) attemptInsertOrAcknowledgeLegacyGroupConversation(conversationID int, signature string, dm *groups.DecryptedGroupMessage) error { + log.Infof("attempting to insert or ack group message %v %v", conversationID, signature) + messageID, err := cp.GetChannelMessageBySignature(conversationID, 0, signature) + // We have received our own message (probably), acknowledge and move on... 
+ if err == nil { + _, attr, err := cp.GetChannelMessage(conversationID, 0, messageID) + if err == nil { + cp.mutex.Lock() + attr[constants.AttrAck] = constants.True + cp.storage.UpdateMessageAttributes(conversationID, 0, messageID, attr) + cp.mutex.Unlock() + cp.eventBus.Publish(event.NewEvent(event.IndexedAcknowledgement, map[event.Field]string{event.ConversationID: strconv.Itoa(conversationID), event.Index: strconv.Itoa(messageID)})) + return nil + } + } else { + cp.mutex.Lock() + cp.storage.InsertMessage(conversationID, 0, dm.Text, model.Attributes{constants.AttrAck: constants.True, "PreviousSignature": base64.StdEncoding.EncodeToString(dm.PreviousMessageSig), constants.AttrAuthor: dm.Onion, constants.AttrSentTimestamp: time.Unix(int64(dm.Timestamp), 0).Format(time.RFC3339Nano)}, signature, model.CalculateContentHash(dm.Onion, dm.Text)) + newTotal, _ := cp.storage.GetChannelMessageCount(conversationID, 0) + cp.mutex.Unlock() + cp.eventBus.Publish(event.NewEvent(event.NewMessageFromGroup, map[event.Field]string{event.ConversationID: strconv.Itoa(conversationID), event.TimestampSent: time.Unix(int64(dm.Timestamp), 0).Format(time.RFC3339Nano), event.RemotePeer: dm.Onion, event.Index: strconv.Itoa(newTotal)})) + return nil + } + return err +} + +// attemptAcknowledgeP2PConversation is a convenience method that looks up the conversation +// by the given handle and attempts to mark the message as acknowledged. returns error on failure +// to either find the contact or the associated message +func (cp *cwtchPeer) attemptAcknowledgeP2PConversation(handle string, signature string) error { + ci, err := cp.FetchConversationInfo(handle) + // We should *never* receive a peer acknowledgement for a conversation that doesn't exist... + if ci != nil && err == nil { + // for p2p messages the randomly generated event ID is the "signature" + id, err := cp.GetChannelMessageBySignature(ci.ID, 0, signature) + if err == nil { + _, attr, err := cp.GetChannelMessage(ci.ID, 0, id) + if err == nil { + cp.mutex.Lock() + attr[constants.AttrAck] = constants.True + cp.storage.UpdateMessageAttributes(ci.ID, 0, id, attr) + cp.mutex.Unlock() + cp.eventBus.Publish(event.NewEvent(event.IndexedAcknowledgement, map[event.Field]string{event.ConversationID: strconv.Itoa(ci.ID), event.RemotePeer: handle, event.Index: strconv.Itoa(id)})) + return nil + } + return err + } + return err + } + return err +} + +// attemptErrorConversationMessage is a convenience method that looks up the conversation +// by the given handle and attempts to mark the message as errored. returns error on failure +// to either find the contact or the associated message +func (cp *cwtchPeer) attemptErrorConversationMessage(handle string, signature string, eventType event.Type, error string) error { + ci, err := cp.FetchConversationInfo(handle) + // We should *never* receive a peer acknowledgement for a conversation that doesn't exist...
+ if ci != nil && err == nil { + // for p2p messages the randomly generated event ID is the "signature" + id, err := cp.GetChannelMessageBySignature(ci.ID, 0, signature) + if err == nil { + _, attr, err := cp.GetChannelMessage(ci.ID, 0, id) + if err == nil { + cp.mutex.Lock() + attr[constants.AttrErr] = constants.True + cp.storage.UpdateMessageAttributes(ci.ID, 0, id, attr) + cp.mutex.Unlock() + cp.eventBus.Publish(event.NewEvent(eventType, map[event.Field]string{event.ConversationID: strconv.Itoa(ci.ID), event.RemotePeer: handle, event.Error: error, event.Index: strconv.Itoa(id)})) + return nil + } + return err + } + return err + } + return err +} + +func (cp *cwtchPeer) GetChannelMessageBySignature(conversationID int, channelID int, signature string) (int, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + return cp.storage.GetChannelMessageBySignature(conversationID, channelID, signature) +} + +func (cp *cwtchPeer) GetChannelMessageByContentHash(conversationID int, channelID int, contenthash string) (int, error) { + cp.mutex.Lock() + defer cp.mutex.Unlock() + messageID, err := cp.storage.GetChannelMessageByContentHash(conversationID, channelID, contenthash) + if err == nil { + return cp.storage.GetRowNumberByMessageID(conversationID, channelID, messageID) + } + return -1, err +} + +// constructGroupFromConversation returns a model.Group wrapper around a database back groups. Useful for +// encrypting / decrypting messages to/from the group. +func (cp *cwtchPeer) constructGroupFromConversation(conversationInfo *model.Conversation) (*model.Group, error) { + key := conversationInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupKey)).ToString()] + groupKey, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return nil, errors.New("group key is malformed") + } + var groupKeyFixed [32]byte + copy(groupKeyFixed[:], groupKey[:]) + group := model.Group{ + GroupID: conversationInfo.Handle, + GroupServer: conversationInfo.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.LegacyGroupZone.ConstructZonedPath(constants.GroupServer)).ToString()], + GroupKey: groupKeyFixed, + } + return &group, nil +} diff --git a/peer/cwtchprofilestorage.go b/peer/cwtchprofilestorage.go new file mode 100644 index 0000000..9548924 --- /dev/null +++ b/peer/cwtchprofilestorage.go @@ -0,0 +1,764 @@ +package peer + +import ( + "cwtch.im/cwtch/event" + "cwtch.im/cwtch/model" + "cwtch.im/cwtch/model/attr" + "database/sql" + "errors" + "fmt" + "git.openprivacy.ca/openprivacy/log" + "os" +) + +// StorageKeyType is an interface wrapper around storage key types +type StorageKeyType string + +const ( + // TypeAttribute for Profile Scoped and Zoned Attributes + TypeAttribute = StorageKeyType("Attribute") + + // TypePrivateKey for Profile Private Keys + TypePrivateKey = StorageKeyType("PrivateKey") + + // TypePublicKey for Profile Public Keys + TypePublicKey = StorageKeyType("PublicKey") +) + +// CwtchProfileStorage encapsulates common datastore requests so as to not pollute the main cwtch profile +// struct with database knowledge +type CwtchProfileStorage struct { + + // Note: Statements are thread safe.. 
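+	// They are prepared once, either up-front in NewCwtchProfileStorage (profile and
+	// conversation queries) or lazily per channel table (message queries), and cached
+	// in the maps below so the SQL is not re-parsed on every call.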
+ + // Profile related statements + insertProfileKeyValueStmt *sql.Stmt + selectProfileKeyValueStmt *sql.Stmt + + // Conversation related statements + insertConversationStmt *sql.Stmt + fetchAllConversationsStmt *sql.Stmt + selectConversationStmt *sql.Stmt + selectConversationByHandleStmt *sql.Stmt + acceptConversationStmt *sql.Stmt + deleteConversationStmt *sql.Stmt + setConversationAttributesStmt *sql.Stmt + setConversationACLStmt *sql.Stmt + + channelInsertStmts map[ChannelID]*sql.Stmt + channelUpdateMessageStmts map[ChannelID]*sql.Stmt + channelGetMessageStmts map[ChannelID]*sql.Stmt + channelGetMessageBySignatureStmts map[ChannelID]*sql.Stmt + channelGetCountStmts map[ChannelID]*sql.Stmt + channelGetMostRecentMessagesStmts map[ChannelID]*sql.Stmt + channelGetMessageByContentHashStmts map[ChannelID]*sql.Stmt + channelRowNumberStmts map[ChannelID]*sql.Stmt + ProfileDirectory string + db *sql.DB +} + +// ChannelID encapsulates the data necessary to reference a channel structure. +type ChannelID struct { + Conversation int + Channel int +} + +const insertProfileKeySQLStmt = `insert or replace into profile_kv(KeyType, KeyName, KeyValue) values(?,?,?);` +const selectProfileKeySQLStmt = `select KeyValue from profile_kv where KeyType=(?) and KeyName=(?);` + +const insertConversationSQLStmt = `insert into conversations(Handle, Attributes, ACL, Accepted) values(?,?,?,?);` +const fetchAllConversationsSQLStmt = `select ID, Handle, Attributes, ACL, Accepted from conversations;` +const selectConversationSQLStmt = `select ID, Handle, Attributes, ACL, Accepted from conversations where ID=(?);` +const selectConversationByHandleSQLStmt = `select ID, Handle, Attributes, ACL, Accepted from conversations where Handle=(?);` +const acceptConversationSQLStmt = `update conversations set Accepted=true where ID=(?);` +const setConversationAttributesSQLStmt = `update conversations set Attributes=(?) where ID=(?) ;` +const setConversationACLSQLStmt = `update conversations set ACL=(?) where ID=(?) ;` +const deleteConversationSQLStmt = `delete from conversations where ID=(?);` + +// createTableConversationMessagesSQLStmt is a template for creating conversation based tables... +const createTableConversationMessagesSQLStmt = `create table if not exists channel_%d_0_chat (ID integer unique primary key autoincrement, Body text, Attributes []byte, Expiry datetime, Signature text unique, ContentHash blob text);` + +// insertMessageIntoConversationSQLStmt is a template for creating conversation based tables... +const insertMessageIntoConversationSQLStmt = `insert into channel_%d_%d_chat (Body, Attributes, Signature, ContentHash) values(?,?,?,?);` + +// updateMessageIntoConversationSQLStmt is a template for updating attributes of a message in a conversation +const updateMessageIntoConversationSQLStmt = `update channel_%d_%d_chat set Attributes=(?) 
where ID=(?);` + +// purgeMessagesFromConversationSQLStmt is a template for updating attributes of a message in a conversation +const purgeMessagesFromConversationSQLStmt = `delete from channel_%d_%d_chat;` + +// getMessageFromConversationSQLStmt is a template for fetching a message by ID from a conversation +const getMessageFromConversationSQLStmt = `select Body, Attributes from channel_%d_%d_chat where ID=(?);` + +// getMessageBySignatureFromConversationSQLStmt is a template for selecting conversation messages by signature +const getMessageBySignatureFromConversationSQLStmt = `select ID from channel_%d_%d_chat where Signature=(?);` + +// getMessageByContentHashFromConversationSQLStmt is a template for selecting conversation messages by content hash +const getMessageByContentHashFromConversationSQLStmt = `select ID from channel_%d_%d_chat where ContentHash=(?) order by ID desc limit 1;` + +// getLocalIndexOfMessageIDSQLStmt is a template for fetching the offset of a message from the bottom of the database. +const getLocalIndexOfMessageIDSQLStmt = `select count (*) from channel_%d_%d_chat where ID >= (?) order by ID desc;` + +// getMessageCountFromConversationSQLStmt is a template for fetching the count of a messages in a conversation channel +const getMessageCountFromConversationSQLStmt = `select count(*) from channel_%d_%d_chat;` + +// getMostRecentMessagesSQLStmt is a template for fetching the most recent N messages in a conversation channel +const getMostRecentMessagesSQLStmt = `select ID, Body, Attributes, Signature, ContentHash from channel_%d_%d_chat order by ID desc limit (?) offset (?);` + +// NewCwtchProfileStorage constructs a new CwtchProfileStorage from a database. It is also responsible for +// Preparing commonly used SQL Statements +func NewCwtchProfileStorage(db *sql.DB, profileDirectory string) (*CwtchProfileStorage, error) { + + if db == nil { + return nil, errors.New("cannot construct cwtch profile storage with a nil database") + } + + insertProfileKeyValueStmt, err := db.Prepare(insertProfileKeySQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", insertProfileKeySQLStmt, err) + return nil, err + } + + selectProfileKeyStmt, err := db.Prepare(selectProfileKeySQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", selectProfileKeySQLStmt, err) + return nil, err + } + + insertConversationStmt, err := db.Prepare(insertConversationSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", insertConversationSQLStmt, err) + return nil, err + } + + fetchAllConversationsStmt, err := db.Prepare(fetchAllConversationsSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", fetchAllConversationsSQLStmt, err) + return nil, err + } + + selectConversationStmt, err := db.Prepare(selectConversationSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", selectConversationSQLStmt, err) + return nil, err + } + + selectConversationByHandleStmt, err := db.Prepare(selectConversationByHandleSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", selectConversationByHandleSQLStmt, err) + return nil, err + } + + acceptConversationStmt, err := db.Prepare(acceptConversationSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", acceptConversationSQLStmt, err) + return nil, err + } + + deleteConversationStmt, err := db.Prepare(deleteConversationSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", deleteConversationSQLStmt, err) + return nil, err + } + + 
setConversationAttributesStmt, err := db.Prepare(setConversationAttributesSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", setConversationAttributesSQLStmt, err) + return nil, err + } + + setConversationACLStmt, err := db.Prepare(setConversationACLSQLStmt) + if err != nil { + log.Errorf("error preparing query: %v %v", setConversationACLSQLStmt, err) + return nil, err + } + + return &CwtchProfileStorage{db: db, + ProfileDirectory: profileDirectory, + insertProfileKeyValueStmt: insertProfileKeyValueStmt, + selectProfileKeyValueStmt: selectProfileKeyStmt, + fetchAllConversationsStmt: fetchAllConversationsStmt, + insertConversationStmt: insertConversationStmt, + selectConversationStmt: selectConversationStmt, + selectConversationByHandleStmt: selectConversationByHandleStmt, + acceptConversationStmt: acceptConversationStmt, + deleteConversationStmt: deleteConversationStmt, + setConversationAttributesStmt: setConversationAttributesStmt, + setConversationACLStmt: setConversationACLStmt, + channelInsertStmts: map[ChannelID]*sql.Stmt{}, + channelUpdateMessageStmts: map[ChannelID]*sql.Stmt{}, + channelGetMessageStmts: map[ChannelID]*sql.Stmt{}, + channelGetMessageBySignatureStmts: map[ChannelID]*sql.Stmt{}, + channelGetMessageByContentHashStmts: map[ChannelID]*sql.Stmt{}, + channelGetMostRecentMessagesStmts: map[ChannelID]*sql.Stmt{}, + channelGetCountStmts: map[ChannelID]*sql.Stmt{}, + channelRowNumberStmts: map[ChannelID]*sql.Stmt{}, + }, + nil +} + +// StoreProfileKeyValue allows storing of typed Key/Value attribute in the Storage Engine +func (cps *CwtchProfileStorage) StoreProfileKeyValue(keyType StorageKeyType, key string, value []byte) error { + _, err := cps.insertProfileKeyValueStmt.Exec(keyType, key, value) + if err != nil { + log.Errorf("error executing query: %v", err) + return err + } + return nil +} + +// LoadProfileKeyValue allows fetching of typed values via a known Key from the Storage Engine +func (cps *CwtchProfileStorage) LoadProfileKeyValue(keyType StorageKeyType, key string) ([]byte, error) { + rows, err := cps.selectProfileKeyValueStmt.Query(keyType, key) + if err != nil { + log.Errorf("error executing query: %v", err) + return nil, err + } + + result := rows.Next() + + if !result { + return nil, errors.New("no result found") + } + + var keyValue []byte + err = rows.Scan(&keyValue) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return nil, err + } + rows.Close() + return keyValue, nil +} + +// NewConversation stores a new conversation in the data store +func (cps *CwtchProfileStorage) NewConversation(handle string, attributes model.Attributes, acl model.AccessControlList, accepted bool) (int, error) { + tx, err := cps.db.Begin() + + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, err + } + + result, err := tx.Stmt(cps.insertConversationStmt).Exec(handle, attributes.Serialize(), acl.Serialize(), accepted) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, tx.Rollback() + } + + id, err := result.LastInsertId() + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, tx.Rollback() + } + + result, err = tx.Exec(fmt.Sprintf(createTableConversationMessagesSQLStmt, id)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, tx.Rollback() + } + + conversationID, err := result.LastInsertId() + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, tx.Rollback() + } + + err = 
tx.Commit() + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, tx.Rollback() + } + + return int(conversationID), nil +} + +// GetConversationByHandle is a convenience method to fetch an active conversation by a handle +// Usage Notes: This should **only** be used to look up p2p conversations by convention. +// Ideally this function should not exist, and all lookups should happen by ID (this is currently +// unavoidable in some circumstances because the event bus references conversations by handle, not by id) +func (cps *CwtchProfileStorage) GetConversationByHandle(handle string) (*model.Conversation, error) { + rows, err := cps.selectConversationByHandleStmt.Query(handle) + if err != nil { + log.Errorf("error executing query: %v", err) + return nil, err + } + + result := rows.Next() + + if !result { + return nil, errors.New("no result found") + } + + var id int + var acl []byte + var attributes []byte + var accepted bool + err = rows.Scan(&id, &handle, &attributes, &acl, &accepted) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return nil, err + } + rows.Close() + + return &model.Conversation{ID: id, Handle: handle, ACL: model.DeserializeAccessControlList(acl), Attributes: model.DeserializeAttributes(attributes), Accepted: accepted}, nil +} + +// FetchConversations returns *all* active conversations. This method should only be called +// on app start up to build a summary of conversations for the UI. Any further updates should be integrated +// through the event bus. +func (cps *CwtchProfileStorage) FetchConversations() ([]*model.Conversation, error) { + rows, err := cps.fetchAllConversationsStmt.Query() + if err != nil { + log.Errorf("error executing query: %v", err) + return nil, err + } + + var conversations []*model.Conversation + + defer rows.Close() + for { + result := rows.Next() + + if !result { + return conversations, nil + } + + var id int + var handle string + var acl []byte + var attributes []byte + var accepted bool + err = rows.Scan(&id, &handle, &attributes, &acl, &accepted) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return nil, err + } + conversations = append(conversations, &model.Conversation{ID: id, Handle: handle, ACL: model.DeserializeAccessControlList(acl), Attributes: model.DeserializeAttributes(attributes), Accepted: accepted}) + + } +} + +// GetConversation looks up a particular conversation by id +func (cps *CwtchProfileStorage) GetConversation(id int) (*model.Conversation, error) { + rows, err := cps.selectConversationStmt.Query(id) + if err != nil { + log.Errorf("error executing query: %v", err) + return nil, err + } + + result := rows.Next() + + if !result { + return nil, errors.New("no result found") + } + + var handle string + var acl []byte + var attributes []byte + var accepted bool + err = rows.Scan(&id, &handle, &attributes, &acl, &accepted) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return nil, err + } + rows.Close() + + return &model.Conversation{ID: id, Handle: handle, ACL: model.DeserializeAccessControlList(acl), Attributes: model.DeserializeAttributes(attributes), Accepted: accepted}, nil +} + +// AcceptConversation sets the accepted status of a conversation to true in the backing datastore +func (cps *CwtchProfileStorage) AcceptConversation(id int) error { + _, err := cps.acceptConversationStmt.Exec(id) + if err != nil { + log.Errorf("error executing query: %v", err) + return err + } + return nil +} + +// 
DeleteConversation purges the conversation and any associated message history from the conversation store. +func (cps *CwtchProfileStorage) DeleteConversation(id int) error { + _, err := cps.deleteConversationStmt.Exec(id) + if err != nil { + log.Errorf("error executing query: %v", err) + return err + } + return nil +} + +// SetConversationACL sets a new ACL on a given conversation. +func (cps *CwtchProfileStorage) SetConversationACL(id int, acl model.AccessControlList) error { + _, err := cps.setConversationACLStmt.Exec(acl, id) + if err != nil { + log.Errorf("error executing query: %v", err) + return err + } + return nil +} + +// SetConversationAttribute sets a new attribute on a given conversation. +func (cps *CwtchProfileStorage) SetConversationAttribute(id int, path attr.ScopedZonedPath, value string) error { + ci, err := cps.GetConversation(id) + if err != nil { + return err + } + ci.Attributes[path.ToString()] = value + _, err = cps.setConversationAttributesStmt.Exec(ci.Attributes.Serialize(), id) + if err != nil { + log.Errorf("error executing query: %v", err) + return err + } + return nil +} + +// InsertMessage appends a message to a conversation channel, with a given set of attributes +func (cps *CwtchProfileStorage) InsertMessage(conversation int, channel int, body string, attributes model.Attributes, signature string, contentHash string) error { + + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelInsertStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(insertMessageIntoConversationSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return err + } + cps.channelInsertStmts[channelID] = conversationStmt + } + + _, err := cps.channelInsertStmts[channelID].Exec(body, attributes.Serialize(), signature, contentHash) + if err != nil { + log.Errorf("error inserting message: %v %v", signature, err) + return err + } + + return nil +} + +// UpdateMessageAttributes updates the attributes associated with a message of a given conversation +func (cps *CwtchProfileStorage) UpdateMessageAttributes(conversation int, channel int, messageID int, attributes model.Attributes) error { + + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelUpdateMessageStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(updateMessageIntoConversationSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return err + } + cps.channelUpdateMessageStmts[channelID] = conversationStmt + } + + _, err := cps.channelUpdateMessageStmts[channelID].Exec(attributes.Serialize(), messageID) + if err != nil { + log.Errorf("error updating message: %v", err) + return err + } + + return nil +} + +// GetChannelMessageBySignature looks up a conversation message by signature instead of identifier. Both are unique but +// signatures are common between conversation participants (in groups) and so are a more useful message to index. 
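+// It returns the message ID on success, or an error if no message with that
+// signature exists in the given channel. A rough caller-side sketch of the
+// dedup-or-insert pattern (illustrative only; convID and sig are placeholders):
+//
+//	id, err := cps.GetChannelMessageBySignature(convID, 0, sig)
+//	if err == nil {
+//		// already stored locally: mark message id as acknowledged
+//	} else {
+//		// not seen before: insert it along with sig and its content hash
+//	}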
+func (cps *CwtchProfileStorage) GetChannelMessageBySignature(conversation int, channel int, signature string) (int, error) { + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelGetMessageBySignatureStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageBySignatureFromConversationSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, err + } + cps.channelGetMessageBySignatureStmts[channelID] = conversationStmt + } + + rows, err := cps.channelGetMessageBySignatureStmts[channelID].Query(signature) + if err != nil { + log.Errorf("error executing query: %v", err) + return -1, err + } + + result := rows.Next() + + if !result { + return -1, errors.New("no result found") + } + + var id int + err = rows.Scan(&id) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return -1, err + } + rows.Close() + return id, nil +} + +// GetChannelMessageByContentHash looks up a conversation message by hash instead of identifier. +func (cps *CwtchProfileStorage) GetChannelMessageByContentHash(conversation int, channel int, hash string) (int, error) { + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelGetMessageByContentHashStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageByContentHashFromConversationSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, err + } + cps.channelGetMessageByContentHashStmts[channelID] = conversationStmt + } + + rows, err := cps.channelGetMessageByContentHashStmts[channelID].Query(hash) + if err != nil { + log.Errorf("error executing query: %v", err) + return -1, err + } + + result := rows.Next() + + if !result { + return -1, errors.New("no result found") + } + + var id int + err = rows.Scan(&id) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return -1, err + } + rows.Close() + + // Return the offset **not** the count + return id - 1, nil +} + +// GetRowNumberByMessageID looks up the row number of a message by the message ID +func (cps *CwtchProfileStorage) GetRowNumberByMessageID(conversation int, channel int, id int) (int, error) { + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelRowNumberStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getLocalIndexOfMessageIDSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, err + } + cps.channelRowNumberStmts[channelID] = conversationStmt + } + + rows, err := cps.channelRowNumberStmts[channelID].Query(id) + if err != nil { + log.Errorf("error executing query: %v", err) + return -1, err + } + + result := rows.Next() + + if !result { + return -1, errors.New("no result found") + } + + var rownum int + err = rows.Scan(&rownum) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return -1, err + } + rows.Close() + + return rownum, nil +} + +// GetChannelMessage looks up a channel message by conversation, channel and message id. On success it +// returns the message body and the attributes associated with the message. Otherwise an error is returned. 
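+// Attributes are stored as a serialized blob and are deserialized with
+// model.DeserializeAttributes before being returned.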
+func (cps *CwtchProfileStorage) GetChannelMessage(conversation int, channel int, messageID int) (string, model.Attributes, error) { + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelGetMessageStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageFromConversationSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return "", nil, err + } + cps.channelGetMessageStmts[channelID] = conversationStmt + } + + rows, err := cps.channelGetMessageStmts[channelID].Query(messageID) + if err != nil { + log.Errorf("error executing query: %v", err) + return "", nil, err + } + + result := rows.Next() + + if !result { + return "", nil, errors.New("no result found") + } + + // Deserialize the Row + var body string + var attributes []byte + err = rows.Scan(&body, &attributes) + if err != nil { + log.Errorf("error fetching rows: %v", err) + rows.Close() + return "", nil, err + } + rows.Close() + + return body, model.DeserializeAttributes(attributes), nil +} + +// GetChannelMessageCount returns the number of messages in a channel +func (cps *CwtchProfileStorage) GetChannelMessageCount(conversation int, channel int) (int, error) { + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelGetCountStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMessageCountFromConversationSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return -1, err + } + cps.channelGetCountStmts[channelID] = conversationStmt + } + + var count int + err := cps.channelGetCountStmts[channelID].QueryRow().Scan(&count) + if err != nil { + log.Errorf("error executing query: %v", err) + return -1, err + } + return count, nil +} + +// GetMostRecentMessages returns the most recent messages in a channel up to a given limit at a given offset +func (cps *CwtchProfileStorage) GetMostRecentMessages(conversation int, channel int, offset int, limit int) ([]model.ConversationMessage, error) { + channelID := ChannelID{Conversation: conversation, Channel: channel} + + _, exists := cps.channelGetMostRecentMessagesStmts[channelID] + if !exists { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(getMostRecentMessagesSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return nil, err + } + cps.channelGetMostRecentMessagesStmts[channelID] = conversationStmt + } + + rows, err := cps.channelGetMostRecentMessagesStmts[channelID].Query(limit, offset) + if err != nil { + log.Errorf("error executing query: %v", err) + return nil, err + } + var conversationMessages []model.ConversationMessage + defer rows.Close() + for { + result := rows.Next() + if !result { + return conversationMessages, nil + } + var id int + var body string + var attributes []byte + var sig string + var contenthash string + err = rows.Scan(&id, &body, &attributes, &sig, &contenthash) + if err != nil { + return conversationMessages, err + } + conversationMessages = append(conversationMessages, model.ConversationMessage{ID: id, Body: body, Attr: model.DeserializeAttributes(attributes), Signature: sig, ContentHash: contenthash}) + } +} + +// PurgeConversationChannel deletes all message for a conversation channel. 
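+// Unlike DeleteConversation, the conversation entry itself is kept; only the
+// channel's message table is emptied.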
+func (cps *CwtchProfileStorage) PurgeConversationChannel(conversation int, channel int) error { + conversationStmt, err := cps.db.Prepare(fmt.Sprintf(purgeMessagesFromConversationSQLStmt, conversation, channel)) + if err != nil { + log.Errorf("error executing transaction: %v", err) + return err + } + conversationStmt.Exec() + return conversationStmt.Close() +} + +// PurgeNonSavedMessages deletes all message conversations that are not explicitly set to saved. +func (cps *CwtchProfileStorage) PurgeNonSavedMessages() { + // Purge Messages that are not stored... + ci, err := cps.FetchConversations() + if err == nil { + for _, conversation := range ci { + if !conversation.IsGroup() && !conversation.IsServer() { + if conversation.Attributes[attr.LocalScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(event.SaveHistoryKey)).ToString()] != event.SaveHistoryConfirmed { + log.Infof("purging conversation...") + // TODO: At some point in the future this needs to iterate over channels and make a decision for each on.. + cps.PurgeConversationChannel(conversation.ID, 0) + } + } + } + } +} + +// Close closes the underlying database and prepared statements +func (cps *CwtchProfileStorage) Close() { + if cps.db != nil { + + cps.PurgeNonSavedMessages() + + cps.insertProfileKeyValueStmt.Close() + cps.selectProfileKeyValueStmt.Close() + + cps.insertConversationStmt.Close() + cps.fetchAllConversationsStmt.Close() + cps.selectConversationStmt.Close() + cps.selectConversationByHandleStmt.Close() + cps.acceptConversationStmt.Close() + cps.deleteConversationStmt.Close() + cps.setConversationAttributesStmt.Close() + cps.setConversationACLStmt.Close() + + for _, v := range cps.channelInsertStmts { + v.Close() + } + for _, v := range cps.channelUpdateMessageStmts { + v.Close() + } + for _, v := range cps.channelGetMessageStmts { + v.Close() + } + for _, v := range cps.channelGetMessageBySignatureStmts { + v.Close() + } + for _, v := range cps.channelGetCountStmts { + v.Close() + } + for _, v := range cps.channelGetMostRecentMessagesStmts { + v.Close() + } + for _, v := range cps.channelGetMessageByContentHashStmts { + v.Close() + } + + cps.db.Close() + } +} + +// Delete unconditionally destroys the profile directory associated with the store. +// This is unrecoverable. +func (cps *CwtchProfileStorage) Delete() { + err := os.RemoveAll(cps.ProfileDirectory) + if err != nil { + log.Errorf("error deleting profile directory", err) + } +} diff --git a/peer/profile_interface.go b/peer/profile_interface.go new file mode 100644 index 0000000..e6a34d1 --- /dev/null +++ b/peer/profile_interface.go @@ -0,0 +1,118 @@ +package peer + +import ( + "cwtch.im/cwtch/event" + "cwtch.im/cwtch/model" + "cwtch.im/cwtch/model/attr" + "cwtch.im/cwtch/protocol/connections" + "git.openprivacy.ca/openprivacy/connectivity" +) + +// AccessPeeringState provides access to functions relating to the underlying connections of a peer. +type AccessPeeringState interface { + GetPeerState(string) connections.ConnectionState +} + +// ModifyPeeringState is a meta-interface intended to restrict callers to modify-only access to connection peers +type ModifyPeeringState interface { + BlockUnknownConnections() + AllowUnknownConnections() + PeerWithOnion(string) + JoinServer(string) error +} + +// ModifyContactsAndPeers is a meta-interface intended to restrict a call to reading and modifying contacts +// and peers. 
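+// At present it only embeds ModifyPeeringState.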
+type ModifyContactsAndPeers interface { + ModifyPeeringState +} + +// ReadServers provides access to the servers +type ReadServers interface { + GetServers() []string +} + +// ModifyGroups provides write-only access add/edit/remove new groups +type ModifyGroups interface { + ImportGroup(string) (int, error) + StartGroup(string, string) (int, error) +} + +// ModifyServers provides write-only access to servers +type ModifyServers interface { + AddServer(string) (string, error) + ResyncServer(onion string) error +} + +// SendMessages enables a caller to sender messages to a contact +type SendMessages interface { + SendMessage(conversation int, message string) error + SendInviteToConversation(conversationID int, inviteConversationID int) error + SendScopedZonedGetValToContact(conversationID int, scope attr.Scope, zone attr.Zone, key string) +} + +// CwtchPeer provides us with a way of testing systems built on top of cwtch without having to +// directly implement a cwtchPeer. +type CwtchPeer interface { + + // Core Cwtch Peer Functions that should not be exposed to + // most functions + Init(event.Manager) + + GenerateProtocolEngine(acn connectivity.ACN, bus event.Manager) (connections.Engine, error) + + AutoHandleEvents(events []event.Type) + Listen() + StartPeersConnections() + StartServerConnections() + Shutdown() + + // GetOnion is deprecated. If you find yourself needing to rely on this method it is time + // to consider replacing this with a GetAddress(es) function that can fully expand cwtch beyond the boundaries + // of tor v3 onion services. + // Deprecated + GetOnion() string + + // SetScopedZonedAttribute allows the setting of an attribute by scope and zone + // scope.zone.key = value + SetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string, value string) + + // GetScopedZonedAttribute allows the retrieval of an attribute by scope and zone + // scope.zone.key = value + GetScopedZonedAttribute(scope attr.Scope, zone attr.Zone, key string) (string, bool) + + AccessPeeringState + ModifyPeeringState + + ModifyGroups + + ReadServers + ModifyServers + + SendMessages + + // Import Bundle + ImportBundle(string) error + + // New Unified Conversation Interfaces + NewContactConversation(handle string, acl model.AccessControl, accepted bool) (int, error) + FetchConversations() ([]*model.Conversation, error) + GetConversationInfo(conversation int) (*model.Conversation, error) + FetchConversationInfo(handle string) (*model.Conversation, error) + AcceptConversation(conversation int) error + BlockConversation(conversation int) error + SetConversationAttribute(conversation int, path attr.ScopedZonedPath, value string) error + GetConversationAttribute(conversation int, path attr.ScopedZonedPath) (string, error) + DeleteConversation(conversation int) error + + // New Unified Conversation Channel Interfaces + GetChannelMessage(conversation int, channel int, id int) (string, model.Attributes, error) + GetChannelMessageCount(conversation int, channel int) (int, error) + GetChannelMessageByContentHash(conversation int, channel int, contenthash string) (int, error) + GetMostRecentMessages(conversation int, channel int, offset int, limit int) ([]model.ConversationMessage, error) + UpdateMessageAttribute(conversation int, channel int, id int, key string, value string) error + + ShareFile(fileKey string, serializedManifest string) + CheckPassword(password string) bool + Delete() +} diff --git a/peer/response.go b/peer/response.go new file mode 100644 index 0000000..b784323 --- /dev/null +++ 
b/peer/response.go @@ -0,0 +1,13 @@ +package peer + +import "errors" + +// Response is a wrapper to better semantically convey the response type... +type Response error + +const errorSeparator = "." + +// ConstructResponse is a helper function for creating Response structures. +func ConstructResponse(prefix string, error string) Response { + return errors.New(prefix + errorSeparator + error) +} diff --git a/peer/sql_statements.go b/peer/sql_statements.go new file mode 100644 index 0000000..f6ea511 --- /dev/null +++ b/peer/sql_statements.go @@ -0,0 +1,29 @@ +package peer + +import ( + "database/sql" + "fmt" +) + +// SQLCreateTableProfileKeyValue creates the Profile Key Value Table +const SQLCreateTableProfileKeyValue = `create table if not exists profile_kv (KeyType text, KeyName text, KeyValue blob, UNIQUE (KeyType,KeyName));` + +// SQLCreateTableConversations creates the Profile Key Value Table +const SQLCreateTableConversations = `create table if not exists conversations (ID integer unique primary key autoincrement, Handle text, Attributes blob, ACL blob, Accepted bool);` + +// initializeDatabase executes all the sql statements necessary to construct the base of the database. +// db must be open +func initializeDatabase(db *sql.DB) error { + + _, err := db.Exec(SQLCreateTableProfileKeyValue) + if err != nil { + return fmt.Errorf("error On Executing Query: %v %v", SQLCreateTableProfileKeyValue, err) + } + + _, err = db.Exec(SQLCreateTableConversations) + if err != nil { + return fmt.Errorf("error On Executing Query: %v %v", SQLCreateTableConversations, err) + } + + return nil +} diff --git a/peer/storage.go b/peer/storage.go new file mode 100644 index 0000000..3d59a7f --- /dev/null +++ b/peer/storage.go @@ -0,0 +1,167 @@ +package peer + +import ( + "crypto/rand" + "database/sql" + "errors" + "fmt" + "git.openprivacy.ca/openprivacy/log" + "golang.org/x/crypto/pbkdf2" + "golang.org/x/crypto/sha3" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +const versionFile = "VERSION" +const version = "2" +const saltFile = "SALT" + +// CreateKeySalt derives a key and salt from a password: returns key, salt, err +func CreateKeySalt(password string) ([32]byte, [128]byte, error) { + var salt [128]byte + if _, err := io.ReadFull(rand.Reader, salt[:]); err != nil { + log.Errorf("Cannot read from random: %v\n", err) + return [32]byte{}, salt, err + } + dk := pbkdf2.Key([]byte(password), salt[:], 4096, 32, sha3.New512) + + var dkr [32]byte + copy(dkr[:], dk) + return dkr, salt, nil +} + +// createKey derives a key from a password and salt +func createKey(password string, salt []byte) [32]byte { + dk := pbkdf2.Key([]byte(password), salt, 4096, 32, sha3.New512) + + var dkr [32]byte + copy(dkr[:], dk) + return dkr +} + +func initV2Directory(directory, password string) ([32]byte, [128]byte, error) { + os.Mkdir(directory, 0700) + + key, salt, err := CreateKeySalt(password) + if err != nil { + log.Errorf("Could not create key for profile store from password: %v\n", err) + return [32]byte{}, [128]byte{}, err + } + + if err = ioutil.WriteFile(path.Join(directory, versionFile), []byte(version), 0600); err != nil { + log.Errorf("Could not write version file: %v", err) + return [32]byte{}, [128]byte{}, err + } + + if err = ioutil.WriteFile(path.Join(directory, saltFile), salt[:], 0600); err != nil { + log.Errorf("Could not write salt file: %v", err) + return [32]byte{}, [128]byte{}, err + } + + return key, salt, nil +} + +func openEncryptedDatabase(profileDirectory string, password string, createIfNotExists 
bool) (*sql.DB, error) { + salt, err := ioutil.ReadFile(path.Join(profileDirectory, saltFile)) + if err != nil { + return nil, err + } + + key := createKey(password, salt) + dbPath := filepath.Join(profileDirectory, "db") + + if !createIfNotExists { + if _, err := os.Stat(dbPath); errors.Is(err, os.ErrNotExist) { + return nil, err + } + } + + dbname := fmt.Sprintf("%v?_pragma_key=x'%x'&_pragma_cipher_page_size=8192", dbPath, key) + db, err := sql.Open("sqlite3", dbname) + if err != nil { + log.Errorf("could not open encrypted database", err) + return nil, err + } + return db, nil +} + +// CreateEncryptedStorePeer creates a *new* Cwtch Profile backed by an encrypted datastore +func CreateEncryptedStorePeer(profileDirectory string, name string, password string) (CwtchPeer, error) { + log.Debugf("Initializing Encrypted Storage Directory") + _, _, err := initV2Directory(profileDirectory, password) + if err != nil { + return nil, err + } + + log.Debugf("Opening Encrypted Database") + db, err := openEncryptedDatabase(profileDirectory, password, true) + if db == nil || err != nil { + return nil, fmt.Errorf("unable to open encrypted database: error: %v", err) + } + + log.Debugf("Initializing Database") + err = initializeDatabase(db) + + if err != nil { + db.Close() + return nil, err + } + + log.Debugf("Creating Cwtch Profile Backed By Encrypted Database") + + cps, err := NewCwtchProfileStorage(db, profileDirectory) + if err != nil { + db.Close() + return nil, err + } + + return NewProfileWithEncryptedStorage(name, cps), nil +} + +// CreateEncryptedStore creates a encrypted datastore +func CreateEncryptedStore(profileDirectory string, password string) (*CwtchProfileStorage, error) { + + log.Debugf("Creating Encrypted Database") + db, err := openEncryptedDatabase(profileDirectory, password, true) + if db == nil || err != nil { + return nil, fmt.Errorf("unable to open encrypted database: error: %v", err) + } + + log.Debugf("Initializing Database") + err = initializeDatabase(db) + + if err != nil { + db.Close() + return nil, err + } + + log.Debugf("Creating Cwtch Profile Backed By Encrypted Database") + + cps, err := NewCwtchProfileStorage(db, profileDirectory) + if err != nil { + db.Close() + return nil, err + } + + return cps, nil +} + +// FromEncryptedDatabase constructs a Cwtch Profile from an existing Encrypted Database +func FromEncryptedDatabase(profileDirectory string, password string) (CwtchPeer, error) { + log.Infof("Loading Encrypted Profile: %v", profileDirectory) + db, err := openEncryptedDatabase(profileDirectory, password, false) + if db == nil || err != nil { + return nil, fmt.Errorf("unable to open encrypted database: error: %v", err) + } + + log.Debugf("Initializing Profile from Encrypted Storage") + cps, err := NewCwtchProfileStorage(db, profileDirectory) + if err != nil { + db.Close() + return nil, err + } + return FromEncryptedStorage(cps), nil +} diff --git a/protocol/connections/engine.go b/protocol/connections/engine.go index 3762e2b..03b89df 100644 --- a/protocol/connections/engine.go +++ b/protocol/connections/engine.go @@ -140,7 +140,7 @@ func (e *engine) eventHandler() { case event.InvitePeerToGroup: err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: event.ContextInvite, Data: []byte(ev.Data[event.GroupInvite])}) if err != nil { - + e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: "peer is offline or the 
connection has yet to finalize"})) } case event.JoinServer: signature, err := base64.StdEncoding.DecodeString(ev.Data[event.Signature]) @@ -274,6 +274,15 @@ func (e *engine) listenFn() { func (e *engine) Shutdown() { e.shuttingDown = true e.service.Shutdown() + + e.ephemeralServices.Range(func(_, service interface{}) bool { + connection, ok := service.(*tor.BaseOnionService) + if ok { + log.Infof("shutting down ephemeral service") + connection.Shutdown() + } + return true + }) e.queue.Shutdown() } @@ -403,13 +412,6 @@ func (e *engine) serverConnecting(onion string) { })) } -func (e *engine) serverConnected(onion string) { - e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{ - event.GroupServer: onion, - event.ConnectionState: ConnectionStateName[CONNECTED], - })) -} - func (e *engine) serverAuthed(onion string) { e.eventManager.Publish(event.NewEvent(event.ServerStateChange, map[event.Field]string{ event.GroupServer: onion, diff --git a/protocol/connections/tokenboardclientapp.go b/protocol/connections/tokenboardclientapp.go index 672426d..2b543b1 100644 --- a/protocol/connections/tokenboardclientapp.go +++ b/protocol/connections/tokenboardclientapp.go @@ -185,7 +185,7 @@ func (ta *TokenBoardClient) MakePayment() error { log.Debugf("Waiting for successful PoW Auth...") connected, err := client.Connect(ta.tokenServiceOnion, powTokenApp) - if connected == true && err == nil { + if connected && err == nil { log.Debugf("Waiting for successful Token Acquisition...") conn, err := client.WaitForCapabilityOrClose(ta.tokenServiceOnion, applications.HasTokensCapability) if err == nil { diff --git a/protocol/files/manifest.go b/protocol/files/manifest.go index b944fd8..4df5ae7 100644 --- a/protocol/files/manifest.go +++ b/protocol/files/manifest.go @@ -201,7 +201,7 @@ func (m *Manifest) StoreChunk(id uint64, contents []byte) (uint64, error) { // Write the contents of the chunk to the file _, err = m.openFd.Write(contents) - if err == nil && m.chunkComplete[id] == false { + if err == nil && !m.chunkComplete[id] { m.chunkComplete[id] = true m.progress++ } diff --git a/protocol/files/manifest_test.go b/protocol/files/manifest_test.go index c23e8b6..697073d 100644 --- a/protocol/files/manifest_test.go +++ b/protocol/files/manifest_test.go @@ -29,16 +29,16 @@ func TestManifest(t *testing.T) { t.Logf("%v", manifest) // Try to tread the chunk - contents, err := manifest.GetChunkBytes(1) + _, err = manifest.GetChunkBytes(1) if err == nil { t.Fatalf("chunk fetch should have thrown an error") } - contents, err = manifest.GetChunkBytes(0) + _, err = manifest.GetChunkBytes(0) if err != nil { t.Fatalf("chunk fetch error: %v", err) } - contents, err = manifest.GetChunkBytes(0) + _, err = manifest.GetChunkBytes(0) if err != nil { t.Fatalf("chunk fetch error: %v", err) } @@ -46,7 +46,6 @@ func TestManifest(t *testing.T) { json, _ := json.Marshal(manifest) t.Logf("%s", json) - t.Logf("%s", contents) } func TestManifestLarge(t *testing.T) { diff --git a/storage/profile_store.go b/storage/profile_store.go index a6ddae7..8c08943 100644 --- a/storage/profile_store.go +++ b/storage/profile_store.go @@ -3,25 +3,13 @@ package storage import ( "cwtch.im/cwtch/event" "cwtch.im/cwtch/model" - "cwtch.im/cwtch/storage/v0" "cwtch.im/cwtch/storage/v1" - "git.openprivacy.ca/openprivacy/log" - "io/ioutil" - "path" - "strconv" ) -const profileFilename = "profile" -const versionFile = "VERSION" -const currentVersion = 1 - // ProfileStore is an interface to managing the storage of Cwtch Profiles type 
ProfileStore interface { - Shutdown() - Delete() GetProfileCopy(timeline bool) *model.Profile GetNewPeerMessage() *event.Event - GetStatusMessages() []*event.Event CheckPassword(string) bool } @@ -33,10 +21,8 @@ func CreateProfileWriterStore(eventManager event.Manager, directory, password st // LoadProfileWriterStore loads a profile store from filestore listening for events and saving them // directory should be $appDir/profiles/$rand -func LoadProfileWriterStore(eventManager event.Manager, directory, password string) (ProfileStore, error) { - versionCheckUpgrade(directory, password) - - return v1.LoadProfileWriterStore(eventManager, directory, password) +func LoadProfileWriterStore(directory, password string) (ProfileStore, error) { + return v1.LoadProfileWriterStore(directory, password) } // ReadProfile reads a profile from storage and returns the profile @@ -46,49 +32,4 @@ func ReadProfile(directory string, key [32]byte, salt [128]byte) (*model.Profile return v1.ReadProfile(directory, key, salt) } -// NewProfile creates a new profile for use in the profile store. -func NewProfile(name string) *model.Profile { - profile := model.GenerateNewProfile(name) - return profile -} - // ********* Versioning and upgrade ********** - -func detectVersion(directory string) int { - vnumberStr, err := ioutil.ReadFile(path.Join(directory, versionFile)) - if err != nil { - return 0 - } - vnumber, err := strconv.Atoi(string(vnumberStr)) - if err != nil { - log.Errorf("Could not parse VERSION file contents: '%v' - %v\n", vnumber, err) - return -1 - } - return vnumber -} - -func upgradeV0ToV1(directory, password string) error { - log.Debugln("Attempting storage v0 to v1: Reading v0 profile...") - profile, err := v0.ReadProfile(directory, password) - if err != nil { - return err - } - - log.Debugln("Attempting storage v0 to v1: Writing v1 profile...") - return v1.UpgradeV0Profile(profile, directory, password) -} - -func versionCheckUpgrade(directory, password string) { - version := detectVersion(directory) - log.Debugf("versionCheck: %v\n", version) - if version == -1 { - return - } - if version == 0 { - err := upgradeV0ToV1(directory, password) - if err != nil { - return - } - //version = 1 - } -} diff --git a/storage/profile_store_test.go b/storage/profile_store_test.go deleted file mode 100644 index ed71ad8..0000000 --- a/storage/profile_store_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Known race issue with event bus channel closure - -package storage - -import ( - "cwtch.im/cwtch/event" - "cwtch.im/cwtch/model" - "cwtch.im/cwtch/storage/v0" - "fmt" - "git.openprivacy.ca/openprivacy/log" - "os" - "testing" - "time" -) - -const testingDir = "./testing" -const filenameBase = "testStream" -const password = "asdfqwer" -const line1 = "Hello from storage!" 
-const testProfileName = "Alice" -const testKey = "key" -const testVal = "value" -const testInitialMessage = "howdy" -const testMessage = "Hello from storage" - -func TestProfileStoreUpgradeV0toV1(t *testing.T) { - log.SetLevel(log.LevelDebug) - os.RemoveAll(testingDir) - eventBus := event.NewEventManager() - - queue := event.NewQueue() - eventBus.Subscribe(event.ChangePasswordSuccess, queue) - - fmt.Println("Creating and initializing v0 profile and store...") - profile := NewProfile(testProfileName) - profile.AddContact("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", &model.PublicProfile{Attributes: map[string]string{string(model.KeyTypeServerOnion): "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"}}) - - ps1 := v0.NewProfileWriterStore(eventBus, testingDir, password, profile) - - groupid, invite, err := profile.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - if err != nil { - t.Errorf("Creating group: %v\n", err) - } - if err != nil { - t.Errorf("Creating group invite: %v\n", err) - } - - ps1.AddGroup(invite) - - fmt.Println("Sending 200 messages...") - - for i := 0; i < 200; i++ { - ps1.AddGroupMessage(groupid, time.Now().Format(time.RFC3339Nano), time.Now().Format(time.RFC3339Nano), profile.Onion, testMessage, []byte{byte(i)}) - } - - fmt.Println("Shutdown v0 profile store...") - ps1.Shutdown() - - fmt.Println("New v1 Profile store...") - ps2, err := LoadProfileWriterStore(eventBus, testingDir, password) - if err != nil { - t.Errorf("Error createing new profileStore with new password: %v\n", err) - return - } - - profile2 := ps2.GetProfileCopy(true) - - if profile2.Groups[groupid] == nil { - t.Errorf("Failed to load group %v\n", groupid) - return - } - - if len(profile2.Groups[groupid].Timeline.Messages) != 200 { - t.Errorf("Failed to load group's 200 messages, instead got %v\n", len(profile2.Groups[groupid].Timeline.Messages)) - } -} diff --git a/storage/v0/file_enc.go b/storage/v0/file_enc.go deleted file mode 100644 index 5b885bd..0000000 --- a/storage/v0/file_enc.go +++ /dev/null @@ -1,70 +0,0 @@ -package v0 - -import ( - "crypto/rand" - "errors" - "git.openprivacy.ca/openprivacy/log" - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/pbkdf2" - "golang.org/x/crypto/sha3" - "io" - "io/ioutil" - "path" -) - -// createKey derives a key from a password -func createKey(password string) ([32]byte, [128]byte, error) { - var salt [128]byte - if _, err := io.ReadFull(rand.Reader, salt[:]); err != nil { - log.Errorf("Cannot read from random: %v\n", err) - return [32]byte{}, salt, err - } - dk := pbkdf2.Key([]byte(password), salt[:], 4096, 32, sha3.New512) - - var dkr [32]byte - copy(dkr[:], dk) - return dkr, salt, nil -} - -//encryptFileData encrypts the cwtchPeer via the specified key. -func encryptFileData(data []byte, key [32]byte) ([]byte, error) { - var nonce [24]byte - - if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil { - log.Errorf("Cannot read from random: %v\n", err) - return nil, err - } - - encrypted := secretbox.Seal(nonce[:], data, &nonce, &key) - return encrypted, nil -} - -//decryptFile decrypts the passed ciphertext into a cwtchPeer via the specified key. 
-func decryptFile(ciphertext []byte, key [32]byte) ([]byte, error) { - var decryptNonce [24]byte - copy(decryptNonce[:], ciphertext[:24]) - decrypted, ok := secretbox.Open(nil, ciphertext[24:], &decryptNonce, &key) - if ok { - return decrypted, nil - } - return nil, errors.New("Failed to decrypt") -} - -// Load instantiates a cwtchPeer from the file store -func readEncryptedFile(directory, filename, password string) ([]byte, error) { - encryptedbytes, err := ioutil.ReadFile(path.Join(directory, filename)) - if err == nil && len(encryptedbytes) > 128 { - var dkr [32]byte - //Separate the salt from the encrypted bytes, then generate the derived key - salt, encryptedbytes := encryptedbytes[0:128], encryptedbytes[128:] - dk := pbkdf2.Key([]byte(password), salt, 4096, 32, sha3.New512) - copy(dkr[:], dk) - - data, err := decryptFile(encryptedbytes, dkr) - if err == nil { - return data, nil - } - return nil, err - } - return nil, err -} diff --git a/storage/v0/file_store.go b/storage/v0/file_store.go deleted file mode 100644 index 06b6ee1..0000000 --- a/storage/v0/file_store.go +++ /dev/null @@ -1,46 +0,0 @@ -package v0 - -import ( - "io/ioutil" - "path" -) - -// fileStore stores a cwtchPeer in an encrypted file -type fileStore struct { - directory string - filename string - password string -} - -// FileStore is a primitive around storing encrypted files -type FileStore interface { - Read() ([]byte, error) - Write(data []byte) error -} - -// NewFileStore instantiates a fileStore given a filename and a password -func NewFileStore(directory string, filename string, password string) FileStore { - filestore := new(fileStore) - filestore.password = password - filestore.filename = filename - filestore.directory = directory - return filestore -} - -func (fps *fileStore) Read() ([]byte, error) { - return readEncryptedFile(fps.directory, fps.filename, fps.password) -} - -// write serializes a cwtchPeer to a file -func (fps *fileStore) Write(data []byte) error { - key, salt, _ := createKey(fps.password) - encryptedbytes, err := encryptFileData(data, key) - if err != nil { - return err - } - - // the salt for the derived key is appended to the front of the file - encryptedbytes = append(salt[:], encryptedbytes...) 
- err = ioutil.WriteFile(path.Join(fps.directory, fps.filename), encryptedbytes, 0600) - return err -} diff --git a/storage/v0/profile_store.go b/storage/v0/profile_store.go deleted file mode 100644 index f5aeb9d..0000000 --- a/storage/v0/profile_store.go +++ /dev/null @@ -1,120 +0,0 @@ -package v0 - -import ( - "cwtch.im/cwtch/event" - "cwtch.im/cwtch/model" - "encoding/json" - "fmt" - "os" - "time" -) - -const groupIDLen = 32 -const peerIDLen = 56 -const profileFilename = "profile" - -// ProfileStoreV0 is a legacy profile store used now for upgrading legacy profile stores to newer versions -type ProfileStoreV0 struct { - fs FileStore - streamStores map[string]StreamStore // map [groupId|onion] StreamStore - directory string - password string - profile *model.Profile -} - -// NewProfileWriterStore returns a profile store backed by a filestore listening for events and saving them -// directory should be $appDir/profiles/$rand -func NewProfileWriterStore(eventManager event.Manager, directory, password string, profile *model.Profile) *ProfileStoreV0 { - os.Mkdir(directory, 0700) - ps := &ProfileStoreV0{fs: NewFileStore(directory, profileFilename, password), password: password, directory: directory, profile: profile, streamStores: map[string]StreamStore{}} - if profile != nil { - ps.save() - } - - return ps -} - -// ReadProfile reads a profile from storqage and returns the profile -// directory should be $appDir/profiles/$rand -func ReadProfile(directory, password string) (*model.Profile, error) { - os.Mkdir(directory, 0700) - ps := &ProfileStoreV0{fs: NewFileStore(directory, profileFilename, password), password: password, directory: directory, profile: nil, streamStores: map[string]StreamStore{}} - - err := ps.Load() - if err != nil { - return nil, err - } - - profile := ps.getProfileCopy(true) - - return profile, nil -} - -/********************************************************************************************/ - -// AddGroup For testing, adds a group to the profile (and starts a stream store) -func (ps *ProfileStoreV0) AddGroup(invite string) { - gid, err := ps.profile.ProcessInvite(invite) - if err == nil { - ps.save() - group := ps.profile.Groups[gid] - ps.streamStores[group.GroupID] = NewStreamStore(ps.directory, group.LocalID, ps.password) - } -} - -// AddGroupMessage for testing, adds a group message -func (ps *ProfileStoreV0) AddGroupMessage(groupid string, timeSent, timeRecvied string, remotePeer, data string, signature []byte) { - received, _ := time.Parse(time.RFC3339Nano, timeRecvied) - sent, _ := time.Parse(time.RFC3339Nano, timeSent) - message := model.Message{Received: received, Timestamp: sent, Message: data, PeerID: remotePeer, Signature: signature, PreviousMessageSig: []byte("PreviousSignature")} - ss, exists := ps.streamStores[groupid] - if exists { - ss.Write(message) - } else { - fmt.Println("ERROR") - } -} - -// GetNewPeerMessage is for AppService to call on Reload events, to reseed the AppClient with the loaded peers -func (ps *ProfileStoreV0) GetNewPeerMessage() *event.Event { - message := event.NewEventList(event.NewPeer, event.Identity, ps.profile.LocalID, event.Password, ps.password, event.Status, "running") - return &message -} - -// Load instantiates a cwtchPeer from the file store -func (ps *ProfileStoreV0) Load() error { - decrypted, err := ps.fs.Read() - if err != nil { - return err - } - cp := new(model.Profile) - err = json.Unmarshal(decrypted, &cp) - if err == nil { - ps.profile = cp - - for gid, group := range cp.Groups { - ss := 
NewStreamStore(ps.directory, group.LocalID, ps.password) - - cp.Groups[gid].Timeline.SetMessages(ss.Read()) - ps.streamStores[group.GroupID] = ss - } - } - - return err -} - -func (ps *ProfileStoreV0) getProfileCopy(timeline bool) *model.Profile { - return ps.profile.GetCopy(timeline) -} - -// Shutdown saves the storage system -func (ps *ProfileStoreV0) Shutdown() { - ps.save() -} - -/************* Writing *************/ - -func (ps *ProfileStoreV0) save() error { - bytes, _ := json.Marshal(ps.profile) - return ps.fs.Write(bytes) -} diff --git a/storage/v0/profile_store_test.go b/storage/v0/profile_store_test.go deleted file mode 100644 index cd5db41..0000000 --- a/storage/v0/profile_store_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Known race issue with event bus channel closure - -package v0 - -import ( - "cwtch.im/cwtch/event" - "cwtch.im/cwtch/model" - "log" - "os" - "testing" - "time" -) - -const testProfileName = "Alice" -const testKey = "key" -const testVal = "value" -const testInitialMessage = "howdy" -const testMessage = "Hello from storage" - -// NewProfile creates a new profile for use in the profile store. -func NewProfile(name string) *model.Profile { - profile := model.GenerateNewProfile(name) - return profile -} - -func TestProfileStoreWriteRead(t *testing.T) { - log.Println("profile store test!") - os.RemoveAll(testingDir) - eventBus := event.NewEventManager() - profile := NewProfile(testProfileName) - ps1 := NewProfileWriterStore(eventBus, testingDir, password, profile) - - profile.SetAttribute(testKey, testVal) - - groupid, invite, err := profile.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - if err != nil { - t.Errorf("Creating group: %v\n", err) - } - if err != nil { - t.Errorf("Creating group invite: %v\n", err) - } - - ps1.AddGroup(invite) - - ps1.AddGroupMessage(groupid, time.Now().Format(time.RFC3339Nano), time.Now().Format(time.RFC3339Nano), ps1.getProfileCopy(true).Onion, testMessage, []byte{byte(0x01)}) - - ps1.Shutdown() - - ps2 := NewProfileWriterStore(eventBus, testingDir, password, nil) - err = ps2.Load() - if err != nil { - t.Errorf("Error createing ProfileStoreV0: %v\n", err) - } - - profile = ps2.getProfileCopy(true) - if profile.Name != testProfileName { - t.Errorf("Profile name from loaded profile incorrect. Expected: '%v' Actual: '%v'\n", testProfileName, profile.Name) - } - - v, _ := profile.GetAttribute(testKey) - if v != testVal { - t.Errorf("Profile attribute '%v' incorrect. 
Expected: '%v' Actual: '%v'\n", testKey, testVal, v) - } - - group2 := ps2.getProfileCopy(true).Groups[groupid] - if group2 == nil { - t.Errorf("Group not loaded\n") - } - -} diff --git a/storage/v0/stream_store.go b/storage/v0/stream_store.go deleted file mode 100644 index 22bf385..0000000 --- a/storage/v0/stream_store.go +++ /dev/null @@ -1,145 +0,0 @@ -package v0 - -import ( - "cwtch.im/cwtch/model" - "encoding/json" - "fmt" - "git.openprivacy.ca/openprivacy/log" - "io/ioutil" - "os" - "path" - "sync" -) - -const ( - fileStorePartitions = 16 - bytesPerFile = 15 * 1024 -) - -// streamStore is a file-backed implementation of StreamStore using an in memory buffer of ~16KB and a rotating set of files -type streamStore struct { - password string - - storeDirectory string - filenameBase string - - lock sync.Mutex - - // Buffer is used just for current file to write to - messages []model.Message - bufferByteCount int -} - -// StreamStore provides a stream like interface to encrypted storage -type StreamStore interface { - Read() []model.Message - Write(m model.Message) -} - -// NewStreamStore returns an initialized StreamStore ready for reading and writing -func NewStreamStore(directory string, filenameBase string, password string) (store StreamStore) { - ss := &streamStore{storeDirectory: directory, filenameBase: filenameBase, password: password} - os.Mkdir(ss.storeDirectory, 0700) - - ss.initBuffer() - - return ss -} - -// Read returns all messages from the backing file (not the buffer, for writing to the current file) -func (ss *streamStore) Read() (messages []model.Message) { - ss.lock.Lock() - defer ss.lock.Unlock() - - resp := []model.Message{} - - for i := fileStorePartitions - 1; i >= 0; i-- { - filename := fmt.Sprintf("%s.%d", ss.filenameBase, i) - - bytes, err := readEncryptedFile(ss.storeDirectory, filename, ss.password) - if err != nil { - continue - } - - msgs := []model.Message{} - json.Unmarshal([]byte(bytes), &msgs) - resp = append(resp, msgs...) 
- } - - // 2019.10.10 "Acknowledged" & "ReceivedByServer" are added to the struct, populate it as true for old ones without - for i := 0; i < len(resp) && (resp[i].Acknowledged == false && resp[i].ReceivedByServer == false); i++ { - resp[i].Acknowledged = true - resp[i].ReceivedByServer = true - } - - return resp -} - -// ****** Writing *******/ - -func (ss *streamStore) WriteN(messages []model.Message) { - ss.lock.Lock() - defer ss.lock.Unlock() - - for _, m := range messages { - ss.updateBuffer(m) - - if ss.bufferByteCount > bytesPerFile { - ss.updateFile() - log.Debugf("rotating log file") - ss.rotateFileStore() - ss.initBuffer() - } - } -} - -// Write adds a GroupMessage to the store -func (ss *streamStore) Write(m model.Message) { - ss.lock.Lock() - defer ss.lock.Unlock() - ss.updateBuffer(m) - ss.updateFile() - - if ss.bufferByteCount > bytesPerFile { - log.Debugf("rotating log file") - ss.rotateFileStore() - ss.initBuffer() - } -} - -func (ss *streamStore) initBuffer() { - ss.messages = []model.Message{} - ss.bufferByteCount = 0 -} - -func (ss *streamStore) updateBuffer(m model.Message) { - ss.messages = append(ss.messages, m) - ss.bufferByteCount += (104 * 1.5) + len(m.Message) -} - -func (ss *streamStore) updateFile() error { - msgs, err := json.Marshal(ss.messages) - if err != nil { - log.Errorf("Failed to marshal group messages %v\n", err) - } - - // ENCRYPT - key, salt, _ := createKey(ss.password) - encryptedMsgs, err := encryptFileData(msgs, key) - if err != nil { - log.Errorf("Failed to encrypt messages: %v\n", err) - return err - } - encryptedMsgs = append(salt[:], encryptedMsgs...) - - ioutil.WriteFile(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, 0)), encryptedMsgs, 0700) - return nil -} - -func (ss *streamStore) rotateFileStore() { - os.Remove(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, fileStorePartitions-1))) - - for i := fileStorePartitions - 2; i >= 0; i-- { - os.Rename(path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, i)), path.Join(ss.storeDirectory, fmt.Sprintf("%s.%d", ss.filenameBase, i+1))) - } -} diff --git a/storage/v0/stream_store_test.go b/storage/v0/stream_store_test.go deleted file mode 100644 index 7632f36..0000000 --- a/storage/v0/stream_store_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package v0 - -import ( - "cwtch.im/cwtch/model" - "os" - "testing" -) - -const testingDir = "./testing" -const filenameBase = "testStream" -const password = "asdfqwer" -const line1 = "Hello from storage!" - -func TestStreamStoreWriteRead(t *testing.T) { - os.Remove(".test.json") - os.RemoveAll(testingDir) - os.Mkdir(testingDir, 0777) - ss1 := NewStreamStore(testingDir, filenameBase, password) - m := model.Message{Message: line1} - ss1.Write(m) - - ss2 := NewStreamStore(testingDir, filenameBase, password) - messages := ss2.Read() - if len(messages) != 1 { - t.Errorf("Read messages has wrong length. Expected: 1 Actual: %d\n", len(messages)) - } - if messages[0].Message != line1 { - t.Errorf("Read message has wrong content. 
Expected: '%v' Actual: '%v'\n", line1, messages[0].Message) - } -} - -func TestStreamStoreWriteReadRotate(t *testing.T) { - os.Remove(".test.json") - os.RemoveAll(testingDir) - os.Mkdir(testingDir, 0777) - ss1 := NewStreamStore(testingDir, filenameBase, password) - m := model.Message{Message: line1} - for i := 0; i < 400; i++ { - ss1.Write(m) - } - - ss2 := NewStreamStore(testingDir, filenameBase, password) - messages := ss2.Read() - if len(messages) != 400 { - t.Errorf("Read messages has wrong length. Expected: 400 Actual: %d\n", len(messages)) - } - if messages[0].Message != line1 { - t.Errorf("Read message has wrong content. Expected: '%v' Actual: '%v'\n", line1, messages[0].Message) - } -} diff --git a/storage/v1/file_enc.go b/storage/v1/file_enc.go index 38cc063..6afa192 100644 --- a/storage/v1/file_enc.go +++ b/storage/v1/file_enc.go @@ -56,7 +56,7 @@ func DecryptFile(ciphertext []byte, key [32]byte) ([]byte, error) { if ok { return decrypted, nil } - return nil, errors.New("Failed to decrypt") + return nil, errors.New("failed to decrypt") } // ReadEncryptedFile reads data from an encrypted file in directory with key diff --git a/storage/v1/profile_store.go b/storage/v1/profile_store.go index a76c9cf..88078d5 100644 --- a/storage/v1/profile_store.go +++ b/storage/v1/profile_store.go @@ -3,18 +3,13 @@ package v1 import ( "cwtch.im/cwtch/event" "cwtch.im/cwtch/model" - "encoding/base64" "encoding/json" "git.openprivacy.ca/openprivacy/log" "io/ioutil" "os" "path" - "strconv" - "time" ) -const groupIDLen = 32 -const peerIDLen = 56 const profileFilename = "profile" const version = "1" const versionFile = "VERSION" @@ -22,15 +17,11 @@ const saltFile = "SALT" //ProfileStoreV1 storage for profiles and message streams that uses in memory key and fs stored salt instead of in memory password type ProfileStoreV1 struct { - fs FileStore - streamStores map[string]StreamStore // map [groupId|onion] StreamStore - directory string - profile *model.Profile - key [32]byte - salt [128]byte - eventManager event.Manager - queue event.Queue - writer bool + fs FileStore + directory string + profile *model.Profile + key [32]byte + salt [128]byte } // CheckPassword returns true if the given password produces the same key as the current stored key, otherwise false. 
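// A minimal sketch of the check that the CheckPassword comment above describes,
// built only from what this diff shows: the SALT file persisted beside the profile
// and CreateKey(password, salt) producing the [32]byte key. It assumes it sits in
// the surrounding storage/v1 package (saltFile, ioutil and path are already in scope
// there); the helper name and signature are illustrative, not part of this change.
func checkPasswordForStore(directory, password string, current [32]byte) (bool, error) {
	salt, err := ioutil.ReadFile(path.Join(directory, saltFile))
	if err != nil {
		return false, err
	}
	// Re-derive the key from the supplied password and the stored salt; [32]byte
	// arrays compare by value in Go, so == compares the whole key.
	return CreateKey(password, salt) == current, nil
}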
@@ -70,42 +61,14 @@ func CreateProfileWriterStore(eventManager event.Manager, directory, password st return nil } - ps := &ProfileStoreV1{fs: NewFileStore(directory, profileFilename, key), key: key, salt: salt, directory: directory, profile: profile, eventManager: eventManager, streamStores: map[string]StreamStore{}, writer: true} - ps.save() - - ps.initProfileWriterStore() + ps := &ProfileStoreV1{fs: NewFileStore(directory, profileFilename, key), key: key, salt: salt, directory: directory, profile: profile} return ps } -func (ps *ProfileStoreV1) initProfileWriterStore() { - ps.queue = event.NewQueue() - go ps.eventHandler() - - ps.eventManager.Subscribe(event.SetPeerAuthorization, ps.queue) - ps.eventManager.Subscribe(event.PeerCreated, ps.queue) - ps.eventManager.Subscribe(event.GroupCreated, ps.queue) - ps.eventManager.Subscribe(event.SetAttribute, ps.queue) - ps.eventManager.Subscribe(event.SetPeerAttribute, ps.queue) - ps.eventManager.Subscribe(event.SetGroupAttribute, ps.queue) - ps.eventManager.Subscribe(event.AcceptGroupInvite, ps.queue) - ps.eventManager.Subscribe(event.RejectGroupInvite, ps.queue) - ps.eventManager.Subscribe(event.NewGroup, ps.queue) - ps.eventManager.Subscribe(event.NewMessageFromGroup, ps.queue) - ps.eventManager.Subscribe(event.SendMessageToPeer, ps.queue) - ps.eventManager.Subscribe(event.PeerAcknowledgement, ps.queue) - ps.eventManager.Subscribe(event.NewMessageFromPeer, ps.queue) - ps.eventManager.Subscribe(event.PeerStateChange, ps.queue) - ps.eventManager.Subscribe(event.ServerStateChange, ps.queue) - ps.eventManager.Subscribe(event.DeleteContact, ps.queue) - ps.eventManager.Subscribe(event.DeleteGroup, ps.queue) - ps.eventManager.Subscribe(event.ChangePassword, ps.queue) - ps.eventManager.Subscribe(event.UpdateMessageFlags, ps.queue) -} - // LoadProfileWriterStore loads a profile store from filestore listening for events and saving them // directory should be $appDir/profiles/$rand -func LoadProfileWriterStore(eventManager event.Manager, directory, password string) (*ProfileStoreV1, error) { +func LoadProfileWriterStore(directory, password string) (*ProfileStoreV1, error) { salt, err := ioutil.ReadFile(path.Join(directory, saltFile)) if err != nil { return nil, err @@ -113,7 +76,7 @@ func LoadProfileWriterStore(eventManager event.Manager, directory, password stri key := CreateKey(password, salt) - ps := &ProfileStoreV1{fs: NewFileStore(directory, profileFilename, key), key: key, directory: directory, profile: nil, eventManager: eventManager, streamStores: map[string]StreamStore{}, writer: true} + ps := &ProfileStoreV1{fs: NewFileStore(directory, profileFilename, key), key: key, directory: directory, profile: nil} copy(ps.salt[:], salt) err = ps.load() @@ -121,7 +84,6 @@ func LoadProfileWriterStore(eventManager event.Manager, directory, password stri return nil, err } - ps.initProfileWriterStore() return ps, nil } @@ -129,7 +91,7 @@ func LoadProfileWriterStore(eventManager event.Manager, directory, password stri // directory should be $appDir/profiles/$rand func ReadProfile(directory string, key [32]byte, salt [128]byte) (*model.Profile, error) { os.Mkdir(directory, 0700) - ps := &ProfileStoreV1{fs: NewFileStore(directory, profileFilename, key), key: key, salt: salt, directory: directory, profile: nil, eventManager: nil, streamStores: map[string]StreamStore{}, writer: true} + ps := &ProfileStoreV1{fs: NewFileStore(directory, profileFilename, key), key: key, salt: salt, directory: directory, profile: nil} err := ps.load() if err != nil { @@ -141,143 
+103,12 @@ func ReadProfile(directory string, key [32]byte, salt [128]byte) (*model.Profile return profile, nil } -// UpgradeV0Profile takes a profile (presumably from a V0 store) and creates and writes a V1 store -func UpgradeV0Profile(profile *model.Profile, directory, password string) error { - key, salt, err := InitV1Directory(directory, password) - if err != nil { - return err - } - - ps := &ProfileStoreV1{fs: NewFileStore(directory, profileFilename, key), key: key, salt: salt, directory: directory, profile: profile, eventManager: nil, streamStores: map[string]StreamStore{}, writer: true} - ps.save() - - for gid, group := range ps.profile.Groups { - ss := NewStreamStore(ps.directory, group.LocalID, ps.key) - ss.WriteN(ps.profile.Groups[gid].Timeline.Messages) - } - - return nil -} - -// NewProfile creates a new profile for use in the profile store. -func NewProfile(name string) *model.Profile { - profile := model.GenerateNewProfile(name) - return profile -} - // GetNewPeerMessage is for AppService to call on Reload events, to reseed the AppClient with the loaded peers func (ps *ProfileStoreV1) GetNewPeerMessage() *event.Event { message := event.NewEventList(event.NewPeer, event.Identity, ps.profile.LocalID, event.Key, string(ps.key[:]), event.Salt, string(ps.salt[:])) return &message } -// GetStatusMessages creates an array of status messages for all peers and group servers from current information -func (ps *ProfileStoreV1) GetStatusMessages() []*event.Event { - messages := []*event.Event{} - for _, contact := range ps.profile.Contacts { - message := event.NewEvent(event.PeerStateChange, map[event.Field]string{ - event.RemotePeer: string(contact.Onion), - event.ConnectionState: contact.State, - }) - messages = append(messages, &message) - } - - doneServers := make(map[string]bool) - for _, group := range ps.profile.Groups { - if _, exists := doneServers[group.GroupServer]; !exists { - message := event.NewEvent(event.ServerStateChange, map[event.Field]string{ - event.GroupServer: string(group.GroupServer), - event.ConnectionState: group.State, - }) - messages = append(messages, &message) - doneServers[group.GroupServer] = true - } - } - - return messages -} - -// ChangePassword restores all data under a new password's encryption -func (ps *ProfileStoreV1) ChangePassword(oldpass, newpass, eventID string) { - oldkey := CreateKey(oldpass, ps.salt[:]) - - if oldkey != ps.key { - ps.eventManager.Publish(event.NewEventList(event.ChangePasswordError, event.Error, "Supplied current password does not match", event.EventID, eventID)) - return - } - - newkey := CreateKey(newpass, ps.salt[:]) - - newStreamStores := map[string]StreamStore{} - idToNewLocalID := map[string]string{} - - // Generate all new StreamStores with the new password and write all the old StreamStore data into these ones - for ssid, ss := range ps.streamStores { - // New ss with new pass and new localID - newlocalID := model.GenerateRandomID() - idToNewLocalID[ssid] = newlocalID - - newSS := NewStreamStore(ps.directory, newlocalID, newkey) - newStreamStores[ssid] = newSS - - // write whole store - messages := ss.Read() - newSS.WriteN(messages) - } - - // Switch over - oldStreamStores := ps.streamStores - ps.streamStores = newStreamStores - for ssid, newLocalID := range idToNewLocalID { - if len(ssid) == groupIDLen { - ps.profile.Groups[ssid].LocalID = newLocalID - } else { - if ps.profile.Contacts[ssid] != nil { - ps.profile.Contacts[ssid].LocalID = newLocalID - } else { - log.Errorf("Unknown Contact: %v. 
This is probably the result of corrupted development data from fuzzing. This contact will not appear in the new profile.", ssid) - } - } - } - - ps.key = newkey - ps.fs.ChangeKey(newkey) - ps.save() - - // Clean up - for _, oldss := range oldStreamStores { - oldss.Delete() - } - - ps.eventManager.Publish(event.NewEventList(event.ChangePasswordSuccess, event.EventID, eventID)) - return -} - -func (ps *ProfileStoreV1) save() error { - if ps.writer { - bytes, _ := json.Marshal(ps.profile) - return ps.fs.Write(bytes) - } - - return nil -} - -func (ps *ProfileStoreV1) regenStreamStore(messages []model.Message, contact string) { - oldss := ps.streamStores[contact] - newLocalID := model.GenerateRandomID() - newSS := NewStreamStore(ps.directory, newLocalID, ps.key) - newSS.WriteN(messages) - if len(contact) == groupIDLen { - ps.profile.Groups[contact].LocalID = newLocalID - } else { - // We can assume this exists as regen stream store should only happen to *update* a message - ps.profile.Contacts[contact].LocalID = newLocalID - } - ps.streamStores[contact] = newSS - ps.save() - oldss.Delete() -} - // load instantiates a cwtchPeer from the file store func (ps *ProfileStoreV1) load() error { decrypted, err := ps.fs.Read() @@ -301,16 +132,9 @@ func (ps *ProfileStoreV1) load() error { } } - // Check if there is any saved history... - saveHistory, keyExists := contact.GetAttribute(event.SaveHistoryKey) - if !keyExists { - contact.SetAttribute(event.SaveHistoryKey, event.DeleteHistoryDefault) - } - - if saveHistory == event.SaveHistoryConfirmed { + if contact.Attributes[event.SaveHistoryKey] == event.SaveHistoryConfirmed { ss := NewStreamStore(ps.directory, contact.LocalID, ps.key) cp.Contacts[contact.Onion].Timeline.SetMessages(ss.Read()) - ps.streamStores[contact.Onion] = ss } } @@ -320,15 +144,10 @@ func (ps *ProfileStoreV1) load() error { delete(cp.Groups, gid) continue } - ss := NewStreamStore(ps.directory, group.LocalID, ps.key) - cp.Groups[gid].Timeline.SetMessages(ss.Read()) cp.Groups[gid].Timeline.Sort() - ps.streamStores[group.GroupID] = ss } - - ps.save() } return err @@ -338,238 +157,3 @@ func (ps *ProfileStoreV1) load() error { func (ps *ProfileStoreV1) GetProfileCopy(timeline bool) *model.Profile { return ps.profile.GetCopy(timeline) } - -func (ps *ProfileStoreV1) eventHandler() { - for { - ev := ps.queue.Next() - log.Debugf("eventHandler event %v %v\n", ev.EventType, ev.EventID) - - switch ev.EventType { - case event.SetPeerAuthorization: - err := ps.profile.SetContactAuthorization(ev.Data[event.RemotePeer], model.Authorization(ev.Data[event.Authorization])) - if err == nil { - ps.save() - } - case event.PeerCreated: - var pp *model.PublicProfile - json.Unmarshal([]byte(ev.Data[event.Data]), &pp) - ps.profile.AddContact(ev.Data[event.RemotePeer], pp) - case event.GroupCreated: - var group *model.Group - json.Unmarshal([]byte(ev.Data[event.Data]), &group) - ps.profile.AddGroup(group) - ps.streamStores[group.GroupID] = NewStreamStore(ps.directory, group.LocalID, ps.key) - ps.save() - case event.SetAttribute: - ps.profile.SetAttribute(ev.Data[event.Key], ev.Data[event.Data]) - ps.save() - case event.SetPeerAttribute: - contact, exists := ps.profile.GetContact(ev.Data[event.RemotePeer]) - if exists { - contact.SetAttribute(ev.Data[event.Key], ev.Data[event.Data]) - ps.save() - - switch ev.Data[event.Key] { - case event.SaveHistoryKey: - if event.DeleteHistoryConfirmed == ev.Data[event.Data] { - ss, exists := ps.streamStores[ev.Data[event.RemotePeer]] - if exists { - ss.Delete() - 
delete(ps.streamStores, ev.Data[event.RemotePeer]) - } - } else if event.SaveHistoryConfirmed == ev.Data[event.Data] { - _, exists := ps.streamStores[ev.Data[event.RemotePeer]] - if !exists { - ss := NewStreamStore(ps.directory, contact.LocalID, ps.key) - ps.streamStores[ev.Data[event.RemotePeer]] = ss - } - } - default: - { - } - } - - } else { - log.Errorf("error setting attribute on peer %v peer does not exist", ev) - } - case event.SetGroupAttribute: - group := ps.profile.GetGroup(ev.Data[event.GroupID]) - if group != nil { - group.SetAttribute(ev.Data[event.Key], ev.Data[event.Data]) - ps.save() - } else { - log.Errorf("error setting attribute on group %v group does not exist", ev) - } - case event.AcceptGroupInvite: - err := ps.profile.AcceptInvite(ev.Data[event.GroupID]) - if err == nil { - ps.save() - } else { - log.Errorf("error accepting group invite") - } - case event.RejectGroupInvite: - ps.profile.RejectInvite(ev.Data[event.GroupID]) - ps.save() - case event.NewGroup: - gid, err := ps.profile.ProcessInvite(ev.Data[event.GroupInvite]) - if err == nil { - ps.save() - group := ps.profile.Groups[gid] - ps.streamStores[group.GroupID] = NewStreamStore(ps.directory, group.LocalID, ps.key) - } else { - log.Errorf("error storing new group invite: %v (%v)", err, ev) - } - case event.SendMessageToPeer: // cache the message till an ack, then it's given to stream store. - // stream store doesn't support updates, so we don't want to commit it till ack'd - ps.profile.AddSentMessageToContactTimeline(ev.Data[event.RemotePeer], ev.Data[event.Data], time.Now(), ev.EventID) - case event.NewMessageFromPeer: - ps.profile.AddMessageToContactTimeline(ev.Data[event.RemotePeer], ev.Data[event.Data], time.Now()) - ps.attemptSavePeerMessage(ev.Data[event.RemotePeer], ev.Data[event.Data], ev.Data[event.TimestampReceived], true) - case event.PeerAcknowledgement: - onion := ev.Data[event.RemotePeer] - eventID := ev.Data[event.EventID] - contact, ok := ps.profile.Contacts[onion] - if ok { - mIdx, ok := contact.UnacknowledgedMessages[eventID] - if ok { - message := contact.Timeline.Messages[mIdx] - ps.attemptSavePeerMessage(onion, message.Message, message.Timestamp.Format(time.RFC3339Nano), false) - } - } - ps.profile.AckSentMessageToPeer(ev.Data[event.RemotePeer], ev.Data[event.EventID]) - case event.NewMessageFromGroup: - groupid := ev.Data[event.GroupID] - received, _ := time.Parse(time.RFC3339Nano, ev.Data[event.TimestampReceived]) - sent, _ := time.Parse(time.RFC3339Nano, ev.Data[event.TimestampSent]) - sig, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature]) - prevsig, _ := base64.StdEncoding.DecodeString(ev.Data[event.PreviousSignature]) - message := model.Message{Received: received, Timestamp: sent, Message: ev.Data[event.Data], PeerID: ev.Data[event.RemotePeer], Signature: sig, PreviousMessageSig: prevsig, Acknowledged: true} - ss, exists := ps.streamStores[groupid] - if exists { - // We need to store a local copy of the message... 
- ps.profile.GetGroup(groupid).Timeline.Insert(&message) - ss.Write(message) - } else { - log.Errorf("error storing new group message: %v stream store does not exist", ev) - } - case event.PeerStateChange: - if _, exists := ps.profile.Contacts[ev.Data[event.RemotePeer]]; exists { - ps.profile.Contacts[ev.Data[event.RemotePeer]].State = ev.Data[event.ConnectionState] - } - case event.ServerStateChange: - for _, group := range ps.profile.Groups { - if group.GroupServer == ev.Data[event.GroupServer] { - group.State = ev.Data[event.ConnectionState] - } - } - case event.DeleteContact: - onion := ev.Data[event.RemotePeer] - ps.profile.DeleteContact(onion) - ps.save() - ss, exists := ps.streamStores[onion] - if exists { - ss.Delete() - delete(ps.streamStores, onion) - } - case event.DeleteGroup: - groupID := ev.Data[event.GroupID] - ps.profile.DeleteGroup(groupID) - ps.save() - ss, exists := ps.streamStores[groupID] - if exists { - ss.Delete() - delete(ps.streamStores, groupID) - } - case event.ChangePassword: - oldpass := ev.Data[event.Password] - newpass := ev.Data[event.NewPassword] - ps.ChangePassword(oldpass, newpass, ev.EventID) - case event.UpdateMessageFlags: - handle := ev.Data[event.Handle] - mIx, err := strconv.Atoi(ev.Data[event.Index]) - if err != nil { - log.Errorf("Invalid Message Index: %v", err) - return - } - flags, err := strconv.ParseUint(ev.Data[event.Flags], 2, 64) - if err != nil { - log.Errorf("Invalid Message Flags: %v", err) - return - } - ps.profile.UpdateMessageFlags(handle, mIx, flags) - if len(handle) == groupIDLen { - ps.regenStreamStore(ps.profile.GetGroup(handle).Timeline.Messages, handle) - } else if contact, exists := ps.profile.GetContact(handle); exists { - if exists { - val, _ := contact.GetAttribute(event.SaveHistoryKey) - if val == event.SaveHistoryConfirmed { - ps.regenStreamStore(contact.Timeline.Messages, handle) - } - } - } - default: - log.Debugf("shutting down profile store: %v", ev) - return - } - - } -} - -// attemptSavePeerMessage checks if the peer has been configured to save history from this peer -// and if so the peer saves the message into history. fromPeer is used to control if the message is saved -// as coming from the remote peer or if it was sent by out profile. 
-func (ps *ProfileStoreV1) attemptSavePeerMessage(peerID, messageData, timestampeReceived string, fromPeer bool) { - contact, exists := ps.profile.GetContact(peerID) - if exists { - val, _ := contact.GetAttribute(event.SaveHistoryKey) - switch val { - case event.SaveHistoryConfirmed: - { - peerID := peerID - var received time.Time - var message model.Message - if fromPeer { - received, _ = time.Parse(time.RFC3339Nano, timestampeReceived) - message = model.Message{Received: received, Timestamp: received, Message: messageData, PeerID: peerID, Signature: []byte{}, PreviousMessageSig: []byte{}} - } else { - received := time.Now() - message = model.Message{Received: received, Timestamp: received, Message: messageData, PeerID: ps.profile.Onion, Signature: []byte{}, PreviousMessageSig: []byte{}, Acknowledged: true} - } - ss, exists := ps.streamStores[peerID] - if exists { - ss.Write(message) - } else { - log.Errorf("error storing new peer message: %v stream store does not exist", peerID) - } - } - default: - { - } - } - } else { - log.Errorf("error saving message for peer that doesn't exist: %v", peerID) - } -} - -// Shutdown shuts down the queue / thread -func (ps *ProfileStoreV1) Shutdown() { - if ps.queue != nil { - ps.queue.Shutdown() - } -} - -// Delete removes all stored files for this stored profile -func (ps *ProfileStoreV1) Delete() { - log.Debugf("Delete ProfileStore for %v\n", ps.profile.Onion) - - for _, ss := range ps.streamStores { - ss.Delete() - } - - ps.fs.Delete() - - err := os.RemoveAll(ps.directory) - if err != nil { - log.Errorf("ProfileStore Delete error on RemoveAll on %v was %v\n", ps.directory, err) - } -} diff --git a/storage/v1/profile_store_test.go b/storage/v1/profile_store_test.go deleted file mode 100644 index 6cae401..0000000 --- a/storage/v1/profile_store_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Known race issue with event bus channel closure - -package v1 - -import ( - "cwtch.im/cwtch/event" - "cwtch.im/cwtch/model" - "encoding/base64" - "fmt" - "log" - "os" - "testing" - "time" -) - -const testProfileName = "Alice" -const testKey = "key" -const testVal = "value" -const testInitialMessage = "howdy" -const testMessage = "Hello from storage" - -func TestProfileStoreWriteRead(t *testing.T) { - log.Println("profile store test!") - os.RemoveAll(testingDir) - eventBus := event.NewEventManager() - profile := NewProfile(testProfileName) - // The lightest weight server entry possible (usually we would import a key bundle...) 
- profile.AddContact("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", &model.PublicProfile{Attributes: map[string]string{string(model.KeyTypeServerOnion): "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"}}) - - ps1 := CreateProfileWriterStore(eventBus, testingDir, password, profile) - - eventBus.Publish(event.NewEvent(event.SetAttribute, map[event.Field]string{event.Key: testKey, event.Data: testVal})) - time.Sleep(1 * time.Second) - - groupid, invite, err := profile.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - if err != nil { - t.Errorf("Creating group: %v\n", err) - } - if err != nil { - t.Errorf("Creating group invite: %v\n", err) - } - - eventBus.Publish(event.NewEvent(event.NewGroup, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: ps1.GetProfileCopy(true).Onion, event.GroupInvite: string(invite)})) - time.Sleep(1 * time.Second) - - eventBus.Publish(event.NewEvent(event.NewMessageFromGroup, map[event.Field]string{ - event.GroupID: groupid, - event.TimestampSent: time.Now().Format(time.RFC3339Nano), - event.TimestampReceived: time.Now().Format(time.RFC3339Nano), - event.RemotePeer: ps1.GetProfileCopy(true).Onion, - event.Data: testMessage, - })) - time.Sleep(1 * time.Second) - - ps1.Shutdown() - - ps2, err := LoadProfileWriterStore(eventBus, testingDir, password) - if err != nil { - t.Errorf("Error createing ProfileStoreV1: %v\n", err) - } - - profile = ps2.GetProfileCopy(true) - if profile.Name != testProfileName { - t.Errorf("Profile name from loaded profile incorrect. Expected: '%v' Actual: '%v'\n", testProfileName, profile.Name) - } - - v, _ := profile.GetAttribute(testKey) - if v != testVal { - t.Errorf("Profile attribute '%v' inccorect. Expected: '%v' Actual: '%v'\n", testKey, testVal, v) - } - - group2 := ps2.GetProfileCopy(true).Groups[groupid] - if group2 == nil { - t.Errorf("Group not loaded\n") - } - -} - -func TestProfileStoreChangePassword(t *testing.T) { - os.RemoveAll(testingDir) - eventBus := event.NewEventManager() - - queue := event.NewQueue() - eventBus.Subscribe(event.ChangePasswordSuccess, queue) - - profile := NewProfile(testProfileName) - profile.AddContact("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd", &model.PublicProfile{Attributes: map[string]string{string(model.KeyTypeServerOnion): "2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd"}}) - - ps1 := CreateProfileWriterStore(eventBus, testingDir, password, profile) - - groupid, invite, err := profile.StartGroup("2c3kmoobnyghj2zw6pwv7d57yzld753auo3ugauezzpvfak3ahc4bdyd") - if err != nil { - t.Errorf("Creating group: %v\n", err) - } - if err != nil { - t.Errorf("Creating group invite: %v\n", err) - } - - eventBus.Publish(event.NewEvent(event.NewGroup, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: ps1.GetProfileCopy(true).Onion, event.GroupInvite: string(invite)})) - time.Sleep(1 * time.Second) - - fmt.Println("Sending 200 messages...") - - for i := 0; i < 200; i++ { - eventBus.Publish(event.NewEvent(event.NewMessageFromGroup, map[event.Field]string{ - event.GroupID: groupid, - event.TimestampSent: time.Now().Format(time.RFC3339Nano), - event.TimestampReceived: time.Now().Format(time.RFC3339Nano), - event.RemotePeer: profile.Onion, - event.Data: testMessage, - event.Signature: base64.StdEncoding.EncodeToString([]byte{byte(i)}), - })) - } - - newPass := "qwerty123" - - fmt.Println("Sending Change Passwords event...") - 
eventBus.Publish(event.NewEventList(event.ChangePassword, event.Password, password, event.NewPassword, newPass)) - - ev := queue.Next() - if ev.EventType != event.ChangePasswordSuccess { - t.Errorf("Unexpected event response detected %v\n", ev.EventType) - return - } - - fmt.Println("Sending 10 more messages...") - for i := 0; i < 10; i++ { - eventBus.Publish(event.NewEvent(event.NewMessageFromGroup, map[event.Field]string{ - event.GroupID: groupid, - event.TimestampSent: time.Now().Format(time.RFC3339Nano), - event.TimestampReceived: time.Now().Format(time.RFC3339Nano), - event.RemotePeer: profile.Onion, - event.Data: testMessage, - event.Signature: base64.StdEncoding.EncodeToString([]byte{0x01, byte(i)}), - })) - } - time.Sleep(3 * time.Second) - - fmt.Println("Shutdown profile store...") - ps1.Shutdown() - - fmt.Println("New Profile store...") - ps2, err := LoadProfileWriterStore(eventBus, testingDir, newPass) - if err != nil { - t.Errorf("Error createing new ProfileStoreV1 with new password: %v\n", err) - return - } - - profile2 := ps2.GetProfileCopy(true) - - if profile2.Groups[groupid] == nil { - t.Errorf("Failed to load group %v\n", groupid) - return - } - - if len(profile2.Groups[groupid].Timeline.Messages) != 210 { - t.Errorf("Failed to load group's 210 messages, instead got %v\n", len(profile2.Groups[groupid].Timeline.Messages)) - } -} diff --git a/storage/v1/stream_store.go b/storage/v1/stream_store.go index 9ec8e99..75ec293 100644 --- a/storage/v1/stream_store.go +++ b/storage/v1/stream_store.go @@ -15,7 +15,6 @@ import ( // This number is larger that the recommend chunk size of libsodium secretbox by an order of magnitude. // Since this code is not performance-sensitive (and is unlikely to gain any significant performance benefit from // cache-efficient chunking) this size isn’t currently a concern. 
-// TODO: revise and evaluate better storage options after beta” const ( fileStorePartitions = 128 bytesPerFile = 128 * 1024 diff --git a/testing/cwtch_peer_server_integration_test.go b/testing/cwtch_peer_server_integration_test.go index 0a77b4f..43a708b 100644 --- a/testing/cwtch_peer_server_integration_test.go +++ b/testing/cwtch_peer_server_integration_test.go @@ -1,27 +1,28 @@ package testing import ( + // Import SQL Cipher "crypto/rand" app2 "cwtch.im/cwtch/app" "cwtch.im/cwtch/app/utils" "cwtch.im/cwtch/event" - "cwtch.im/cwtch/event/bridge" "cwtch.im/cwtch/model" "cwtch.im/cwtch/model/attr" "cwtch.im/cwtch/model/constants" "cwtch.im/cwtch/peer" "cwtch.im/cwtch/protocol/connections" "encoding/base64" + "encoding/json" "fmt" "git.openprivacy.ca/openprivacy/connectivity/tor" "git.openprivacy.ca/openprivacy/log" + _ "github.com/mutecomm/go-sqlcipher/v4" mrand "math/rand" "os" "os/user" "path" "runtime" "runtime/pprof" - "strings" "testing" "time" ) @@ -32,71 +33,33 @@ var ( carolLines = []string{"Howdy, thanks!"} ) -func printAndCountVerifedTimeline(t *testing.T, timeline []model.Message) int { - numVerified := 0 - for _, message := range timeline { - fmt.Printf("%v %v> %s\n", message.Timestamp, message.PeerID, message.Message) - numVerified++ - } - return numVerified -} - -func waitForPeerGroupConnection(t *testing.T, peer peer.CwtchPeer, groupID string) { +func waitForConnection(t *testing.T, peer peer.CwtchPeer, addr string, target connections.ConnectionState) { peerName, _ := peer.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name) for { - fmt.Printf("%v checking group connection...\n", peerName) - state, ok := peer.GetGroupState(groupID) - if ok { - fmt.Printf("Waiting for Peer %v to join group %v - state: %v\n", peerName, groupID, state) - if state == connections.FAILED { - t.Fatalf("%v could not connect to %v", peer.GetOnion(), groupID) - } - if state != connections.SYNCED { - fmt.Printf("peer %v %v waiting connect to group %v, currently: %v\n", peerName, peer.GetOnion(), groupID, connections.ConnectionStateName[state]) - time.Sleep(time.Second * 5) - continue - } else { - fmt.Printf("peer %v %v CONNECTED to group %v\n", peerName, peer.GetOnion(), groupID) - break - } + fmt.Printf("%v checking connection...\n", peerName) + state := peer.GetPeerState(addr) + fmt.Printf("Waiting for Peer %v to %v - state: %v\n", peerName, addr, state) + if state == connections.FAILED { + t.Fatalf("%v could not connect to %v", peer.GetOnion(), addr) } - time.Sleep(time.Second * 2) - } - return -} - -func waitForPeerPeerConnection(t *testing.T, peera peer.CwtchPeer, peerb peer.CwtchPeer) { - for { - state, ok := peera.GetPeerState(peerb.GetOnion()) - if ok { - //log.Infof("Waiting for Peer %v to peer with peer: %v - state: %v\n", peera.GetProfile().Name, peerb.GetProfile().Name, state) - if state == connections.FAILED { - t.Fatalf("%v could not connect to %v", peera.GetOnion(), peerb.GetOnion()) - } - if state != connections.AUTHENTICATED { - fmt.Printf("peer %v waiting connect to peer %v, currently: %v\n", peera.GetOnion(), peerb.GetOnion(), connections.ConnectionStateName[state]) - time.Sleep(time.Second * 5) - continue - } else { - peerAName, _ := peera.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name) - peerBName, _ := peerb.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name) - fmt.Printf("%v CONNECTED and AUTHED to %v\n", peerAName, peerBName) - break - } + if state != target { + fmt.Printf("peer %v %v waiting connect %v, 
currently: %v\n", peerName, peer.GetOnion(), addr, connections.ConnectionStateName[state]) + time.Sleep(time.Second * 5) + continue + } else { + fmt.Printf("peer %v %v CONNECTED to %v\n", peerName, peer.GetOnion(), addr) + break } } - return } func TestCwtchPeerIntegration(t *testing.T) { - numGoRoutinesStart := runtime.NumGoroutine() log.AddEverythingFromPattern("connectivity") log.SetLevel(log.LevelDebug) log.ExcludeFromPattern("connection/connection") log.ExcludeFromPattern("outbound/3dhauthchannel") log.ExcludeFromPattern("event/eventmanager") - log.ExcludeFromPattern("pipeBridge") log.ExcludeFromPattern("tapir") os.Mkdir("tordir", 0700) dataDir := path.Join("tordir", "tor") @@ -119,8 +82,11 @@ func TestCwtchPeerIntegration(t *testing.T) { if err != nil { t.Fatalf("Could not start Tor: %v", err) } - pid, _ := acn.GetPID() - t.Logf("Tor pid: %v", pid) + acn.WaitTillBootstrapped() + defer acn.Close() + + // We don't include ACN in our routine calculations anymore + numGoRoutinesStart := runtime.NumGoroutine() // ***** Cwtch Server management ***** @@ -135,41 +101,36 @@ func TestCwtchPeerIntegration(t *testing.T) { os.Mkdir(cwtchDir, 0700) os.RemoveAll(path.Join(cwtchDir, "testing")) os.Mkdir(path.Join(cwtchDir, "testing"), 0700) - bridgeClient := bridge.NewPipeBridgeClient(path.Join(cwtchDir, "testing/clientPipe"), path.Join(cwtchDir, "testing/servicePipe")) - bridgeService := bridge.NewPipeBridgeService(path.Join(cwtchDir, "testing/servicePipe"), path.Join(cwtchDir, "testing/clientPipe")) - appClient := app2.NewAppClient("./storage", bridgeClient) - appService := app2.NewAppService(acn, "./storage", bridgeService) numGoRoutinesPostAppStart := runtime.NumGoroutine() // ***** cwtchPeer setup ***** fmt.Println("Creating Alice...") - app.CreatePeer("alice", "asdfasdf") + app.CreateTaggedPeer("Alice", "asdfasdf", "test") fmt.Println("Creating Bob...") - app.CreatePeer("bob", "asdfasdf") + app.CreateTaggedPeer("Bob", "asdfasdf", "test") fmt.Println("Creating Carol...") - appClient.CreatePeer("carol", "asdfasdf") + app.CreateTaggedPeer("Carol", "asdfasdf", "test") - alice := utils.WaitGetPeer(app, "alice") + alice := utils.WaitGetPeer(app, "Alice") fmt.Println("Alice created:", alice.GetOnion()) alice.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Alice") alice.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer}) - bob := utils.WaitGetPeer(app, "bob") + bob := utils.WaitGetPeer(app, "Bob") fmt.Println("Bob created:", bob.GetOnion()) bob.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Bob") bob.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer}) - carol := utils.WaitGetPeer(appClient, "carol") + carol := utils.WaitGetPeer(app, "Carol") fmt.Println("Carol created:", carol.GetOnion()) carol.SetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name, "Carol") carol.AutoHandleEvents([]event.Type{event.PeerStateChange, event.ServerStateChange, event.NewGroupInvite, event.NewRetValMessageFromPeer}) app.LaunchPeers() - appClient.LaunchPeers() waitTime := time.Duration(60) * time.Second t.Logf("** Waiting for Alice, Bob, and Carol to connect with onion network... 
(%v)\n", waitTime) @@ -179,289 +140,239 @@ func TestCwtchPeerIntegration(t *testing.T) { // ***** Peering, server joining, group creation / invite ***** - fmt.Println("Alice joining server...") - if _, err := alice.AddServer(string(serverKeyBundle)); err != nil { - t.Fatalf("Failed to Add Server Bundle %v", err) - } - alice.JoinServer(ServerAddr) - fmt.Println("Alice peering with Bob...") - alice.PeerWithOnion(bob.GetOnion()) + // Simulate Alice Adding Bob + alice2bobConversationID, err := alice.NewContactConversation(bob.GetOnion(), model.DefaultP2PAccessControl(), true) + if err != nil { + t.Fatalf("error adding conversaiton %v", alice2bobConversationID) + } + bob2aliceConversationID, err := bob.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true) + if err != nil { + t.Fatalf("error adding conversaiton %v", bob2aliceConversationID) + } - fmt.Println("Alice peering with Carol...") + t.Logf("Alice peering with Carol...") + // Simulate Alice Adding Carol + alice2carolConversationID, err := alice.NewContactConversation(carol.GetOnion(), model.DefaultP2PAccessControl(), true) + if err != nil { + t.Fatalf("error adding conversaiton %v", alice2carolConversationID) + } + carol2aliceConversationID, err := carol.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true) + if err != nil { + t.Fatalf("error adding conversaiton %v", carol2aliceConversationID) + } + + alice.PeerWithOnion(bob.GetOnion()) alice.PeerWithOnion(carol.GetOnion()) - fmt.Println("Creating group on ", ServerAddr, "...") - groupID, _, err := alice.StartGroup(ServerAddr) - fmt.Printf("Created group: %v!\n", groupID) - if err != nil { - t.Errorf("Failed to init group: %v", err) - return - } + waitForConnection(t, alice, bob.GetOnion(), connections.AUTHENTICATED) + waitForConnection(t, alice, carol.GetOnion(), connections.AUTHENTICATED) + waitForConnection(t, bob, alice.GetOnion(), connections.AUTHENTICATED) + waitForConnection(t, carol, alice.GetOnion(), connections.AUTHENTICATED) - fmt.Println("Waiting for alice to join server...") - waitForPeerGroupConnection(t, alice, groupID) + t.Logf("Alice and Bob getVal public.name...") - fmt.Println("Waiting for alice and Bob to peer...") - waitForPeerPeerConnection(t, alice, bob) - // Need to add contact else SetContactAuth fails on peer peer doesnt exist - // Normal flow would be Bob app monitors for the new connection (a new connection state change to Auth - // and the adds the user to peer, and then approves or blocks it - bob.AddContact("alice?", alice.GetOnion(), model.AuthApproved) - bob.AddServer(string(serverKeyBundle)) - bob.SetContactAuthorization(alice.GetOnion(), model.AuthApproved) + alice.SendScopedZonedGetValToContact(alice2bobConversationID, attr.PublicScope, attr.ProfileZone, constants.Name) + bob.SendScopedZonedGetValToContact(bob2aliceConversationID, attr.PublicScope, attr.ProfileZone, constants.Name) - waitForPeerPeerConnection(t, alice, carol) - carol.AddContact("alice?", alice.GetOnion(), model.AuthApproved) - carol.AddServer(string(serverKeyBundle)) - carol.SetContactAuthorization(alice.GetOnion(), model.AuthApproved) - - fmt.Println("Alice and Bob getVal public.name...") - - alice.SendScopedZonedGetValToContact(bob.GetOnion(), attr.PublicScope, attr.ProfileZone, constants.Name) - bob.SendScopedZonedGetValToContact(alice.GetOnion(), attr.PublicScope, attr.ProfileZone, constants.Name) - - alice.SendScopedZonedGetValToContact(carol.GetOnion(), attr.PublicScope, attr.ProfileZone, constants.Name) - 
carol.SendScopedZonedGetValToContact(alice.GetOnion(), attr.PublicScope, attr.ProfileZone, constants.Name) + alice.SendScopedZonedGetValToContact(alice2carolConversationID, attr.PublicScope, attr.ProfileZone, constants.Name) + carol.SendScopedZonedGetValToContact(carol2aliceConversationID, attr.PublicScope, attr.ProfileZone, constants.Name) // This used to be 10, but increasing it to 30 because this is now causing frequent issues // Probably related to latency/throughput problems in the underlying tor network. time.Sleep(30 * time.Second) - aliceName, exists := bob.GetContactAttribute(alice.GetOnion(), attr.GetPeerScope(constants.Name)) - if !exists || aliceName != "Alice" { - t.Fatalf("Bob: alice GetKeyVal error on alice peer.name %v\n", exists) + aliceName, err := bob.GetConversationAttribute(bob2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name))) + if err != nil || aliceName != "Alice" { + t.Fatalf("Bob: alice GetKeyVal error on alice peer.name %v: %v\n", aliceName, err) } fmt.Printf("Bob has alice's name as '%v'\n", aliceName) - bobName, exists := alice.GetContactAttribute(bob.GetOnion(), attr.GetPeerScope(constants.Name)) - if !exists || bobName != "Bob" { - t.Fatalf("Alice: bob GetKeyVal error on bob peer.name\n") + bobName, err := alice.GetConversationAttribute(alice2bobConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name))) + if err != nil || bobName != "Bob" { + t.Fatalf("Alice: bob GetKeyVal error on bob peer.name %v: %v \n", bobName, err) } fmt.Printf("Alice has bob's name as '%v'\n", bobName) - aliceName, exists = carol.GetContactAttribute(alice.GetOnion(), attr.GetPeerScope(constants.Name)) - if !exists || aliceName != "Alice" { - t.Fatalf("carol GetKeyVal error for alice peer.name %v\n", exists) + aliceName, err = carol.GetConversationAttribute(carol2aliceConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name))) + if err != nil || aliceName != "Alice" { + t.Fatalf("carol GetKeyVal error for alice peer.name %v: %v\n", aliceName, err) } - carolName, exists := alice.GetContactAttribute(carol.GetOnion(), attr.GetPeerScope(constants.Name)) - if !exists || carolName != "Carol" { - t.Fatalf("alice GetKeyVal error, carol peer.name\n") + carolName, err := alice.GetConversationAttribute(alice2carolConversationID, attr.PublicScope.ConstructScopedZonedPath(attr.ProfileZone.ConstructZonedPath(constants.Name))) + if err != nil || carolName != "Carol" { + t.Fatalf("alice GetKeyVal error, carol peer.name: %v: %v\n", carolName, err) } fmt.Printf("Alice has carol's name as '%v'\n", carolName) + // Group Testing + + // Simulate Alice Creating a Group + fmt.Println("Alice joining server...") + if _, err := alice.AddServer(string(serverKeyBundle)); err != nil { + t.Fatalf("Failed to Add Server Bundle %v", err) + } + + bob.AddServer(string(serverKeyBundle)) + carol.AddServer(string(serverKeyBundle)) + + t.Logf("Waiting for alice to join server...") + err = alice.JoinServer(ServerAddr) + if err != nil { + t.Fatalf("alice cannot join server %v %v", ServerAddr, err) + } + waitForConnection(t, alice, ServerAddr, connections.SYNCED) + + // Creating a Group + t.Logf("Creating group on %v...", ServerAddr) + aliceGroupConversationID, err := alice.StartGroup("Our Cool Testing Group", ServerAddr) + t.Logf("Created group: %v!\n", aliceGroupConversationID) + if err != nil { + t.Errorf("Failed to init group: %v", err) + return + } + + // 
Invites fmt.Println("Alice inviting Bob to group...") - err = alice.InviteOnionToGroup(bob.GetOnion(), groupID) + err = alice.SendInviteToConversation(alice2bobConversationID, aliceGroupConversationID) if err != nil { t.Fatalf("Error for Alice inviting Bob to group: %v", err) } - time.Sleep(time.Second * 5) - fmt.Println("Bob examining groups and accepting invites...") - for _, message := range bob.GetContact(alice.GetOnion()).Timeline.GetMessages() { - fmt.Printf("Found message from Alice: %v", message.Message) - if strings.HasPrefix(message.Message, "torv3") { - gid, err := bob.ImportGroup(message.Message) - if err == nil { - fmt.Printf("Bob found invite...now accepting %v...", gid) - bob.AcceptInvite(gid) - } else { - t.Fatalf("Bob could not accept invite...%v", gid) - } - } - } + // Alice invites Bob to the Group... + message, _, err := bob.GetChannelMessage(bob2aliceConversationID, 0, 1) + t.Logf("Alice message to Bob %v %v", message, err) + var overlayMessage model.MessageWrapper + json.Unmarshal([]byte(message), &overlayMessage) + t.Logf("Parsed Overlay Message: %v", overlayMessage) + err = bob.ImportBundle(overlayMessage.Data) + t.Logf("Result of Bob Importing the Bundle from Alice: %v", err) - fmt.Println("Waiting for Bob to join connect to group server...") - waitForPeerGroupConnection(t, bob, groupID) + t.Logf("Waiting for Bob to join connect to group server...") + err = bob.JoinServer(ServerAddr) // for some unrealism we skip "discovering the server from the event bus + if err != nil { + t.Fatalf("alice cannot join server %v %v", ServerAddr, err) + } + bobGroupConversationID := 3 + waitForConnection(t, bob, ServerAddr, connections.SYNCED) numGoRoutinesPostServerConnect := runtime.NumGoroutine() // ***** Conversation ***** + t.Logf("Starting conversation in group...") + checkSendMessageToGroup(t, alice, aliceGroupConversationID, aliceLines[0]) + checkSendMessageToGroup(t, bob, bobGroupConversationID, bobLines[0]) + checkSendMessageToGroup(t, alice, aliceGroupConversationID, aliceLines[1]) + checkSendMessageToGroup(t, bob, bobGroupConversationID, bobLines[1]) - fmt.Println("Starting conversation in group...") - // Conversation - fmt.Printf("%v> %v\n", aliceName, aliceLines[0]) - err = alice.SendMessage(groupID, aliceLines[0]) + // Alice invites Bob to the Group... 
+ message, _, err = carol.GetChannelMessage(carol2aliceConversationID, 0, 1) + t.Logf("Alice message to Carol %v %v", message, err) + json.Unmarshal([]byte(message), &overlayMessage) + t.Logf("Parsed Overlay Message: %v", overlayMessage) + err = carol.ImportBundle(overlayMessage.Data) + t.Logf("Result of Carol Importing the Bundle from Alice: %v", err) + + t.Logf("Waiting for Carol to join connect to group server...") + err = carol.JoinServer(ServerAddr) // for some unrealism we skip "discovering the server from the event bus if err != nil { - t.Fatalf("Alice failed to send a message to the group: %v", err) + t.Fatalf("carol cannot join server %v %v", ServerAddr, err) } - time.Sleep(time.Second * 10) + carolGroupConversationID := 3 + waitForConnection(t, carol, ServerAddr, connections.SYNCED) - fmt.Printf("%v> %v\n", bobName, bobLines[0]) - err = bob.SendMessage(groupID, bobLines[0]) - if err != nil { - t.Fatalf("Bob failed to send a message to the group: %v", err) - } - time.Sleep(time.Second * 10) + numGoRoutinesPostCarolConnect := runtime.NumGoroutine() - fmt.Printf("%v> %v\n", aliceName, aliceLines[1]) - alice.SendMessage(groupID, aliceLines[1]) - time.Sleep(time.Second * 10) + t.Logf("Shutting down Alice...") - fmt.Printf("%v> %v\n", bobName, bobLines[1]) - bob.SendMessage(groupID, bobLines[1]) - time.Sleep(time.Second * 10) + // Check Alice Timeline + checkMessage(t, alice, aliceGroupConversationID, 1, aliceLines[0]) + checkMessage(t, alice, aliceGroupConversationID, 2, bobLines[0]) + checkMessage(t, alice, aliceGroupConversationID, 3, aliceLines[1]) + checkMessage(t, alice, aliceGroupConversationID, 4, bobLines[1]) - fmt.Println("Alice inviting Carol to group...") - err = alice.InviteOnionToGroup(carol.GetOnion(), groupID) - if err != nil { - t.Fatalf("Error for Alice inviting Carol to group: %v", err) - } - time.Sleep(time.Second * 60) // Account for some token acquisition in Alice and Bob flows. - fmt.Println("Carol examining groups and accepting invites...") - for _, message := range carol.GetContact(alice.GetOnion()).Timeline.GetMessages() { - fmt.Printf("Found message from Alice: %v", message.Message) - if strings.HasPrefix(message.Message, "torv3") { - gid, err := carol.ImportGroup(message.Message) - if err == nil { - fmt.Printf("Carol found invite...now accepting %v...", gid) - carol.AcceptInvite(gid) - } else { - t.Fatalf("Carol could not accept invite...%v", gid) - } - } - } - - fmt.Println("Shutting down Alice...") app.ShutdownPeer(alice.GetOnion()) - time.Sleep(time.Second * 5) + time.Sleep(time.Second * 3) numGoRoutinesPostAlice := runtime.NumGoroutine() - fmt.Println("Carol joining server...") - carol.JoinServer(ServerAddr) - waitForPeerGroupConnection(t, carol, groupID) - numGoRotinesPostCarolConnect := runtime.NumGoroutine() - - fmt.Printf("%v> %v", bobName, bobLines[2]) - bob.SendMessage(groupID, bobLines[2]) - // Bob should have enough tokens so we don't need to account for - // token acquisition here... - - fmt.Printf("%v> %v", carolName, carolLines[0]) - carol.SendMessage(groupID, carolLines[0]) - time.Sleep(time.Second * 30) // we need to account for spam-based token acquisition, but everything should - // be warmed-up and delays should be pretty small. 
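// The invite flow above repeats the same steps for Bob and Carol; here is a sketch of
// that pattern as a single helper, built only from calls that appear in this diff
// (GetChannelMessage, model.MessageWrapper, ImportBundle, JoinServer) and assuming the
// test file's imports (encoding/json, model, peer). The helper name, its error handling,
// and the assumption that the invite is message 1 in channel 0 of the 1:1 conversation
// are illustrative, not part of this change.
func acceptGroupInviteFromConversation(p peer.CwtchPeer, conversationID int, server string) error {
	// The invite arrives wrapped in a MessageWrapper overlay as the first message of
	// the contact conversation.
	raw, _, err := p.GetChannelMessage(conversationID, 0, 1)
	if err != nil {
		return err
	}
	var overlay model.MessageWrapper
	if err := json.Unmarshal([]byte(raw), &overlay); err != nil {
		return err
	}
	// Importing the bundle adds the group described by the invite to the profile...
	if err := p.ImportBundle(overlay.Data); err != nil {
		return err
	}
	// ...and joining the server is what actually starts syncing its timeline.
	return p.JoinServer(server)
}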
- - // ***** Verify Test ***** - - fmt.Println("Final syncing time...") + checkSendMessageToGroup(t, carol, carolGroupConversationID, carolLines[0]) + checkSendMessageToGroup(t, bob, bobGroupConversationID, bobLines[2]) time.Sleep(time.Second * 30) - alicesGroup := alice.GetGroup(groupID) - if alicesGroup == nil { - t.Error("aliceGroup == nil") - return - } + // Check Bob Timeline + checkMessage(t, bob, bobGroupConversationID, 1, aliceLines[0]) + checkMessage(t, bob, bobGroupConversationID, 2, bobLines[0]) + checkMessage(t, bob, bobGroupConversationID, 3, aliceLines[1]) + checkMessage(t, bob, bobGroupConversationID, 4, bobLines[1]) + checkMessage(t, bob, bobGroupConversationID, 5, carolLines[0]) + checkMessage(t, bob, bobGroupConversationID, 6, bobLines[2]) - fmt.Printf("Alice's TimeLine:\n") - aliceVerified := printAndCountVerifedTimeline(t, alicesGroup.GetTimeline()) - if aliceVerified != 4 { - t.Errorf("Alice did not have 4 verified messages") - } + // Check Carol Timeline + checkMessage(t, carol, carolGroupConversationID, 1, aliceLines[0]) + checkMessage(t, carol, carolGroupConversationID, 2, bobLines[0]) + checkMessage(t, carol, carolGroupConversationID, 3, aliceLines[1]) + checkMessage(t, carol, carolGroupConversationID, 4, bobLines[1]) + checkMessage(t, carol, carolGroupConversationID, 5, carolLines[0]) + checkMessage(t, carol, carolGroupConversationID, 6, bobLines[2]) - bobsGroup := bob.GetGroup(groupID) - if bobsGroup == nil { - t.Error("bobGroup == nil") - return - } - fmt.Printf("Bob's TimeLine:\n") - bobVerified := printAndCountVerifedTimeline(t, bobsGroup.GetTimeline()) - if bobVerified != 6 { - t.Errorf("Bob did not have 6 verified messages") - } - - carolsGroup := carol.GetGroup(groupID) - fmt.Printf("Carol's TimeLine:\n") - carolVerified := printAndCountVerifedTimeline(t, carolsGroup.GetTimeline()) - if carolVerified != 6 { - t.Errorf("Carol did not have 6 verified messages") - } - - if len(alicesGroup.GetTimeline()) != 4 { - t.Errorf("Alice's timeline does not have all messages") - } else { - // check message 0,1,2,3 - alicesGroup.Timeline.Sort() - aliceGroupTimeline := alicesGroup.GetTimeline() - if aliceGroupTimeline[0].Message != aliceLines[0] || aliceGroupTimeline[1].Message != bobLines[0] || - aliceGroupTimeline[2].Message != aliceLines[1] || aliceGroupTimeline[3].Message != bobLines[1] { - t.Errorf("Some of Alice's timeline messages did not have the expected content!") - } - } - - if len(bobsGroup.GetTimeline()) != 6 { - t.Errorf("Bob's timeline does not have all messages") - } else { - // check message 0,1,2,3,4,5 - bobsGroup.Timeline.Sort() - bobGroupTimeline := bobsGroup.GetTimeline() - if bobGroupTimeline[0].Message != aliceLines[0] || bobGroupTimeline[1].Message != bobLines[0] || - bobGroupTimeline[2].Message != aliceLines[1] || bobGroupTimeline[3].Message != bobLines[1] || - bobGroupTimeline[4].Message != bobLines[2] || bobGroupTimeline[5].Message != carolLines[0] { - t.Errorf("Some of Bob's timeline messages did not have the expected content!") - } - } - - if len(carolsGroup.GetTimeline()) != 6 { - t.Errorf("Carol's timeline does not have all messages") - } else { - // check message 0,1,2,3,4,5 - carolsGroup.Timeline.Sort() - carolGroupTimeline := carolsGroup.GetTimeline() - if carolGroupTimeline[0].Message != aliceLines[0] || carolGroupTimeline[1].Message != bobLines[0] || - carolGroupTimeline[2].Message != aliceLines[1] || carolGroupTimeline[3].Message != bobLines[1] || - carolGroupTimeline[4].Message != carolLines[0] || carolGroupTimeline[5].Message != 
bobLines[2] { - t.Errorf("Some of Carol's timeline messages did not have the expected content!") - } - } - - fmt.Println("Shutting down Bob...") + t.Logf("Shutting down Bob...") app.ShutdownPeer(bob.GetOnion()) time.Sleep(time.Second * 3) numGoRoutinesPostBob := runtime.NumGoroutine() - fmt.Println("Shutting down Carol...") - appClient.ShutdownPeer(carol.GetOnion()) + t.Logf("Shutting down Carol...") + app.ShutdownPeer(carol.GetOnion()) time.Sleep(time.Second * 3) numGoRoutinesPostCarol := runtime.NumGoroutine() - fmt.Println("Shutting down apps...") + t.Logf("Shutting down apps...") fmt.Printf("app Shutdown: %v\n", runtime.NumGoroutine()) app.Shutdown() - fmt.Printf("appClientShutdown: %v\n", runtime.NumGoroutine()) - appClient.Shutdown() - fmt.Printf("appServiceShutdown: %v\n", runtime.NumGoroutine()) - appService.Shutdown() - fmt.Printf("bridgeClientShutdown: %v\n", runtime.NumGoroutine()) - bridgeClient.Shutdown() time.Sleep(2 * time.Second) - fmt.Printf("brideServiceShutdown: %v\n", runtime.NumGoroutine()) - bridgeService.Shutdown() - time.Sleep(2 * time.Second) - - fmt.Printf("Done shutdown: %v\n", runtime.NumGoroutine()) + t.Logf("Done shutdown: %v\n", runtime.NumGoroutine()) numGoRoutinesPostAppShutdown := runtime.NumGoroutine() - fmt.Println("Shutting down ACN...") - acn.Close() + t.Logf("Shutting down ACN...") + // acn.Close() TODO: ACN Now gets closed automatically with defer...attempting to close twice results in a dead lock... time.Sleep(time.Second * 2) // Server ^^ has a 5 second loop attempting reconnect before exiting time.Sleep(time.Second * 30) // the network status plugin might keep goroutines alive for a minute before killing them - numGoRoutinesPostACN := runtime.NumGoroutine() // Printing out the current goroutines // Very useful if we are leaking any. 
     pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
-    fmt.Printf("numGoRoutinesStart: %v\nnumGoRoutinesPostAppStart: %v\nnumGoRoutinesPostPeerStart: %v\nnumGoRoutinesPostPeerAndServerConnect: %v\n"+
-        "numGoRoutinesPostAlice: %v\nnumGoRotinesPostCarolConnect: %v\nnumGoRoutinesPostBob: %v\nnumGoRoutinesPostCarol: %v\nnumGoRoutinesPostAppShutdown: %v\nnumGoRoutinesPostACN: %v\n",
+    t.Logf("numGoRoutinesStart: %v\nnumGoRoutinesPostAppStart: %v\nnumGoRoutinesPostPeerStart: %v\nnumGoRoutinesPostPeerAndServerConnect: %v\n"+
+        "numGoRoutinesPostAlice: %v\nnumGoRoutinesPostCarolConnect: %v\nnumGoRoutinesPostBob: %v\nnumGoRoutinesPostCarol: %v\nnumGoRoutinesPostAppShutdown: %v",
         numGoRoutinesStart, numGoRoutinesPostAppStart, numGoRoutinesPostPeerStart, numGoRoutinesPostServerConnect,
-        numGoRoutinesPostAlice, numGoRotinesPostCarolConnect, numGoRoutinesPostBob, numGoRoutinesPostCarol, numGoRoutinesPostAppShutdown, numGoRoutinesPostACN)
+        numGoRoutinesPostAlice, numGoRoutinesPostCarolConnect, numGoRoutinesPostBob, numGoRoutinesPostCarol, numGoRoutinesPostAppShutdown)
-    if numGoRoutinesStart != numGoRoutinesPostACN {
-        t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostACN)
+    if numGoRoutinesStart != numGoRoutinesPostAppShutdown {
+        t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostAppShutdown)
+    }
+
+}
+
+// Utility function for sending a message from a peer to a group
+func checkSendMessageToGroup(t *testing.T, profile peer.CwtchPeer, id int, message string) {
+    name, _ := profile.GetScopedZonedAttribute(attr.PublicScope, attr.ProfileZone, constants.Name)
+    t.Logf("%v> %v\n", name, message)
+    err := profile.SendMessage(id, message)
+    if err != nil {
+        t.Fatalf("%v failed to send a message to the group: %v", name, err)
+    }
+    time.Sleep(time.Second * 10)
+}
+
+// Utility function for testing that a message in a conversation is as expected
+func checkMessage(t *testing.T, profile peer.CwtchPeer, id int, messageID int, expected string) {
+    message, _, err := profile.GetChannelMessage(id, 0, messageID)
+    if err != nil {
+        t.Fatalf("unexpected message %v expected: %v got error: %v", profile.GetOnion(), expected, err)
+    }
+    if message != expected {
+        t.Fatalf("unexpected message %v expected: %v got: [%v]", profile.GetOnion(), expected, message)
     }
 }
diff --git a/testing/encryptedstorage/encrypted_storage_integration_test.go b/testing/encryptedstorage/encrypted_storage_integration_test.go
new file mode 100644
index 0000000..e63cd54
--- /dev/null
+++ b/testing/encryptedstorage/encrypted_storage_integration_test.go
@@ -0,0 +1,169 @@
+package encryptedstorage
+
+import (
+    // Import SQL Cipher
+    "crypto/rand"
+    app2 "cwtch.im/cwtch/app"
+    "cwtch.im/cwtch/app/utils"
+    "cwtch.im/cwtch/model"
+    "cwtch.im/cwtch/model/constants"
+    "cwtch.im/cwtch/peer"
+    "encoding/base64"
+    "fmt"
+    "git.openprivacy.ca/openprivacy/connectivity/tor"
+    "git.openprivacy.ca/openprivacy/log"
+    _ "github.com/mutecomm/go-sqlcipher/v4"
+    mrand "math/rand"
+    "os"
+    "path"
+    "path/filepath"
+    "testing"
+    "time"
+)
+
+func TestEncryptedStorage(t *testing.T) {
+
+    log.SetLevel(log.LevelDebug)
+
+    os.Mkdir("tordir", 0700)
+    dataDir := filepath.Join("tordir", "tor")
+    os.MkdirAll(dataDir, 0700)
+
+    // we don't need real randomness for the port, just to avoid a possible conflict...
+    mrand.Seed(int64(time.Now().Nanosecond()))
+    socksPort := mrand.Intn(1000) + 9051
+    controlPort := mrand.Intn(1000) + 9052
+
+    // generate a random password
+    key := make([]byte, 64)
+    _, err := rand.Read(key)
+    if err != nil {
+        panic(err)
+    }
+
+    tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
+    acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "..", "tor"), controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
+    if err != nil {
+        t.Fatalf("Could not start Tor: %v", err)
+    }
+
+    cwtchDir := path.Join(".", "encrypted_storage_profiles")
+    os.RemoveAll(cwtchDir)
+    os.Mkdir(cwtchDir, 0700)
+
+    fmt.Println("Creating Alice...")
+
+    defer acn.Close()
+    acn.WaitTillBootstrapped()
+    app := app2.NewApp(acn, cwtchDir)
+    app.CreateTaggedPeer("alice", "password", constants.ProfileTypeV1Password)
+    app.CreateTaggedPeer("bob", "password", constants.ProfileTypeV1Password)
+
+    alice := utils.WaitGetPeer(app, "alice")
+    bob := utils.WaitGetPeer(app, "bob")
+
+    alice.Listen()
+    bob.Listen()
+
+    // To keep this large test organized, we will break it down into sub tests...
+    subTestAliceAddAndDeleteBob(t, alice, bob)
+
+    conversations, err := alice.FetchConversations()
+    if err != nil || len(conversations) != 1 {
+        t.Fatalf("unexpected issue when fetching all of Alice's conversations. Expected 1 got: %v %v", conversations, err)
+    }
+
+    alice.PeerWithOnion(bob.GetOnion())
+
+    time.Sleep(time.Second * 40)
+
+    err = alice.SendMessage(2, "Hello Bob")
+    if err != nil {
+        t.Fatalf("alice should have been able to send a message to bob: %v", err)
+    }
+    _, attr, _ := alice.GetChannelMessage(2, 0, 1)
+    if attr[constants.AttrAck] != "false" {
+        t.Fatalf("Alice's message should not have been acknowledged yet")
+    }
+
+    time.Sleep(time.Second * 30)
+
+    ci, _ := bob.FetchConversationInfo(alice.GetOnion())
+    body, _, err := bob.GetChannelMessage(ci.ID, 0, 1)
+    if body != "Hello Bob" || err != nil {
+        t.Fatalf("unexpected message in conversation channel %v %v", body, err)
+    } else {
+        t.Logf("successfully found message in conversation channel %v", body)
+    }
+
+    // Check that we received an Ack...
+    _, attr, err = alice.GetChannelMessage(2, 0, 1)
+    if err != nil {
+        t.Fatalf("alice should have been able to fetch her own message")
+    }
+
+    if attr[constants.AttrAck] != "true" {
+        t.Fatalf("Alice's message should have been acknowledged.")
+    }
+
+    if count, err := alice.GetChannelMessageCount(2, 0); err != nil || count != 1 {
+        t.Fatalf("Channel should have a single message in it. Instead returned %v %v", count, err)
+    }
+
+    messages, err := alice.GetMostRecentMessages(2, 0, 0, 10)
+
+    if err != nil {
+        t.Fatalf("fetching messages over offset should not result in error: %v", err)
+    }
+
+    if len(messages) != 1 || len(messages) > 0 && messages[0].Body != "Hello Bob" {
+        t.Fatalf("expected GetMostRecentMessages to return 1, instead returned: %v %v", len(messages), messages)
+    }
+
+    app.Shutdown()
+
+}
+
+// Sub Test testing that Alice can add Bob, delete the conversation associated with Bob, and then add Bob again
+// under a different conversation identifier.
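+// It exercises NewContactConversation, FetchConversationInfo and DeleteConversation against the encrypted store.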
+func subTestAliceAddAndDeleteBob(t *testing.T, alice peer.CwtchPeer, bob peer.CwtchPeer) {
+
+    t.Logf("Starting Sub Test AliceAddAndDeleteBob")
+
+    alice.NewContactConversation(bob.GetOnion(), model.AccessControl{Read: true, Append: true, Blocked: false}, true)
+
+    // Test Basic Fetching
+    bobCI, err := alice.FetchConversationInfo(bob.GetOnion())
+    if bobCI == nil || err != nil {
+        t.Fatalf("alice should have been able to fetch bob's conversation info ci:%v err:%v", bobCI, err)
+    } else {
+        t.Logf("Bob's Conversation Info fetched successfully: %v", bobCI)
+    }
+
+    oldID := bobCI.ID
+
+    alice.DeleteConversation(oldID)
+
+    // Test Basic Fetching
+    bobCI, err = alice.FetchConversationInfo(bob.GetOnion())
+    if bobCI != nil {
+        t.Fatalf("alice should **not** have been able to fetch bob's conversation info ci:%v err:%v", bobCI, err)
+    } else {
+        t.Logf("expected error fetching deleted conversation info: %v", err)
+    }
+
+    alice.NewContactConversation(bob.GetOnion(), model.AccessControl{Read: true, Append: true, Blocked: false}, true)
+
+    // Test Basic Fetching
+    bobCI, err = alice.FetchConversationInfo(bob.GetOnion())
+    if bobCI == nil || err != nil {
+        t.Fatalf("alice should have been able to fetch bob's conversation info ci:%v err:%v", bobCI, err)
+    } else {
+        t.Logf("Bob's Conversation Info fetched successfully: %v", bobCI)
+    }
+
+    if oldID == bobCI.ID {
+        t.Fatalf("bob should have a different conversation ID. Instead it is the same as the old conversation ID, meaning something has gone wrong in the storage engine.")
+    }
+
+}
diff --git a/testing/filesharing/file_sharing_integration_test.go b/testing/filesharing/file_sharing_integration_test.go
index 19ef6f4..d24db99 100644
--- a/testing/filesharing/file_sharing_integration_test.go
+++ b/testing/filesharing/file_sharing_integration_test.go
@@ -18,6 +18,8 @@ import (
     "fmt"
     "git.openprivacy.ca/openprivacy/connectivity/tor"
     "git.openprivacy.ca/openprivacy/log"
+    // Import SQL Cipher
+    _ "github.com/mutecomm/go-sqlcipher/v4"
     mrand "math/rand"
     "os"
     "os/user"
@@ -30,31 +32,26 @@ import (
 func waitForPeerPeerConnection(t *testing.T, peera peer.CwtchPeer, peerb peer.CwtchPeer) {
     for {
-        state, ok := peera.GetPeerState(peerb.GetOnion())
-        if ok {
-            //log.Infof("Waiting for Peer %v to peer with peer: %v - state: %v\n", peera.GetProfile().Name, peerb.GetProfile().Name, state)
-            if state == connections.FAILED {
-                t.Fatalf("%v could not connect to %v", peera.GetOnion(), peerb.GetOnion())
-            }
-            if state != connections.AUTHENTICATED {
-                fmt.Printf("peer %v waiting connect to peer %v, currently: %v\n", peera.GetOnion(), peerb.GetOnion(), connections.ConnectionStateName[state])
-                time.Sleep(time.Second * 5)
-                continue
-            } else {
-                peerAName, _ := peera.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name)
-                peerBName, _ := peerb.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name)
-                fmt.Printf("%v CONNECTED and AUTHED to %v\n", peerAName, peerBName)
-                break
-            }
+        state := peera.GetPeerState(peerb.GetOnion())
+        //log.Infof("Waiting for Peer %v to peer with peer: %v - state: %v\n", peera.GetProfile().Name, peerb.GetProfile().Name, state)
+        if state == connections.FAILED {
+            t.Fatalf("%v could not connect to %v", peera.GetOnion(), peerb.GetOnion())
+        }
+        if state != connections.AUTHENTICATED {
+            fmt.Printf("peer %v waiting to connect to peer %v, currently: %v\n", peera.GetOnion(), peerb.GetOnion(), connections.ConnectionStateName[state])
+            time.Sleep(time.Second * 5)
+            continue
+        } else {
+            peerAName, _ := peera.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name)
+            peerBName, _ := peerb.GetScopedZonedAttribute(attr.LocalScope, attr.ProfileZone, constants.Name)
+            fmt.Printf("%v CONNECTED and AUTHED to %v\n", peerAName, peerBName)
+            break
         }
     }
-    return
 }
 func TestFileSharing(t *testing.T) {
-    numGoRoutinesStart := runtime.NumGoroutine()
-
     os.RemoveAll("cwtch.out.png")
     os.RemoveAll("cwtch.out.png.manifest")
@@ -81,7 +78,10 @@ func TestFileSharing(t *testing.T) {
     if err != nil {
         t.Fatalf("Could not start Tor: %v", err)
     }
+    acn.WaitTillBootstrapped()
+    defer acn.Close()
+
+    numGoRoutinesStart := runtime.NumGoroutine()
     app := app2.NewApp(acn, "./storage")
     usr, _ := user.Current()
@@ -91,11 +91,12 @@ func TestFileSharing(t *testing.T) {
     os.Mkdir(path.Join(cwtchDir, "testing"), 0700)
     fmt.Println("Creating Alice...")
-    app.CreatePeer("alice", "asdfasdf")
+    app.CreateTaggedPeer("alice", "asdfasdf", "testing")
     fmt.Println("Creating Bob...")
-    app.CreatePeer("bob", "asdfasdf")
+    app.CreateTaggedPeer("bob", "asdfasdf", "testing")
+    t.Logf("** Waiting for Alice, Bob...")
     alice := utils.WaitGetPeer(app, "alice")
     bob := utils.WaitGetPeer(app, "bob")
@@ -105,13 +106,15 @@ func TestFileSharing(t *testing.T) {
     queueOracle := event.NewQueue()
     app.GetEventBus(bob.GetOnion()).Subscribe(event.FileDownloaded, queueOracle)
+    t.Logf("** Launching Peers...")
     app.LaunchPeers()
     waitTime := time.Duration(30) * time.Second
     t.Logf("** Waiting for Alice, Bob to connect with onion network... (%v)\n", waitTime)
     time.Sleep(waitTime)
-    bob.AddContact("alice?", alice.GetOnion(), model.AuthApproved)
+    bob.NewContactConversation(alice.GetOnion(), model.DefaultP2PAccessControl(), true)
+    alice.NewContactConversation(bob.GetOnion(), model.DefaultP2PAccessControl(), true)
     alice.PeerWithOnion(bob.GetOnion())
     fmt.Println("Waiting for alice and Bob to peer...")
@@ -121,7 +124,7 @@ func TestFileSharing(t *testing.T) {
     filesharingFunctionality, _ := filesharing.FunctionalityGate(map[string]bool{"filesharing": true})
-    err = filesharingFunctionality.ShareFile("cwtch.png", alice, bob.GetOnion())
+    err = filesharingFunctionality.ShareFile("cwtch.png", alice, 1)
     if err != nil {
         t.Fatalf("Error!: %v", err)
@@ -130,21 +133,21 @@ func TestFileSharing(t *testing.T) {
     // Wait for the messages to arrive...
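+    // With the new storage API the shared file message lands in Bob's conversation 1 (channel 0), so it is fetched by ID below rather than by scanning a timeline.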
     time.Sleep(time.Second * 10)
-    for _, message := range bob.GetContact(alice.GetOnion()).Timeline.GetMessages() {
+    message, _, err := bob.GetChannelMessage(1, 0, 1)
+    if err != nil {
+        t.Fatalf("could not find file sharing message: %v", err)
+    }
-        var messageWrapper model.MessageWrapper
-        json.Unmarshal([]byte(message.Message), &messageWrapper)
+    var messageWrapper model.MessageWrapper
+    json.Unmarshal([]byte(message), &messageWrapper)
-        if messageWrapper.Overlay == model.OverlayFileSharing {
-            var fileMessageOverlay filesharing.OverlayMessage
-            err := json.Unmarshal([]byte(messageWrapper.Data), &fileMessageOverlay)
+    if messageWrapper.Overlay == model.OverlayFileSharing {
+        var fileMessageOverlay filesharing.OverlayMessage
+        err := json.Unmarshal([]byte(messageWrapper.Data), &fileMessageOverlay)
-            if err == nil {
-                filesharingFunctionality.DownloadFile(bob, alice.GetOnion(), "cwtch.out.png", "cwtch.out.png.manifest", fmt.Sprintf("%s.%s", fileMessageOverlay.Hash, fileMessageOverlay.Nonce))
-            }
+        if err == nil {
+            filesharingFunctionality.DownloadFile(bob, 1, "cwtch.out.png", "cwtch.out.png.manifest", fmt.Sprintf("%s.%s", fileMessageOverlay.Hash, fileMessageOverlay.Nonce))
         }
-
-        fmt.Printf("Found message from Alice: %v", message.Message)
     }
     // Wait for the file downloaded event
@@ -153,15 +156,14 @@ func TestFileSharing(t *testing.T) {
         t.Fatalf("Expected file download event")
     }
-    manifest, err := files.CreateManifest("cwtch.out.png")
+    manifest, _ := files.CreateManifest("cwtch.out.png")
     if hex.EncodeToString(manifest.RootHash) != "8f0ed73bbb30db45b6a740b1251cae02945f48e4f991464d5f3607685c45dcd136a325dab2e5f6429ce2b715e602b20b5b16bf7438fb6235fefe912adcedb5fd" {
         t.Fatalf("file hash does not match expected %x: ", manifest.RootHash)
     }
     queueOracle.Shutdown()
     app.Shutdown()
-    acn.Close()
-
+    time.Sleep(3 * time.Second)
     numGoRoutinesPostACN := runtime.NumGoroutine()
     // Printing out the current goroutines
diff --git a/testing/quality.sh b/testing/quality.sh
index c913c92..5262cc6 100755
--- a/testing/quality.sh
+++ b/testing/quality.sh
@@ -9,7 +9,7 @@ go list ./... | xargs go vet
 echo ""
 echo "Linting:"
-go list ./... | xargs golint
+staticcheck ./...
 echo "Time to format"
@@ -21,4 +21,4 @@ ineffassign .
 # misspell (https://github.com/client9/misspell/cmd/misspell)
 echo "Checking for misspelled words..."
-misspell . | grep -v "vendor/" | grep -v "go.sum" | grep -v ".idea"
+misspell . | grep -v "testing/" | grep -v "vendor/" | grep -v "go.sum" | grep -v ".idea"
diff --git a/testing/tests.sh b/testing/tests.sh
index 8a3d9a7..4c0c958 100755
--- a/testing/tests.sh
+++ b/testing/tests.sh
@@ -5,12 +5,10 @@ pwd
 GORACE="haltonerror=1" go test -race ${1} -coverprofile=model.cover.out -v ./model
 go test -race ${1} -coverprofile=event.cover.out -v ./event
-go test -race ${1} -coverprofile=storage.v0.cover.out -v ./storage/v0
 go test -race ${1} -coverprofile=storage.v1.cover.out -v ./storage/v1
 go test -race ${1} -coverprofile=storage.cover.out -v ./storage
 go test -race ${1} -coverprofile=peer.connections.cover.out -v ./protocol/connections
 go test -race ${1} -coverprofile=peer.filesharing.cover.out -v ./protocol/files
-go test -race ${1} -coverprofile=peer.cover.out -v ./peer
 echo "mode: set" > coverage.out && cat *.cover.out | grep -v mode: | sort -r | \
 awk '{if($1 != last) {print $0;last=$1}}' >> coverage.out
 rm -rf *.cover.out