File Sharing MVP (#384)
Co-authored-by: erinn <erinn@openprivacy.ca> Reviewed-on: #384 Reviewed-by: Dan Ballard <dan@openprivacy.ca> Co-authored-by: Sarah Jamie Lewis <sarah@openprivacy.ca> Co-committed-by: Sarah Jamie Lewis <sarah@openprivacy.ca>
This commit is contained in:
parent
5165c5040e
commit
907a7ca638
|
@ -42,6 +42,14 @@ pipeline:
|
|||
event: [ push, pull_request ]
|
||||
commands:
|
||||
- go test -race -v cwtch.im/cwtch/testing/
|
||||
filesharing-integ-test:
|
||||
image: golang
|
||||
when:
|
||||
repo: cwtch.im/cwtch
|
||||
branch: master
|
||||
event: [ push, pull_request ]
|
||||
commands:
|
||||
- go test -race -v cwtch.im/cwtch/testing/filesharing
|
||||
notify-email:
|
||||
image: drillster/drone-email
|
||||
host: build.openprivacy.ca
|
||||
|
|
|
@ -12,3 +12,12 @@ server/app/messages
|
|||
/storage/*/testing/
|
||||
/storage/testing/
|
||||
/testing/storage/
|
||||
ebusgraph.txt
|
||||
messages/
|
||||
serverMonitorReport.txt
|
||||
testing/cwtch.out.png
|
||||
testing/cwtch.out.png.manifest
|
||||
testing/tordir/
|
||||
tokens-bak.db
|
||||
tokens.db
|
||||
tokens1.db
|
|
@ -248,6 +248,16 @@ const (
|
|||
|
||||
// For situations where we want to update $Identity -> $RemotePeer/$GroupID's total message count to be $Data
|
||||
MessageCounterResync = Type("MessageCounterResync")
|
||||
|
||||
// File Handling Events
|
||||
ShareManifest = Type("ShareManifest")
|
||||
ManifestSizeReceived = Type("ManifestSizeReceived")
|
||||
ManifestError = Type("ManifestError")
|
||||
ManifestReceived = Type("ManifestReceived")
|
||||
ManifestSaved = Type("ManifestSaved")
|
||||
FileDownloadProgressUpdate = Type("FileDownloadProgressUpdate")
|
||||
FileDownloaded = Type("FileDownloaded")
|
||||
FileVerificationFailed = Type("FileVerificationFailed")
|
||||
)
|
||||
|
||||
// Field defines common event attributes
|
||||
|
@ -312,6 +322,14 @@ const (
|
|||
Imported = Field("Imported")
|
||||
|
||||
Source = Field("Source")
|
||||
|
||||
FileKey = Field("FileKey")
|
||||
FileSizeInChunks = Field("FileSizeInChunks")
|
||||
ManifestSize = Field("ManifestSize")
|
||||
SerializedManifest = Field("SerializedManifest")
|
||||
TempFile = Field("TempFile")
|
||||
FilePath = Field("FilePath")
|
||||
NameSuggestion = Field("NameSuggestion")
|
||||
)
|
||||
|
||||
// Defining Common errors
|
||||
|
@ -333,6 +351,10 @@ const (
|
|||
ContextRaw = "im.cwtch.raw"
|
||||
ContextGetVal = "im.cwtch.getVal"
|
||||
ContextRetVal = "im.cwtch.retVal"
|
||||
ContextRequestManifest = "im.cwtch.file.request.manifest"
|
||||
ContextSendManifest = "im.cwtch.file.send.manifest"
|
||||
ContextRequestFile = "im.cwtch.file.request.chunk"
|
||||
ContextSendFile = "im.cwtch.file.send.chunk"
|
||||
)
|
||||
|
||||
// Define Default Attribute Keys
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
package filesharing
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"path"
|
||||
"strconv"
|
||||
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/model/attr"
|
||||
"cwtch.im/cwtch/peer"
|
||||
"cwtch.im/cwtch/protocol/files"
|
||||
"git.openprivacy.ca/openprivacy/log"
|
||||
)
|
||||
|
||||
// Functionality groups some common UI triggered functions for contacts...
type Functionality struct {
}

// FunctionalityGate returns a filesharing Functionality if the "filesharing"
// experiment is enabled in the given experiment map, and an error otherwise.
// (A missing key reads as false, so a nil map also disables the experiment.)
func FunctionalityGate(experimentMap map[string]bool) (*Functionality, error) {
	if experimentMap["filesharing"] {
		return new(Functionality), nil
	}
	return nil, errors.New("filesharing is not enabled")
}
|
||||
|
||||
// OverlayMessage presents the canonical format of the File Sharing functionality Overlay Message
// This is the format that the UI will parse to display the message
type OverlayMessage struct {
	// Name is the suggested file name (the base name only; the sender's local path is stripped).
	Name string `json:"f"`
	// Hash is the hex encoding of the file manifest's root hash.
	Hash string `json:"h"`
	// Nonce is the hex encoding of a random 24-byte value; together with Hash it
	// identifies this particular share (the file key is "%x.%x" of hash and nonce).
	Nonce string `json:"n"`
	// Size is the total file size in bytes.
	Size uint64 `json:"s"`
}
|
||||
|
||||
// DownloadFile given a profile, a conversation handle and a file sharing key, start off a download process
// to downloadFilePath
func (f *Functionality) DownloadFile(profile peer.CwtchPeer, handle string, downloadFilePath string, manifestFilePath string, key string) {
	// Record where the manifest and the file itself should be written, in the
	// profile's local scope, so the ManifestReceived event handler can find them.
	profile.SetAttribute(attr.GetLocalScope(fmt.Sprintf("%s.manifest", key)), manifestFilePath)
	profile.SetAttribute(attr.GetLocalScope(key), downloadFilePath)
	// Kick off the download by asking the sharer for the manifest size they
	// published in their public scope under "<key>.manifest.size".
	profile.SendGetValToPeer(handle, attr.PublicScope, fmt.Sprintf("%s.manifest.size", key))
}
|
||||
|
||||
// ShareFile given a profile and a conversation handle, sets up a file sharing process to share the file
|
||||
// at filepath
|
||||
func (f *Functionality) ShareFile(filepath string, profile peer.CwtchPeer, handle string) error {
|
||||
manifest, err := files.CreateManifest(filepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var nonce [24]byte
|
||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||
log.Errorf("Cannot read from random: %v\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
message := OverlayMessage{
|
||||
Name: path.Base(manifest.FileName),
|
||||
Hash: hex.EncodeToString(manifest.RootHash),
|
||||
Nonce: hex.EncodeToString(nonce[:]),
|
||||
Size: manifest.FileSizeInBytes,
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(message)
|
||||
|
||||
wrapper := model.MessageWrapper{
|
||||
Overlay: model.OverlayFileSharing,
|
||||
Data: string(data),
|
||||
}
|
||||
|
||||
wrapperJSON, _ := json.Marshal(wrapper)
|
||||
key := fmt.Sprintf("%x.%x", manifest.RootHash, nonce)
|
||||
serializedManifest, _ := json.Marshal(manifest)
|
||||
|
||||
// Store the size of the manifest (in chunks) as part of the public scope so contacts who we share the file with
|
||||
// can fetch the manifest as if it were a file.
|
||||
// manifest.FileName gets redacted in filesharing_subsystem (to remove the system-specific file hierarchy),
|
||||
// but we need to *store* the full path because the sender also uses it to locate the file
|
||||
lenDiff := len(filepath) - len(path.Base(filepath))
|
||||
profile.SetAttribute(attr.GetPublicScope(fmt.Sprintf("%s.manifest.size", key)), strconv.Itoa(int(math.Ceil(float64(len(serializedManifest)-lenDiff)/float64(files.DefaultChunkSize)))))
|
||||
|
||||
profile.ShareFile(key, string(serializedManifest))
|
||||
|
||||
profile.SendMessage(handle, string(wrapperJSON))
|
||||
|
||||
return nil
|
||||
}
|
11
go.mod
11
go.mod
|
@ -3,15 +3,12 @@ module cwtch.im/cwtch
|
|||
go 1.14
|
||||
|
||||
require (
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.4
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.5
|
||||
git.openprivacy.ca/openprivacy/log v1.0.2
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.9
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.5.0
|
||||
git.openprivacy.ca/openprivacy/log v1.0.3
|
||||
github.com/gtank/ristretto255 v0.1.2
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
|
||||
github.com/struCoder/pidusage v0.1.3
|
||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
|
||||
golang.org/x/tools v0.1.2 // indirect
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
|
||||
)
|
||||
|
|
71
go.sum
71
go.sum
|
@ -1,40 +1,14 @@
|
|||
git.openprivacy.ca/cwtch.im/tapir v0.3.1 h1:+d1dHyPvZ8JmdfFe/oXWJPardzflRIhcdILtkeArkW8=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.1/go.mod h1:q6RMI/TQvRN8SCtRY3GryOawMcB0uG6NjP6M77oSMx8=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.2 h1:thLWqqY1LkirWFcy9Tg6NgWeYbvo9xBm+s2XVnCIvpY=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.2/go.mod h1:q6RMI/TQvRN8SCtRY3GryOawMcB0uG6NjP6M77oSMx8=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.3 h1:Q7F8JijgOMMYSy3IdZl7+r6qkWckEWV1+EY7q6MAkVs=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.3/go.mod h1:ZMg9Jzh0n3Os2aSF4z+bx/n8WBCJBN7KCQESXperYts=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.4 h1:g7yZkfz/vWr/t2tFXa/t0Ebr/w665uIKpxpCZ3lIPCo=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.4/go.mod h1:+Niy2AHhQC351ZTtfhC0uLjViCICyOxCJZsIlGKKNAU=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.5 h1:AlqAhluY4ivznGoHh37Khyxy0u9IbtYskP93wgtmYx8=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.3.5/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.0 h1:clG8uORt0NKEhT4P+Dpw1pzyUuYzYBMevGqn2pciKk8=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.0/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.1 h1:9LMpQX41IzecNNlRc1FZKXHg6wlFss679tFsa3vzb3Y=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.1/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.2 h1:bxMWZnVJXX4dqqOFS7ELW4iFkVL4GS8wiRkjRv5rJe8=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.2/go.mod h1:eH6dZxXrhW0C4KZX18ksUa6XJCrEvtg8cJJ/Fy6gv+E=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.3 h1:sctSfUXHDIqaHfJPDl+5lHtmoEJolQiHTcHZGAe5Qc4=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.3/go.mod h1:10qEaib5x021zgyZ/97JKWsEpedH5+Vfy2CvB2V+08E=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.4 h1:KyuTVmr9GYptTCeR7JDODjmhBBbnIBf9V3NSC4+6bHc=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.4/go.mod h1:qMFTdmDZITc1BLP1jSW0gVpLmvpg+Zjsh5ek8StwbFE=
|
||||
filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU=
|
||||
filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.9 h1:LXonlztwvI1F1++0IyomIcDH1/Bxzo+oN8YjGonNvjM=
|
||||
git.openprivacy.ca/cwtch.im/tapir v0.4.9/go.mod h1:p4bHo3DAO8wwimU6JAeZXbfPQ4jnoA2bV+4YvknWTNQ=
|
||||
git.openprivacy.ca/openprivacy/bine v0.0.4 h1:CO7EkGyz+jegZ4ap8g5NWRuDHA/56KKvGySR6OBPW+c=
|
||||
git.openprivacy.ca/openprivacy/bine v0.0.4/go.mod h1:13ZqhKyqakDsN/ZkQkIGNULsmLyqtXc46XBcnuXm/mU=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.0 h1:c7AANUCrlA4hIqXxIGDOWMtSe8CpDleD1877PShScbM=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.0/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.1 h1:zoM+j7PFj8mQeUCNiDNMe7Uq9dhcJDOhaZcSANfeDL4=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.1/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.2 h1:rQFIjWunLlRmXL5Efsv+7+1cA70T6Uza6RCy2PRm9zc=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.2/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.3 h1:i2Ad/U9FlL9dKr2bhRck7lJ8NoWyGtoEfUwoCyMT0fU=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.3/go.mod h1:bR0Myx9nm2YzWtsThRelkNMV4Pp7sPDa123O1qsAbVo=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.4 h1:11M3akVCyy/luuhMpZTM1r9Jayl7IHD944Bxsn2FDpU=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.4/go.mod h1:JVRCIdL+lAG6ohBFWiKeC/MN42nnC0sfFszR9XG6vPQ=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.5 h1:UYMdCWPzEAP7LbqdMXGNXmfKjWlvfnKdmewBtnbgQRI=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.4.5/go.mod h1:JVRCIdL+lAG6ohBFWiKeC/MN42nnC0sfFszR9XG6vPQ=
|
||||
git.openprivacy.ca/openprivacy/log v1.0.1/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
||||
git.openprivacy.ca/openprivacy/log v1.0.2 h1:HLP4wsw4ljczFAelYnbObIs821z+jgMPCe8uODPnGQM=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.5.0 h1:ZxsR/ZaVKXIkD2x6FlajZn62ciNQjamrI4i/5xIpdoQ=
|
||||
git.openprivacy.ca/openprivacy/connectivity v1.5.0/go.mod h1:UjQiGBnWbotmBzIw59B8H6efwDadjkKzm3RPT1UaIRw=
|
||||
git.openprivacy.ca/openprivacy/log v1.0.2/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
||||
git.openprivacy.ca/openprivacy/log v1.0.3 h1:E/PMm4LY+Q9s3aDpfySfEDq/vYQontlvNj/scrPaga0=
|
||||
git.openprivacy.ca/openprivacy/log v1.0.3/go.mod h1:gGYK8xHtndRLDymFtmjkG26GaMQNgyhioNS82m812Iw=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -42,9 +16,7 @@ github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
|
|||
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
|
||||
github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc=
|
||||
github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
|
@ -54,58 +26,31 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
|
|||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/struCoder/pidusage v0.1.3 h1:pZcSa6asBE38TJtW0Nui6GeCjLTpaT/jAnNP7dUTLSQ=
|
||||
github.com/struCoder/pidusage v0.1.3/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI=
|
||||
github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
|
||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee h1:4yd7jl+vXjalO5ztz6Vc1VADv+S/80LGJmyl1ROJ2AI=
|
||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44 h1:Bli41pIlzTzf3KEY06n+xnzK/BESIg2ze4Pgfh/aI8c=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
package model
|
||||
|
||||
// MessageWrapper is the canonical Cwtch overlay wrapper
|
||||
type MessageWrapper struct {
|
||||
Overlay int `json:"o"`
|
||||
Data string `json:"d"`
|
||||
}
|
||||
|
||||
// OverlayChat is the canonical identifier for chat overlays
|
||||
const OverlayChat = 1
|
||||
|
||||
// OverlayInviteContact is the canonical identifier for the contact invite overlay
|
||||
const OverlayInviteContact = 100
|
||||
|
||||
// OverlayInviteGroup is the canonical identifier for the group invite overlay
|
||||
const OverlayInviteGroup = 101
|
||||
|
||||
// OverlayFileSharing is the canonical identifier for the file sharing overlay
|
||||
const OverlayFileSharing = 200
|
|
@ -1,19 +1,24 @@
|
|||
package peer
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/model/attr"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"git.openprivacy.ca/openprivacy/log"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/model/attr"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"cwtch.im/cwtch/protocol/files"
|
||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||
"git.openprivacy.ca/openprivacy/log"
|
||||
)
|
||||
|
||||
const lastKnownSignature = "LastKnowSignature"
|
||||
|
@ -21,7 +26,8 @@ const lastKnownSignature = "LastKnowSignature"
|
|||
var autoHandleableEvents = map[event.Type]bool{event.EncryptedGroupMessage: true, event.PeerStateChange: true,
|
||||
event.ServerStateChange: true, event.NewGroupInvite: true, event.NewMessageFromPeer: true,
|
||||
event.PeerAcknowledgement: true, event.PeerError: true, event.SendMessageToPeerError: true, event.SendMessageToGroupError: true,
|
||||
event.NewGetValMessageFromPeer: true, event.NewRetValMessageFromPeer: true, event.ProtocolEngineStopped: true, event.RetryServerRequest: true}
|
||||
event.NewGetValMessageFromPeer: true, event.NewRetValMessageFromPeer: true, event.ProtocolEngineStopped: true, event.RetryServerRequest: true,
|
||||
event.ManifestSizeReceived: true, event.ManifestReceived: true}
|
||||
|
||||
// DefaultEventsToHandle specifies which events will be subscribed to
|
||||
// when a peer has its Init() function called
|
||||
|
@ -48,6 +54,26 @@ type cwtchPeer struct {
|
|||
eventBus event.Manager
|
||||
}
|
||||
|
||||
// SendMessage is a higher level that merges sending messages to contacts and group handles
|
||||
// If you try to send a message to a handle that doesn't exist, malformed or an incorrect type then
|
||||
// this function will error
|
||||
func (cp *cwtchPeer) SendMessage(handle string, message string) error {
|
||||
// Group Handles are always 32 bytes in length, but we forgo any further testing here
|
||||
// and delegate the group existence check to SendMessageToGroupTracked
|
||||
if len(handle) == 32 {
|
||||
_, err := cp.SendMessageToGroupTracked(handle, message)
|
||||
return err
|
||||
} else if tor.IsValidHostname(handle) {
|
||||
// We assume we are sending to a Contact.
|
||||
// (Servers are technically Contacts)
|
||||
cp.SendMessageToPeer(handle, message)
|
||||
// We assume this is always successful as it is always valid to attempt to
|
||||
// Contact a valid hostname
|
||||
return nil
|
||||
}
|
||||
return errors.New("malformed handle type")
|
||||
}
|
||||
|
||||
func (cp *cwtchPeer) UpdateMessageFlags(handle string, mIdx int, flags uint64) {
|
||||
cp.mutex.Lock()
|
||||
defer cp.mutex.Unlock()
|
||||
|
@ -134,9 +160,12 @@ type ModifyServers interface {
|
|||
}
|
||||
|
||||
// SendMessages enables a caller to sender messages to a contact
|
||||
// Note:
|
||||
type SendMessages interface {
|
||||
SendMessage(handle string, message string) error
|
||||
|
||||
SendGetValToPeer(string, string, string)
|
||||
|
||||
// Deprecated
|
||||
SendMessageToPeer(string, string) string
|
||||
|
||||
// TODO This should probably not be exposed
|
||||
|
@ -148,6 +177,8 @@ type SendMessages interface {
|
|||
|
||||
// SendMessagesToGroup enables a caller to sender messages to a group
|
||||
type SendMessagesToGroup interface {
|
||||
|
||||
// Deprecated
|
||||
SendMessageToGroupTracked(string, string) (string, error)
|
||||
}
|
||||
|
||||
|
@ -189,6 +220,8 @@ type CwtchPeer interface {
|
|||
SendMessages
|
||||
ModifyMessages
|
||||
SendMessagesToGroup
|
||||
|
||||
ShareFile(fileKey string, serializedManifest string)
|
||||
}
|
||||
|
||||
// NewCwtchPeer creates and returns a new cwtchPeer with the given name.
|
||||
|
@ -702,6 +735,10 @@ func (cp *cwtchPeer) StoreMessage(onion string, messageTxt string, sent time.Tim
|
|||
cp.mutex.Unlock()
|
||||
}
|
||||
|
||||
// ShareFile registers a file share by publishing a ShareManifest event carrying the
// file key and serialized manifest; the protocol engine subscribes to ShareManifest
// and hands it to its file sharing subsystem.
func (cp *cwtchPeer) ShareFile(fileKey string, serializedManifest string) {
	cp.eventBus.Publish(event.NewEvent(event.ShareManifest, map[event.Field]string{event.FileKey: fileKey, event.SerializedManifest: serializedManifest}))
}
|
||||
|
||||
// eventHandler process events from other subsystems
|
||||
func (cp *cwtchPeer) eventHandler() {
|
||||
for {
|
||||
|
@ -795,6 +832,50 @@ func (cp *cwtchPeer) eventHandler() {
|
|||
|
||||
/***** Non default but requestable handlable events *****/
|
||||
|
||||
case event.ManifestReceived:
|
||||
log.Debugf("Manifest Received Event!: %v", ev)
|
||||
handle := ev.Data[event.Handle]
|
||||
fileKey := ev.Data[event.FileKey]
|
||||
serializedManifest := ev.Data[event.SerializedManifest]
|
||||
|
||||
manifestFilePath, exists := cp.GetAttribute(attr.GetLocalScope(fmt.Sprintf("%v.manifest", fileKey)))
|
||||
if exists {
|
||||
downloadFilePath, exists := cp.GetAttribute(attr.GetLocalScope(fileKey))
|
||||
if exists {
|
||||
log.Debugf("downloading manifest to %v, file to %v", manifestFilePath, downloadFilePath)
|
||||
var manifest files.Manifest
|
||||
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
||||
if err == nil {
|
||||
manifest.Title = manifest.FileName
|
||||
manifest.FileName = downloadFilePath
|
||||
log.Debugf("saving manifest")
|
||||
err = manifest.Save(manifestFilePath)
|
||||
if err != nil {
|
||||
log.Errorf("could not save manifest: %v", err)
|
||||
} else {
|
||||
tempFile := ""
|
||||
if runtime.GOOS == "android" {
|
||||
tempFile = manifestFilePath[0 : len(manifestFilePath)-len(".manifest")]
|
||||
log.Debugf("derived android temp path: %v", tempFile)
|
||||
}
|
||||
cp.eventBus.Publish(event.NewEvent(event.ManifestSaved, map[event.Field]string{
|
||||
event.FileKey: fileKey,
|
||||
event.Handle: handle,
|
||||
event.SerializedManifest: string(manifest.Serialize()),
|
||||
event.TempFile: tempFile,
|
||||
event.NameSuggestion: manifest.Title,
|
||||
}))
|
||||
}
|
||||
} else {
|
||||
log.Errorf("error saving manifest: %v", err)
|
||||
}
|
||||
} else {
|
||||
log.Errorf("found manifest path but not download path for %v", fileKey)
|
||||
}
|
||||
} else {
|
||||
log.Errorf("no download path found for manifest: %v", fileKey)
|
||||
}
|
||||
|
||||
case event.NewRetValMessageFromPeer:
|
||||
onion := ev.Data[event.RemotePeer]
|
||||
scope := ev.Data[event.Scope]
|
||||
|
@ -804,9 +885,22 @@ func (cp *cwtchPeer) eventHandler() {
|
|||
log.Debugf("NewRetValMessageFromPeer %v %v%v %v %v\n", onion, scope, path, exists, val)
|
||||
if exists {
|
||||
if scope == attr.PublicScope {
|
||||
if strings.HasSuffix(path, ".manifest.size") {
|
||||
fileKey := strings.Replace(path, ".manifest.size", "", 1)
|
||||
size, err := strconv.Atoi(val)
|
||||
// if size is valid and below the maximum size for a manifest
|
||||
// this is to prevent malicious sharers from using large amounts of memory when distributing
|
||||
// a manifest as we reconstruct this in-memory
|
||||
if err == nil && size < files.MaxManifestSize {
|
||||
cp.eventBus.Publish(event.NewEvent(event.ManifestSizeReceived, map[event.Field]string{event.FileKey: fileKey, event.ManifestSize: val, event.Handle: onion}))
|
||||
} else {
|
||||
cp.eventBus.Publish(event.NewEvent(event.ManifestError, map[event.Field]string{event.FileKey: fileKey, event.Handle: onion}))
|
||||
}
|
||||
} else {
|
||||
cp.SetContactAttribute(onion, attr.GetPeerScope(path), val)
|
||||
}
|
||||
}
|
||||
}
|
||||
case event.PeerStateChange:
|
||||
cp.mutex.Lock()
|
||||
if _, exists := cp.Profile.Contacts[ev.Data[event.RemotePeer]]; exists {
|
||||
|
|
|
@ -1,12 +1,18 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/protocol/groups"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/protocol/files"
|
||||
"cwtch.im/cwtch/protocol/groups"
|
||||
pmodel "cwtch.im/cwtch/protocol/model"
|
||||
"git.openprivacy.ca/cwtch.im/tapir"
|
||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||
"git.openprivacy.ca/cwtch.im/tapir/networks/tor"
|
||||
|
@ -16,9 +22,6 @@ import (
|
|||
"git.openprivacy.ca/openprivacy/log"
|
||||
"github.com/gtank/ristretto255"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type engine struct {
|
||||
|
@ -40,12 +43,17 @@ type engine struct {
|
|||
// Nextgen Tapir Service
|
||||
service tapir.Service
|
||||
|
||||
getValRequests sync.Map // [string]string eventID:Data
|
||||
|
||||
// Nextgen Tapir Service
|
||||
ephemeralServices sync.Map // string(onion) => tapir.Service
|
||||
|
||||
// Required for listen(), inaccessible from identity
|
||||
privateKey ed25519.PrivateKey
|
||||
|
||||
// file sharing subsystem is responsible for maintaining active shares and downloads
|
||||
filesharingSubSystem files.FileSharingSubSystem
|
||||
|
||||
shuttingDown bool
|
||||
}
|
||||
|
||||
|
@ -92,6 +100,11 @@ func NewProtocolEngine(identity primitives.Identity, privateKey ed25519.PrivateK
|
|||
engine.eventManager.Subscribe(event.BlockUnknownPeers, engine.queue)
|
||||
engine.eventManager.Subscribe(event.AllowUnknownPeers, engine.queue)
|
||||
|
||||
// File Handling
|
||||
engine.eventManager.Subscribe(event.ShareManifest, engine.queue)
|
||||
engine.eventManager.Subscribe(event.ManifestSizeReceived, engine.queue)
|
||||
engine.eventManager.Subscribe(event.ManifestSaved, engine.queue)
|
||||
|
||||
for peer, authorization := range peerAuthorizations {
|
||||
engine.authorizations.Store(peer, authorization)
|
||||
}
|
||||
|
@ -125,7 +138,10 @@ func (e *engine) eventHandler() {
|
|||
go e.peerWithOnion(ev.Data[event.RemotePeer])
|
||||
}
|
||||
case event.InvitePeerToGroup:
|
||||
e.sendMessageToPeer(ev.EventID, ev.Data[event.RemotePeer], event.ContextInvite, []byte(ev.Data[event.GroupInvite]))
|
||||
err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: event.ContextInvite, Data: []byte(ev.Data[event.GroupInvite])})
|
||||
if err != nil {
|
||||
|
||||
}
|
||||
case event.JoinServer:
|
||||
signature, err := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
||||
if err != nil {
|
||||
|
@ -145,21 +161,24 @@ func (e *engine) eventHandler() {
|
|||
case event.SendMessageToGroup:
|
||||
ciphertext, _ := base64.StdEncoding.DecodeString(ev.Data[event.Ciphertext])
|
||||
signature, _ := base64.StdEncoding.DecodeString(ev.Data[event.Signature])
|
||||
go e.sendMessageToGroup(ev.Data[event.GroupID], ev.Data[event.GroupServer], ciphertext, signature)
|
||||
go e.sendMessageToGroup(ev.Data[event.GroupID], ev.Data[event.GroupServer], ciphertext, signature, 0)
|
||||
case event.SendMessageToPeer:
|
||||
// TODO: remove this passthrough once the UI is integrated.
|
||||
context, ok := ev.Data[event.EventContext]
|
||||
if !ok {
|
||||
context = event.ContextRaw
|
||||
}
|
||||
err := e.sendMessageToPeer(ev.EventID, ev.Data[event.RemotePeer], context, []byte(ev.Data[event.Data]))
|
||||
if err != nil {
|
||||
if err := e.sendPeerMessage(ev.Data[event.RemotePeer], pmodel.PeerMessage{ID: ev.EventID, Context: context, Data: []byte(ev.Data[event.Data])}); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: "peer is offline or the connection has yet to finalize"}))
|
||||
}
|
||||
case event.SendGetValMessageToPeer:
|
||||
e.sendGetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Scope], ev.Data[event.Path])
|
||||
if err := e.sendGetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Scope], ev.Data[event.Path]); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
||||
}
|
||||
case event.SendRetValMessageToPeer:
|
||||
e.sendRetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Data], ev.Data[event.Exists])
|
||||
if err := e.sendRetValToPeer(ev.EventID, ev.Data[event.RemotePeer], ev.Data[event.Data], ev.Data[event.Exists]); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
||||
}
|
||||
case event.SetPeerAuthorization:
|
||||
auth := model.Authorization(ev.Data[event.Authorization])
|
||||
e.authorizations.Store(ev.Data[event.RemotePeer], auth)
|
||||
|
@ -181,6 +200,29 @@ func (e *engine) eventHandler() {
|
|||
e.blockUnknownContacts = true
|
||||
case event.ProtocolEngineStartListen:
|
||||
go e.listenFn()
|
||||
case event.ShareManifest:
|
||||
e.filesharingSubSystem.ShareFile(ev.Data[event.FileKey], ev.Data[event.SerializedManifest])
|
||||
case event.ManifestSizeReceived:
|
||||
handle := ev.Data[event.Handle]
|
||||
key := ev.Data[event.FileKey]
|
||||
size, _ := strconv.Atoi(ev.Data[event.ManifestSize])
|
||||
if err := e.sendPeerMessage(handle, e.filesharingSubSystem.FetchManifest(key, uint64(size))); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
||||
}
|
||||
case event.ManifestSaved:
|
||||
handle := ev.Data[event.Handle]
|
||||
key := ev.Data[event.FileKey]
|
||||
serializedManifest := ev.Data[event.SerializedManifest]
|
||||
tempFile := ev.Data[event.TempFile]
|
||||
title := ev.Data[event.NameSuggestion]
|
||||
// NOTE: for now there will probably only ever be a single chunk request. When we enable group
|
||||
// sharing and rehosting then this loop will serve as a a way of splitting the request among multiple
|
||||
// contacts
|
||||
for _, message := range e.filesharingSubSystem.CompileChunkRequests(key, serializedManifest, tempFile, title) {
|
||||
if err := e.sendPeerMessage(handle, message); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: ev.Data[event.RemotePeer], event.EventID: ev.EventID, event.Error: err.Error()}))
|
||||
}
|
||||
}
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
@ -217,7 +259,6 @@ func (e *engine) createPeerTemplate() *PeerApp {
|
|||
peerAppTemplate.OnAuth = e.ignoreOnShutdown(e.peerAuthed)
|
||||
peerAppTemplate.OnConnecting = e.ignoreOnShutdown(e.peerConnecting)
|
||||
peerAppTemplate.OnClose = e.ignoreOnShutdown(e.peerDisconnected)
|
||||
peerAppTemplate.RetValHandler = e.handlePeerRetVal
|
||||
return peerAppTemplate
|
||||
}
|
||||
|
||||
|
@ -404,28 +445,16 @@ func (e *engine) peerDisconnected(onion string) {
|
|||
}))
|
||||
}
|
||||
|
||||
// sendMessageToPeer sends a message to a peer under a given context
|
||||
func (e *engine) sendMessageToPeer(eventID string, onion string, context string, message []byte) error {
|
||||
conn, err := e.service.WaitForCapabilityOrClose(onion, cwtchCapability)
|
||||
if err == nil {
|
||||
peerApp, ok := (conn.App()).(*PeerApp)
|
||||
if ok {
|
||||
peerApp.SendMessage(PeerMessage{eventID, context, message})
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed type assertion conn.App != PeerApp")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (e *engine) sendGetValToPeer(eventID, onion, scope, path string) error {
|
||||
log.Debugf("sendGetValMessage to peer %v %v%v\n", onion, scope, path)
|
||||
log.Debugf("sendGetValMessage to peer %v %v.%v\n", onion, scope, path)
|
||||
getVal := peerGetVal{Scope: scope, Path: path}
|
||||
message, err := json.Marshal(getVal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return e.sendMessageToPeer(eventID, onion, event.ContextGetVal, message)
|
||||
|
||||
e.getValRequests.Store(onion+eventID, message)
|
||||
return e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextGetVal, Data: message})
|
||||
}
|
||||
|
||||
func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
|
||||
|
@ -436,7 +465,7 @@ func (e *engine) sendRetValToPeer(eventID, onion, val, existsStr string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return e.sendMessageToPeer(eventID, onion, event.ContextRetVal, message)
|
||||
return e.sendPeerMessage(onion, pmodel.PeerMessage{ID: eventID, Context: event.ContextRetVal, Data: message})
|
||||
}
|
||||
|
||||
func (e *engine) deleteConnection(id string) {
|
||||
|
@ -454,7 +483,18 @@ func (e *engine) receiveGroupMessage(server string, gm *groups.EncryptedGroupMes
|
|||
}
|
||||
|
||||
// sendMessageToGroup attempts to sent the given message to the given group id.
|
||||
func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, sig []byte) {
|
||||
func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, sig []byte, attempts int) {
|
||||
|
||||
// sending to groups can fail for a few reasons (slow server, not enough tokens, etc.)
|
||||
// rather than trying to keep all that logic in method we simply back-off and try again
|
||||
// but if we fail more than 5 times then we report back to the client so they can investigate other options.
|
||||
// Note: This flow only applies to online-and-connected servers (this method will return faster if the server is not
|
||||
// online)
|
||||
if attempts >= 5 {
|
||||
log.Errorf("failed to post a message to a group after %v attempts", attempts)
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "could not make payment to server", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||
return
|
||||
}
|
||||
|
||||
es, ok := e.ephemeralServices.Load(server)
|
||||
if es == nil || !ok {
|
||||
|
@ -468,19 +508,16 @@ func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, si
|
|||
tokenApp, ok := (conn.App()).(*TokenBoardClient)
|
||||
if ok {
|
||||
if spent, numtokens := tokenApp.Post(ct, sig); !spent {
|
||||
// TODO: while this works for the spam guard, it won't work for other forms of payment...
|
||||
// Make an -inline- payment, this will hold the goroutine
|
||||
if err := tokenApp.MakePayment(); err == nil {
|
||||
// This really shouldn't fail since we now know we have the required tokens...
|
||||
if spent, _ := tokenApp.Post(ct, sig); !spent {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: err.Error(), event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||
}
|
||||
} else {
|
||||
// Broadast the token error
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: err.Error(), event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||
}
|
||||
} else if numtokens < 5 {
|
||||
// we failed to post, probably because we ran out of tokens... so make a payment
|
||||
go tokenApp.MakePayment()
|
||||
// backoff
|
||||
time.Sleep(time.Second * 5)
|
||||
// try again
|
||||
e.sendMessageToGroup(groupID, server, ct, sig, attempts+1)
|
||||
} else {
|
||||
if numtokens < 5 {
|
||||
go tokenApp.MakePayment()
|
||||
}
|
||||
}
|
||||
// regardless we return....
|
||||
return
|
||||
|
@ -489,9 +526,22 @@ func (e *engine) sendMessageToGroup(groupID string, server string, ct []byte, si
|
|||
e.eventManager.Publish(event.NewEvent(event.SendMessageToGroupError, map[event.Field]string{event.GroupID: groupID, event.GroupServer: server, event.Error: "server-connection-not-valid", event.Signature: base64.StdEncoding.EncodeToString(sig)}))
|
||||
}
|
||||
|
||||
// TODO this is becoming cluttered
|
||||
func (e *engine) handlePeerMessage(hostname string, eventID string, context string, message []byte) {
|
||||
log.Debugf("New message from peer: %v %v", hostname, context)
|
||||
if context == event.ContextGetVal {
|
||||
|
||||
if context == event.ContextAck {
|
||||
e.peerAck(hostname, eventID)
|
||||
} else if context == event.ContextRetVal {
|
||||
req, ok := e.getValRequests.Load(hostname + eventID)
|
||||
if ok {
|
||||
reqStr := req.([]byte)
|
||||
e.handlePeerRetVal(hostname, reqStr, message)
|
||||
e.getValRequests.Delete(hostname + eventID)
|
||||
} else {
|
||||
log.Errorf("could not find val request for %v %s", hostname, eventID)
|
||||
}
|
||||
} else if context == event.ContextGetVal {
|
||||
var getVal peerGetVal
|
||||
err := json.Unmarshal(message, &getVal)
|
||||
if err == nil {
|
||||
|
@ -499,8 +549,46 @@ func (e *engine) handlePeerMessage(hostname string, eventID string, context stri
|
|||
ev.EventID = eventID
|
||||
e.eventManager.Publish(ev)
|
||||
}
|
||||
} else if context == event.ContextRequestManifest {
|
||||
for _, message := range e.filesharingSubSystem.RequestManifestParts(eventID) {
|
||||
if err := e.sendPeerMessage(hostname, message); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
||||
}
|
||||
}
|
||||
} else if context == event.ContextSendManifest {
|
||||
if fileKey, manifest := e.filesharingSubSystem.ReceiveManifestPart(eventID, message); len(manifest) != 0 {
|
||||
// We have a valid manifest
|
||||
e.eventManager.Publish(event.NewEvent(event.ManifestReceived, map[event.Field]string{event.Handle: hostname, event.FileKey: fileKey, event.SerializedManifest: manifest}))
|
||||
}
|
||||
} else if context == event.ContextRequestFile {
|
||||
for _, message := range e.filesharingSubSystem.ProcessChunkRequest(eventID, message) {
|
||||
if err := e.sendPeerMessage(hostname, message); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
||||
}
|
||||
}
|
||||
} else if context == event.ContextSendFile {
|
||||
fileKey, progress, totalChunks, _, title := e.filesharingSubSystem.ProcessChunk(eventID, message)
|
||||
if len(fileKey) != 0 {
|
||||
e.eventManager.Publish(event.NewEvent(event.FileDownloadProgressUpdate, map[event.Field]string{event.FileKey: fileKey, event.Progress: strconv.Itoa(int(progress)), event.FileSizeInChunks: strconv.Itoa(int(totalChunks)), event.NameSuggestion: title}))
|
||||
if progress == totalChunks {
|
||||
if tempFile, filePath, success := e.filesharingSubSystem.VerifyFile(fileKey); success {
|
||||
log.Debugf("file verified and downloaded!")
|
||||
e.eventManager.Publish(event.NewEvent(event.FileDownloaded, map[event.Field]string{event.FileKey: fileKey, event.FilePath: filePath, event.TempFile: tempFile}))
|
||||
} else {
|
||||
log.Debugf("file failed to verify!")
|
||||
e.eventManager.Publish(event.NewEvent(event.FileVerificationFailed, map[event.Field]string{event.FileKey: fileKey}))
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Fall through handler for the default text conversation.
|
||||
e.eventManager.Publish(event.NewEvent(event.NewMessageFromPeer, map[event.Field]string{event.TimestampReceived: time.Now().Format(time.RFC3339Nano), event.RemotePeer: hostname, event.Data: string(message)}))
|
||||
|
||||
// Send an explicit acknowledgement
|
||||
// Every other protocol should have a explicit acknowledgement message e.g. value lookups have responses, and file handling has an explicit flow
|
||||
if err := e.sendPeerMessage(hostname, pmodel.PeerMessage{ID: eventID, Context: event.ContextAck, Data: []byte{}}); err != nil {
|
||||
e.eventManager.Publish(event.NewEvent(event.SendMessageToPeerError, map[event.Field]string{event.RemotePeer: hostname, event.EventID: eventID, event.Error: err.Error()}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -530,3 +618,17 @@ func (e *engine) leaveServer(server string) {
|
|||
e.ephemeralServices.Delete(server)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *engine) sendPeerMessage(handle string, message pmodel.PeerMessage) error {
|
||||
conn, err := e.service.WaitForCapabilityOrClose(handle, cwtchCapability)
|
||||
if err == nil {
|
||||
peerApp, ok := (conn.App()).(*PeerApp)
|
||||
if ok {
|
||||
return peerApp.SendMessage(message)
|
||||
}
|
||||
log.Debugf("could not derive peer app: %v", err)
|
||||
return fmt.Errorf("could not find peer app to send message to: %v", handle)
|
||||
}
|
||||
log.Debugf("could not send peer message: %v", err)
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -1,12 +1,11 @@
|
|||
package connections
|
||||
|
||||
import (
|
||||
"cwtch.im/cwtch/event"
|
||||
model2 "cwtch.im/cwtch/protocol/model"
|
||||
"encoding/json"
|
||||
"git.openprivacy.ca/cwtch.im/tapir"
|
||||
"git.openprivacy.ca/cwtch.im/tapir/applications"
|
||||
"git.openprivacy.ca/openprivacy/log"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const cwtchCapability = tapir.Capability("cwtchCapability")
|
||||
|
@ -16,22 +15,12 @@ type PeerApp struct {
|
|||
applications.AuthApp
|
||||
connection tapir.Connection
|
||||
MessageHandler func(string, string, string, []byte)
|
||||
RetValHandler func(string, []byte, []byte)
|
||||
IsBlocked func(string) bool
|
||||
IsAllowed func(string) bool
|
||||
OnAcknowledgement func(string, string)
|
||||
OnAuth func(string)
|
||||
OnClose func(string)
|
||||
OnConnecting func(string)
|
||||
|
||||
getValRequests sync.Map // [string]string eventID:Data
|
||||
}
|
||||
|
||||
// PeerMessage is an encapsulation that can be used by higher level applications
|
||||
type PeerMessage struct {
|
||||
ID string // A unique Message ID (primarily used for acknowledgments)
|
||||
Context string // A unique context identifier i.e. im.cwtch.chat
|
||||
Data []byte // The serialized data packet.
|
||||
}
|
||||
|
||||
type peerGetVal struct {
|
||||
|
@ -53,7 +42,6 @@ func (pa *PeerApp) NewInstance() tapir.Application {
|
|||
newApp.OnAuth = pa.OnAuth
|
||||
newApp.OnClose = pa.OnClose
|
||||
newApp.OnConnecting = pa.OnConnecting
|
||||
newApp.RetValHandler = pa.RetValHandler
|
||||
return newApp
|
||||
}
|
||||
|
||||
|
@ -88,26 +76,11 @@ func (pa *PeerApp) listen() {
|
|||
pa.OnClose(pa.connection.Hostname())
|
||||
return
|
||||
}
|
||||
var peerMessage PeerMessage
|
||||
var peerMessage model2.PeerMessage
|
||||
err := json.Unmarshal(message, &peerMessage)
|
||||
if err == nil {
|
||||
switch peerMessage.Context {
|
||||
case event.ContextAck:
|
||||
pa.OnAcknowledgement(pa.connection.Hostname(), peerMessage.ID)
|
||||
case event.ContextRetVal:
|
||||
req, ok := pa.getValRequests.Load(peerMessage.ID)
|
||||
if ok {
|
||||
reqStr := []byte(req.(string))
|
||||
pa.RetValHandler(pa.connection.Hostname(), reqStr, peerMessage.Data)
|
||||
pa.getValRequests.Delete(peerMessage.ID)
|
||||
}
|
||||
default:
|
||||
if pa.IsAllowed(pa.connection.Hostname()) {
|
||||
pa.MessageHandler(pa.connection.Hostname(), peerMessage.ID, peerMessage.Context, peerMessage.Data)
|
||||
|
||||
// Acknowledge the message
|
||||
pa.SendMessage(PeerMessage{peerMessage.ID, event.ContextAck, []byte{}})
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Errorf("Error unmarshalling PeerMessage package: %x %v", message, err)
|
||||
|
@ -117,10 +90,11 @@ func (pa *PeerApp) listen() {
|
|||
|
||||
// SendMessage sends the peer a preformatted message
|
||||
// NOTE: This is a stub, we will likely want to extend this to better reflect the desired protocol
|
||||
func (pa *PeerApp) SendMessage(message PeerMessage) {
|
||||
if message.Context == event.ContextGetVal {
|
||||
pa.getValRequests.Store(message.ID, string(message.Data))
|
||||
}
|
||||
serialized, _ := json.Marshal(message)
|
||||
func (pa *PeerApp) SendMessage(message model2.PeerMessage) error {
|
||||
serialized, err := json.Marshal(message)
|
||||
if err == nil {
|
||||
pa.connection.Send(serialized)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -12,7 +12,9 @@ import (
|
|||
"git.openprivacy.ca/openprivacy/connectivity"
|
||||
"git.openprivacy.ca/openprivacy/log"
|
||||
"github.com/gtank/ristretto255"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewTokenBoardClient generates a new Client for Token Board
|
||||
|
@ -179,11 +181,16 @@ func (ta *TokenBoardClient) MakePayment() error {
|
|||
powTokenApp := new(applications.ApplicationChain).
|
||||
ChainApplication(new(applications.ProofOfWorkApplication), applications.SuccessfulProofOfWorkCapability).
|
||||
ChainApplication(tokenApplication, applications.HasTokensCapability)
|
||||
client.Connect(ta.tokenServiceOnion, powTokenApp)
|
||||
|
||||
log.Debugf("Waiting for successful PoW Auth...")
|
||||
|
||||
connected, err := client.Connect(ta.tokenServiceOnion, powTokenApp)
|
||||
if connected == true && err == nil {
|
||||
log.Debugf("Waiting for successful Token Acquisition...")
|
||||
conn, err := client.WaitForCapabilityOrClose(ta.tokenServiceOnion, applications.HasTokensCapability)
|
||||
if err == nil {
|
||||
powtapp, _ := conn.App().(*applications.TokenApplication)
|
||||
powtapp, ok := conn.App().(*applications.TokenApplication)
|
||||
if ok {
|
||||
// Update tokens...we need a lock here to prevent SpendToken from modifying the tokens
|
||||
// during this process..
|
||||
log.Debugf("Updating Tokens")
|
||||
|
@ -194,8 +201,18 @@ func (ta *TokenBoardClient) MakePayment() error {
|
|||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Error making payment: to %v %v", ta.tokenServiceOnion, err)
|
||||
return err
|
||||
log.Errorf("invalid cast of powapp. this should not happen %v %v", powtapp, reflect.TypeOf(conn.App()))
|
||||
return errors.New("invalid cast of powapp. this should never happen")
|
||||
}
|
||||
log.Debugf("could not connect to payment server %v..trying again")
|
||||
return ta.MakePayment()
|
||||
} else if connected && err != nil {
|
||||
log.Debugf("inexplicable error: %v", err)
|
||||
}
|
||||
log.Debugf("failed to make a connection. trying again...")
|
||||
// it doesn't actually take that long to make a payment, so waiting a small amount of time should suffice
|
||||
time.Sleep(time.Second)
|
||||
return ta.MakePayment()
|
||||
}
|
||||
|
||||
// NextToken retrieves the next token
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
package files
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ChunkSpec is a wrapper around an uncompressed array of chunk identifiers
|
||||
type ChunkSpec []uint64
|
||||
|
||||
// CreateChunkSpec given a full list of chunks with their downloaded status (true for downloaded, false otherwise)
|
||||
// derives a list of identifiers of chunks that have not been downloaded yet
|
||||
func CreateChunkSpec(progress []bool) ChunkSpec {
|
||||
var chunks ChunkSpec
|
||||
for i, p := range progress {
|
||||
if !p {
|
||||
chunks = append(chunks, uint64(i))
|
||||
}
|
||||
}
|
||||
return chunks
|
||||
}
|
||||
|
||||
// Deserialize takes in a compressed chunk spec and returns an uncompressed ChunkSpec or an error
|
||||
// if the serialized chunk spec has format errors
|
||||
func Deserialize(serialized string) (*ChunkSpec, error) {
|
||||
|
||||
var chunkSpec ChunkSpec
|
||||
|
||||
if len(serialized) == 0 {
|
||||
return &chunkSpec, nil
|
||||
}
|
||||
|
||||
ranges := strings.Split(serialized, ",")
|
||||
for _, r := range ranges {
|
||||
parts := strings.Split(r, ":")
|
||||
if len(parts) == 1 {
|
||||
single, err := strconv.Atoi(r)
|
||||
if err != nil {
|
||||
return nil, errors.New("invalid chunk spec")
|
||||
}
|
||||
chunkSpec = append(chunkSpec, uint64(single))
|
||||
} else if len(parts) == 2 {
|
||||
start, err1 := strconv.Atoi(parts[0])
|
||||
end, err2 := strconv.Atoi(parts[1])
|
||||
if err1 != nil || err2 != nil {
|
||||
return nil, errors.New("invalid chunk spec")
|
||||
}
|
||||
for i := start; i <= end; i++ {
|
||||
chunkSpec = append(chunkSpec, uint64(i))
|
||||
}
|
||||
} else {
|
||||
return nil, errors.New("invalid chunk spec")
|
||||
}
|
||||
}
|
||||
return &chunkSpec, nil
|
||||
}
|
||||
|
||||
// Serialize compresses the ChunkSpec into a list of inclusive ranges e.g. 1,2,3,5,6,7 becomes "1:3,5:7"
|
||||
func (cs ChunkSpec) Serialize() string {
|
||||
result := ""
|
||||
|
||||
i := 0
|
||||
|
||||
for {
|
||||
if i >= len(cs) {
|
||||
break
|
||||
}
|
||||
j := i + 1
|
||||
for ; j < len(cs) && cs[j] == cs[j-1]+1; j++ {
|
||||
}
|
||||
|
||||
if result != "" {
|
||||
result += ","
|
||||
}
|
||||
|
||||
if j == i+1 {
|
||||
result += fmt.Sprintf("%d", cs[i])
|
||||
} else {
|
||||
result += fmt.Sprintf("%d:%d", cs[i], cs[j-1])
|
||||
}
|
||||
i = j
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
package files
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestChunkSpec(t *testing.T) {
|
||||
|
||||
var testCases = map[string]ChunkSpec{
|
||||
"0": CreateChunkSpec([]bool{false}),
|
||||
"0:10": CreateChunkSpec([]bool{false, false, false, false, false, false, false, false, false, false, false}),
|
||||
"0:1,3:5,7:9": CreateChunkSpec([]bool{false, false, true, false, false, false, true, false, false, false, true}),
|
||||
"": CreateChunkSpec([]bool{true, true, true, true, true, true, true, true, true, true, true}),
|
||||
"2,5,8,10": CreateChunkSpec([]bool{true, true, false, true, true, false, true, true, false, true, false}),
|
||||
//
|
||||
"0,2:10": CreateChunkSpec([]bool{false, true, false, false, false, false, false, false, false, false, false}),
|
||||
"0:8,10": CreateChunkSpec([]bool{false, false, false, false, false, false, false, false, false, true, false}),
|
||||
"1:9": CreateChunkSpec([]bool{true, false, false, false, false, false, false, false, false, false, true}),
|
||||
}
|
||||
|
||||
for k, v := range testCases {
|
||||
if k != v.Serialize() {
|
||||
t.Fatalf("got %v but expected %v", v.Serialize(), k)
|
||||
}
|
||||
t.Logf("%v == %v", k, v.Serialize())
|
||||
}
|
||||
|
||||
for k, v := range testCases {
|
||||
if cs, err := Deserialize(k); err != nil {
|
||||
t.Fatalf("error deserialized key: %v %v", k, err)
|
||||
} else {
|
||||
if v.Serialize() != cs.Serialize() {
|
||||
t.Fatalf("got %v but expected %v", v.Serialize(), cs.Serialize())
|
||||
}
|
||||
t.Logf("%v == %v", cs.Serialize(), v.Serialize())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,233 @@
|
|||
package files
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/protocol/model"
|
||||
"git.openprivacy.ca/openprivacy/log"
|
||||
)
|
||||
|
||||
// FileSharingSubSystem encapsulates the functionality necessary to share and download files via Cwtch
|
||||
//
|
||||
type FileSharingSubSystem struct {
|
||||
|
||||
// for sharing files
|
||||
activeShares sync.Map // file key to manifest
|
||||
|
||||
// for downloading files
|
||||
prospectiveManifests sync.Map // file key to serialized manifests
|
||||
activeDownloads sync.Map // file key to manifests
|
||||
}
|
||||
|
||||
// ShareFile given a file key and a serialized manifest, allow the serialized manifest to be downloaded
|
||||
// by Cwtch profiles in possession of the fileKey
|
||||
func (fsss *FileSharingSubSystem) ShareFile(fileKey string, serializedManifest string) {
|
||||
var manifest Manifest
|
||||
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
||||
if err != nil {
|
||||
log.Errorf("could not share file %v", err)
|
||||
return
|
||||
}
|
||||
fsss.activeShares.Store(fileKey, &manifest)
|
||||
}
|
||||
|
||||
// FetchManifest given a file key and knowledge of the manifest size in chunks (obtained via an attribute lookup)
|
||||
// construct a request to download the manifest.
|
||||
func (fsss *FileSharingSubSystem) FetchManifest(fileKey string, manifestSize uint64) model.PeerMessage {
|
||||
fsss.prospectiveManifests.Store(fileKey, strings.Repeat("\"", int(manifestSize*DefaultChunkSize)))
|
||||
return model.PeerMessage{
|
||||
Context: event.ContextRequestManifest,
|
||||
ID: fileKey,
|
||||
Data: []byte{},
|
||||
}
|
||||
}
|
||||
|
||||
// CompileChunkRequests takes in a complete serializedManifest and returns a set of chunk request messages
|
||||
// TODO in the future we will want this to return the handles of contacts to request chunks from
|
||||
func (fsss *FileSharingSubSystem) CompileChunkRequests(fileKey, serializedManifest, tempFile, title string) []model.PeerMessage {
|
||||
var manifest Manifest
|
||||
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
||||
var messages []model.PeerMessage
|
||||
if err == nil {
|
||||
manifest.TempFileName = tempFile
|
||||
manifest.Title = title
|
||||
err := manifest.PrepareDownload()
|
||||
if err == nil {
|
||||
fsss.activeDownloads.Store(fileKey, &manifest)
|
||||
log.Debugf("downloading file chunks: %v", manifest.GetChunkRequest().Serialize())
|
||||
messages = append(messages, model.PeerMessage{
|
||||
ID: fileKey,
|
||||
Context: event.ContextRequestFile,
|
||||
Data: []byte(manifest.GetChunkRequest().Serialize()),
|
||||
})
|
||||
} else {
|
||||
log.Errorf("couldn't prepare download: %v", err)
|
||||
}
|
||||
}
|
||||
return messages
|
||||
}
|
||||
|
||||
// RequestManifestParts given a fileKey construct a set of messages representing requests to download various
|
||||
// parts of the Manifest
|
||||
func (fsss *FileSharingSubSystem) RequestManifestParts(fileKey string) []model.PeerMessage {
|
||||
manifestI, exists := fsss.activeShares.Load(fileKey)
|
||||
var messages []model.PeerMessage
|
||||
if exists {
|
||||
oldManifest := manifestI.(*Manifest)
|
||||
serializedOldManifest := oldManifest.Serialize()
|
||||
log.Debugf("found serialized manifest: %s", serializedOldManifest)
|
||||
|
||||
// copy so we dont get threading issues by modifying the original
|
||||
// and then redact the file path before sending
|
||||
// nb: manifest.size has already been corrected elsewhere
|
||||
var manifest Manifest
|
||||
json.Unmarshal([]byte(serializedOldManifest), &manifest)
|
||||
manifest.FileName = path.Base(manifest.FileName)
|
||||
serializedManifest := manifest.Serialize()
|
||||
|
||||
chunkID := 0
|
||||
for i := 0; i < len(serializedManifest); i += DefaultChunkSize {
|
||||
offset := i
|
||||
end := i + DefaultChunkSize
|
||||
// truncate end
|
||||
if end > len(serializedManifest) {
|
||||
end = len(serializedManifest)
|
||||
}
|
||||
chunk := serializedManifest[offset:end]
|
||||
// request this manifest part
|
||||
messages = append(messages, model.PeerMessage{
|
||||
Context: event.ContextSendManifest,
|
||||
ID: fmt.Sprintf("%s.%d", fileKey, chunkID),
|
||||
Data: chunk,
|
||||
})
|
||||
chunkID++
|
||||
}
|
||||
}
|
||||
return messages
|
||||
}
|
||||
|
||||
// ReceiveManifestPart given a manifestKey reconstruct part the manifest from the provided part
|
||||
func (fsss *FileSharingSubSystem) ReceiveManifestPart(manifestKey string, part []byte) (fileKey string, serializedManifest string) {
|
||||
fileKeyParts := strings.Split(manifestKey, ".")
|
||||
if len(fileKeyParts) == 3 { // rootHash.nonce.manifestPart
|
||||
fileKey = fmt.Sprintf("%s.%s", fileKeyParts[0], fileKeyParts[1])
|
||||
log.Debugf("manifest filekey: %s", fileKey)
|
||||
manifestPart, err := strconv.Atoi(fileKeyParts[2])
|
||||
if err == nil {
|
||||
serializedManifest, exists := fsss.prospectiveManifests.Load(fileKey)
|
||||
if exists {
|
||||
serializedManifest := serializedManifest.(string)
|
||||
log.Debugf("loaded manifest")
|
||||
offset := manifestPart * DefaultChunkSize
|
||||
end := (manifestPart + 1) * DefaultChunkSize
|
||||
|
||||
log.Debugf("storing manifest part %v %v", offset, end)
|
||||
serializedManifestBytes := []byte(serializedManifest)
|
||||
copy(serializedManifestBytes[offset:end], part[:])
|
||||
|
||||
if len(part) < DefaultChunkSize {
|
||||
serializedManifestBytes = serializedManifestBytes[0 : len(serializedManifestBytes)-(DefaultChunkSize-len(part))]
|
||||
}
|
||||
|
||||
serializedManifest = string(serializedManifestBytes)
|
||||
fsss.prospectiveManifests.Store(fileKey, serializedManifest)
|
||||
log.Debugf("current manifest: [%s]", serializedManifest)
|
||||
var manifest Manifest
|
||||
err := json.Unmarshal([]byte(serializedManifest), &manifest)
|
||||
if err == nil && hex.EncodeToString(manifest.RootHash) == fileKeyParts[0] {
|
||||
log.Debugf("valid manifest received! %x", manifest.RootHash)
|
||||
return fileKey, serializedManifest
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// ProcessChunkRequest given a fileKey, and a chunk request, compile a set of responses for each requested Chunk
|
||||
func (fsss *FileSharingSubSystem) ProcessChunkRequest(fileKey string, serializedChunkRequest []byte) []model.PeerMessage {
|
||||
log.Debugf("chunk request: %v", fileKey)
|
||||
// fileKey is rootHash.nonce
|
||||
manifestI, exists := fsss.activeShares.Load(fileKey)
|
||||
var messages []model.PeerMessage
|
||||
if exists {
|
||||
manifest := manifestI.(*Manifest)
|
||||
log.Debugf("manifest found: %x", manifest.RootHash)
|
||||
chunkSpec, err := Deserialize(string(serializedChunkRequest))
|
||||
log.Debugf("deserialized chunk spec found: %v [%s]", chunkSpec, serializedChunkRequest)
|
||||
if err == nil {
|
||||
for _, chunk := range *chunkSpec {
|
||||
contents, err := manifest.GetChunkBytes(chunk)
|
||||
if err == nil {
|
||||
log.Debugf("sending chunk: %v %x", chunk, contents)
|
||||
messages = append(messages, model.PeerMessage{
|
||||
ID: fmt.Sprintf("%v.%d", fileKey, chunk),
|
||||
Context: event.ContextSendFile,
|
||||
Data: contents,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return messages
|
||||
}
|
||||
|
||||
// ProcessChunk given a chunk key and a chunk attempt to store and verify the chunk as part of an active download
|
||||
// If this results in the file download being completed return downloaded = true
|
||||
// Always return the progress of a matched download if it exists along with the total number of chunks and the
|
||||
// given chunk ID
|
||||
// If not such active download exists then return an empty file key and ignore all further processing.
|
||||
func (fsss *FileSharingSubSystem) ProcessChunk(chunkKey string, chunk []byte) (fileKey string, progress uint64, totalChunks uint64, chunkID uint64, title string) {
|
||||
fileKeyParts := strings.Split(chunkKey, ".")
|
||||
log.Debugf("got chunk for %s", fileKeyParts)
|
||||
if len(fileKeyParts) == 3 { // fileKey is rootHash.nonce.chunk
|
||||
// recalculate file key
|
||||
fileKey = fmt.Sprintf("%s.%s", fileKeyParts[0], fileKeyParts[1])
|
||||
derivedChunkID, err := strconv.Atoi(fileKeyParts[2])
|
||||
if err == nil {
|
||||
chunkID = uint64(derivedChunkID)
|
||||
log.Debugf("got chunk id %d", chunkID)
|
||||
manifestI, exists := fsss.activeDownloads.Load(fileKey)
|
||||
if exists {
|
||||
manifest := manifestI.(*Manifest)
|
||||
totalChunks = uint64(len(manifest.Chunks))
|
||||
title = manifest.Title
|
||||
log.Debugf("found active manifest %v", manifest)
|
||||
progress, err = manifest.StoreChunk(chunkID, chunk)
|
||||
log.Debugf("attempts to store chunk %v %v", progress, err)
|
||||
if err != nil {
|
||||
log.Debugf("error storing chunk: %v", err)
|
||||
// malicious contacts who share conversations can share random chunks
|
||||
// these will not match the chunk hash and as such will fail.
|
||||
// at this point we can't differentiate between a malicious chunk and failure to store a
|
||||
// legitimate chunk, so if there is an error we silently drop it and expect the higher level callers (e.g. the ui)
|
||||
//to detect and respond to missing chunks if it detects them..
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// VerifyFile returns true if the file has been downloaded, false otherwise
|
||||
// as well as the temporary filename, if one was used
|
||||
func (fsss *FileSharingSubSystem) VerifyFile(fileKey string) (tempFile string, filePath string, downloaded bool) {
|
||||
manifestI, exists := fsss.activeDownloads.Load(fileKey)
|
||||
if exists {
|
||||
manifest := manifestI.(*Manifest)
|
||||
if manifest.VerifyFile() == nil {
|
||||
manifest.Close()
|
||||
fsss.activeDownloads.Delete(fileKey)
|
||||
log.Debugf("file verified and downloaded!")
|
||||
return manifest.TempFileName, manifest.FileName, true
|
||||
}
|
||||
}
|
||||
return "", "", false
|
||||
}
|
|
@ -0,0 +1,329 @@
|
|||
package files
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/subtle"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Chunk is a wrapper around a hash (the sha256 digest of one chunk-sized slice of a file)
type Chunk []byte

// DefaultChunkSize is the default size (in bytes) of a manifest chunk
const DefaultChunkSize = 4096

// MaxManifestSize is the maximum size of a manifest (in DefaultChunkSize chunks)
// Because we reconstruct the manifest in memory we have to practically limit this size.
// 2622000 * 4096 ~= 10GB using 4096 byte chunks
// This makes the actual manifest size ~125Mb which seems reasonable for a 10Gb file.
// Most file transfers are expected to have manifests that are much smaller.
const MaxManifestSize = 2622000
|
||||
|
||||
// Manifest is a collection of hashes and other metadata needed to reconstruct a file and verify contents given a root hash
type Manifest struct {
	Chunks           []Chunk // sha256 hash of each ChunkSizeInBytes-sized slice of the file, in order
	FileName         string  // path of the file being shared / reconstructed
	RootHash         []byte  // sha512 hash over the entire file contents
	FileSizeInBytes  uint64
	ChunkSizeInBytes uint64
	TempFileName     string `json:"-"` // when set, the on-disk file used in place of FileName (never serialized)
	Title            string `json:"-"` // human-readable name surfaced alongside progress events (never serialized)

	// internal download/share state, never serialized
	chunkComplete []bool   // per-chunk flag: true once the chunk has been stored
	openFd        *os.File // lazily opened handle shared by reads and writes (see getFileHandle)
	progress      uint64   // count of distinct chunks stored so far
	lock          sync.Mutex
}
|
||||
|
||||
// CreateManifest takes in a file path and constructs a file sharing manifest of hashes along with
|
||||
// other information necessary to download, reconstruct and verify the file.
|
||||
func CreateManifest(path string) (*Manifest, error) {
|
||||
// Process file into Chunks
|
||||
f, err := os.Open(path)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
reader := bufio.NewReader(f)
|
||||
buf := make([]byte, DefaultChunkSize)
|
||||
|
||||
var chunks []Chunk
|
||||
fileSizeInBytes := uint64(0)
|
||||
|
||||
rootHash := sha512.New()
|
||||
|
||||
for {
|
||||
n, err := reader.Read(buf)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
break
|
||||
}
|
||||
hash := sha256.New()
|
||||
hash.Write(buf[0:n])
|
||||
rootHash.Write(buf[0:n])
|
||||
chunkHash := hash.Sum(nil)
|
||||
chunks = append(chunks, chunkHash)
|
||||
fileSizeInBytes += uint64(n)
|
||||
}
|
||||
|
||||
return &Manifest{
|
||||
Chunks: chunks,
|
||||
FileName: path,
|
||||
RootHash: rootHash.Sum(nil),
|
||||
ChunkSizeInBytes: DefaultChunkSize,
|
||||
FileSizeInBytes: fileSizeInBytes,
|
||||
chunkComplete: make([]bool, len(chunks)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetChunkBytes takes in a chunk identifier and returns the bytes associated with that chunk
|
||||
// it does not attempt to validate the chunk Hash.
|
||||
func (m *Manifest) GetChunkBytes(id uint64) ([]byte, error) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
if id >= uint64(len(m.Chunks)) {
|
||||
return nil, errors.New("chunk not found")
|
||||
}
|
||||
|
||||
if err := m.getFileHandle(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Seek to Chunk
|
||||
offset, err := m.openFd.Seek(int64(id*m.ChunkSizeInBytes), 0)
|
||||
if (uint64(offset) != id*m.ChunkSizeInBytes) || err != nil {
|
||||
return nil, errors.New("chunk not found")
|
||||
}
|
||||
|
||||
// Read chunk into memory and return...
|
||||
reader := bufio.NewReader(m.openFd)
|
||||
buf := make([]byte, m.ChunkSizeInBytes)
|
||||
n, err := reader.Read(buf)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return buf[0:n], nil
|
||||
}
|
||||
|
||||
// LoadManifest reads in a json serialized Manifest from a file
|
||||
func LoadManifest(filename string) (*Manifest, error) {
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
manifest := new(Manifest)
|
||||
err = json.Unmarshal(bytes, manifest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
manifest.chunkComplete = make([]bool, len(manifest.Chunks))
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// VerifyFile attempts to calculate the rootHash of a file and compare it to the expected rootHash stored in the
|
||||
// manifest
|
||||
func (m *Manifest) VerifyFile() error {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
if err := m.getFileHandle(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
offset, err := m.openFd.Seek(0, 0)
|
||||
if offset != 0 || err != nil {
|
||||
return errors.New("chunk not found")
|
||||
}
|
||||
|
||||
rootHash := sha512.New()
|
||||
reader := bufio.NewReader(m.openFd)
|
||||
buf := make([]byte, m.ChunkSizeInBytes)
|
||||
for {
|
||||
n, err := reader.Read(buf)
|
||||
rootHash.Write(buf[0:n])
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
calculatedRootHash := rootHash.Sum(nil)
|
||||
if subtle.ConstantTimeCompare(m.RootHash, calculatedRootHash) != 1 {
|
||||
return fmt.Errorf("hashes do not match %x %x", m.RootHash, calculatedRootHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StoreChunk takes in a chunk id and contents, verifies the chunk has the expected hash and if so store the contents
|
||||
// in the file.
|
||||
func (m *Manifest) StoreChunk(id uint64, contents []byte) (uint64, error) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
// Check the chunk id
|
||||
if id >= uint64(len(m.Chunks)) {
|
||||
return 0, errors.New("invalid chunk id")
|
||||
}
|
||||
|
||||
// Validate the chunk hash
|
||||
hash := sha256.New()
|
||||
hash.Write(contents)
|
||||
chunkHash := hash.Sum(nil)
|
||||
|
||||
if subtle.ConstantTimeCompare(chunkHash, m.Chunks[id]) != 1 {
|
||||
return 0, fmt.Errorf("invalid chunk hash %x %x", chunkHash, m.Chunks[id])
|
||||
}
|
||||
|
||||
if err := m.getFileHandle(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
offset, err := m.openFd.Seek(int64(id*m.ChunkSizeInBytes), 0)
|
||||
if (uint64(offset) != id*m.ChunkSizeInBytes) || err != nil {
|
||||
return 0, errors.New("chunk not found")
|
||||
}
|
||||
|
||||
// Write the contents of the chunk to the file
|
||||
_, err = m.openFd.Write(contents)
|
||||
|
||||
if err == nil && m.chunkComplete[id] == false {
|
||||
m.chunkComplete[id] = true
|
||||
m.progress++
|
||||
}
|
||||
|
||||
return m.progress, err
|
||||
}
|
||||
|
||||
// getFileHandle lazily opens (and caches in m.openFd) the file descriptor
// backing this manifest. TempFileName, when set, takes precedence over
// FileName. Callers are expected to hold m.lock.
func (m *Manifest) getFileHandle() error {
	if m.openFd == nil {
		useFileName := m.FileName
		if m.TempFileName != "" {
			useFileName = m.TempFileName
		}
		// opened read-write: the same handle serves both chunk reads and writes
		fd, err := os.OpenFile(useFileName, os.O_RDWR, 0600)
		if err != nil {
			return err
		}
		m.openFd = fd
	}
	return nil
}
|
||||
|
||||
// GetChunkRequest returns an uncompressed list of Chunks needed to complete
// the file described in the manifest (i.e. every chunk whose chunkComplete
// flag is still false).
func (m *Manifest) GetChunkRequest() ChunkSpec {
	return CreateChunkSpec(m.chunkComplete)
}
|
||||
|
||||
// PrepareDownload creates an empty file of the expected size of the file described by the manifest
|
||||
// If the file already exists it assume it is the correct file and that it is resuming from when it left off.
|
||||
func (m *Manifest) PrepareDownload() error {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
m.chunkComplete = make([]bool, len(m.Chunks))
|
||||
|
||||
if info, err := os.Stat(m.FileName); os.IsNotExist(err) {
|
||||
useFileName := m.FileName
|
||||
if m.TempFileName != "" {
|
||||
useFileName = m.TempFileName
|
||||
}
|
||||
|
||||
fd, err := os.Create(useFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.openFd = fd
|
||||
|
||||
writer := bufio.NewWriter(m.openFd)
|
||||
buf := make([]byte, m.ChunkSizeInBytes)
|
||||
for chunk := 0; chunk < len(m.Chunks)-1; chunk++ {
|
||||
_, err := writer.Write(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
lastChunkSize := m.FileSizeInBytes % m.ChunkSizeInBytes
|
||||
if lastChunkSize > 0 {
|
||||
buf = make([]byte, lastChunkSize)
|
||||
_, err := writer.Write(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
writer.Flush()
|
||||
} else {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if uint64(info.Size()) != m.FileSizeInBytes {
|
||||
return fmt.Errorf("file exists but is the wrong size")
|
||||
}
|
||||
|
||||
if err := m.getFileHandle(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Calculate Progress
|
||||
reader := bufio.NewReader(m.openFd)
|
||||
buf := make([]byte, m.ChunkSizeInBytes)
|
||||
chunkI := 0
|
||||
for {
|
||||
n, err := reader.Read(buf)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
hash := sha512.New()
|
||||
hash.Write(buf[0:n])
|
||||
chunkHash := hash.Sum(nil)
|
||||
m.progress = 0
|
||||
if subtle.ConstantTimeCompare(chunkHash, m.Chunks[chunkI]) == 1 {
|
||||
m.chunkComplete[chunkI] = true
|
||||
m.progress++
|
||||
}
|
||||
chunkI++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the underlying file descriptor
|
||||
func (m *Manifest) Close() {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
if m.openFd != nil {
|
||||
m.openFd.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// Save writes a JSON encoded byte array version of the manifest to path
// with owner-only (0600) permissions.
func (m *Manifest) Save(path string) error {
	return ioutil.WriteFile(path, m.Serialize(), 0600)
}
|
||||
|
||||
// Serialize returns the manifest as a JSON encoded byte array.
// The marshal error is deliberately ignored: Manifest contains only
// json-marshalable exported fields, so Marshal cannot fail here.
func (m *Manifest) Serialize() []byte {
	data, _ := json.Marshal(m)
	return data
}
|
|
@ -0,0 +1,128 @@
|
|||
package files
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestManifest(t *testing.T) {
|
||||
manifest, err := CreateManifest("testdata/example.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("manifest create error: %v", err)
|
||||
}
|
||||
|
||||
if len(manifest.Chunks) != 1 {
|
||||
t.Fatalf("manifest had unepxected Chunks : %v", manifest.Chunks)
|
||||
}
|
||||
|
||||
if manifest.FileSizeInBytes != 12 {
|
||||
t.Fatalf("manifest had unepxected length : %v", manifest.FileSizeInBytes)
|
||||
}
|
||||
|
||||
if hex.EncodeToString(manifest.RootHash) != "861844d6704e8573fec34d967e20bcfef3d424cf48be04e6dc08f2bd58c729743371015ead891cc3cf1c9d34b49264b510751b1ff9e537937bc46b5d6ff4ecc8" {
|
||||
t.Fatalf("manifest had incorrect root Hash : %v", manifest.RootHash)
|
||||
}
|
||||
|
||||
t.Logf("%v", manifest)
|
||||
|
||||
// Try to tread the chunk
|
||||
contents, err := manifest.GetChunkBytes(1)
|
||||
if err == nil {
|
||||
t.Fatalf("chunk fetch should have thrown an error")
|
||||
}
|
||||
|
||||
contents, err = manifest.GetChunkBytes(0)
|
||||
if err != nil {
|
||||
t.Fatalf("chunk fetch error: %v", err)
|
||||
}
|
||||
contents, err = manifest.GetChunkBytes(0)
|
||||
if err != nil {
|
||||
t.Fatalf("chunk fetch error: %v", err)
|
||||
}
|
||||
|
||||
json, _ := json.Marshal(manifest)
|
||||
t.Logf("%s", json)
|
||||
|
||||
t.Logf("%s", contents)
|
||||
}
|
||||
|
||||
func TestManifestLarge(t *testing.T) {
|
||||
manifest, err := CreateManifest("testdata/cwtch.png")
|
||||
if err != nil {
|
||||
t.Fatalf("manifest create error: %v", err)
|
||||
}
|
||||
|
||||
if len(manifest.Chunks) != int(math.Ceil(float64(51791)/DefaultChunkSize)) {
|
||||
t.Fatalf("manifest had unexpected Chunks : %v", manifest.Chunks)
|
||||
}
|
||||
|
||||
if manifest.FileSizeInBytes != 51791 {
|
||||
t.Fatalf("manifest had unepxected length : %v", manifest.FileSizeInBytes)
|
||||
}
|
||||
|
||||
if hex.EncodeToString(manifest.RootHash) != "8f0ed73bbb30db45b6a740b1251cae02945f48e4f991464d5f3607685c45dcd136a325dab2e5f6429ce2b715e602b20b5b16bf7438fb6235fefe912adcedb5fd" {
|
||||
t.Fatalf("manifest had incorrect root Hash : %v", manifest.RootHash)
|
||||
}
|
||||
|
||||
t.Logf("%v", len(manifest.Chunks))
|
||||
|
||||
json, _ := json.Marshal(manifest)
|
||||
t.Logf("%v %s", len(json), json)
|
||||
|
||||
// Pretend we downloaded the manifest
|
||||
ioutil.WriteFile("testdata/cwtch.png.manifest", json, 0600)
|
||||
|
||||
// Load the manifest from a file
|
||||
cwtchPngManifest, err := LoadManifest("testdata/cwtch.png.manifest")
|
||||
if err != nil {
|
||||
t.Fatalf("manifest create error: %v", err)
|
||||
}
|
||||
defer cwtchPngManifest.Close()
|
||||
t.Logf("%v", cwtchPngManifest)
|
||||
|
||||
// Test verifying the hash
|
||||
if cwtchPngManifest.VerifyFile() != nil {
|
||||
t.Fatalf("hashes do not validate error: %v", err)
|
||||
}
|
||||
|
||||
// Prepare Download
|
||||
cwtchPngOutManifest, _ := LoadManifest("testdata/cwtch.png.manifest")
|
||||
cwtchPngOutManifest.FileName = "testdata/cwtch.out.png"
|
||||
|
||||
defer cwtchPngOutManifest.Close()
|
||||
err = cwtchPngOutManifest.PrepareDownload()
|
||||
if err != nil {
|
||||
t.Fatalf("could not prepare download %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < len(cwtchPngManifest.Chunks); i++ {
|
||||
|
||||
t.Logf("Sending Chunk %v %x from %v", i, cwtchPngManifest.Chunks[i], cwtchPngManifest.FileName)
|
||||
|
||||
contents, err := cwtchPngManifest.GetChunkBytes(uint64(i))
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("could not get chunk %v %v", i, err)
|
||||
}
|
||||
t.Logf("Progress: %v", cwtchPngOutManifest.chunkComplete)
|
||||
_, err = cwtchPngOutManifest.StoreChunk(uint64(i), contents)
|
||||
if err != nil {
|
||||
t.Fatalf("could not store chunk %v %v", i, err)
|
||||
}
|
||||
|
||||
}
|
||||
err = cwtchPngOutManifest.VerifyFile()
|
||||
if err != nil {
|
||||
t.Fatalf("could not verify file %v", err)
|
||||
}
|
||||
|
||||
// Test that changing the hash throws an error
|
||||
cwtchPngManifest.RootHash[3] = 0xFF
|
||||
if cwtchPngManifest.VerifyFile() == nil {
|
||||
t.Fatalf("hashes should not validate error")
|
||||
}
|
||||
|
||||
}
|
Binary file not shown.
After Width: | Height: | Size: 51 KiB |
Binary file not shown.
After Width: | Height: | Size: 51 KiB |
|
@ -0,0 +1 @@
|
|||
{"Chunks":["BXbFagOrWyDwcsnW+f1O6fddCqJywEISjUrzI31FAE0=","1SZcGk0NSduL093Hh0hZ4WVcx2o6VKgL3kUy2WqmdLY=","R4wwVcR4andJJ0fkXlp/td1ZSjH7xHi3Egh8aloWONA=","TAuI06kog7TYVDSO8AgWprAGY8LSlGBwqZvpgMymhZE=","XQLxqLjiM0qIAeOmGIrZJkyuCEfJ4t+ikgbV1ohudiY=","aXInp/WF58A5/TGkwAwniNvIU2ZlRjVtrpClw0sBcVM=","oSCjcrenQ4+Pix4jtgNCRt40K0kQ41eCumSJO0Gqo/0=","FebZSfHuyVdRWkS8/IaWA6UooEURkf9vPxnqZXKII8g=","tITbm77ca1YmExGzbX4WBP5fAOh4bUzDtceN1VBYcBI=","VJd8rWuMtrZzqobdKam0n6t4Vgo72GcsNRNzMk46PsI=","7ywzxLV44HVk9wz+QQHvvVQJAFkTU6/pHyVFjE0uF40=","PoHUwEoQOSXv8ZpJ9bGeCZqiwY34bXcFcBki2OPxd8o=","eogaSYPKrl0MFEqVP1mwUMczMCcnjjwUmUz/0DsAF48="],"FileName":"testdata/cwtch.png","RootHash":"jw7XO7sw20W2p0CxJRyuApRfSOT5kUZNXzYHaFxF3NE2oyXasuX2QpzitxXmArILWxa/dDj7YjX+/pEq3O21/Q==","FileSizeInBytes":51791,"ChunkSizeInBytes":4096}
|
|
@ -0,0 +1 @@
|
|||
Hello World!
|
|
@ -0,0 +1,8 @@
|
|||
package model
|
||||
|
||||
// PeerMessage is an encapsulation that can be used by higher level applications
// to send protocol messages (e.g. file chunks) to a peer.
type PeerMessage struct {
	ID      string // A unique Message ID (primarily used for acknowledgments)
	Context string // A unique context identifier i.e. im.cwtch.chat
	Data    []byte // The serialized data packet.
}
|
|
@ -490,7 +490,7 @@ func (ps *ProfileStoreV1) eventHandler() {
|
|||
}
|
||||
flags, err := strconv.ParseUint(ev.Data[event.Flags], 2, 64)
|
||||
if err != nil {
|
||||
log.Errorf("Invalid Message Falgs: %v", err)
|
||||
log.Errorf("Invalid Message Flags: %v", err)
|
||||
return
|
||||
}
|
||||
ps.profile.UpdateMessageFlags(handle, mIx, flags)
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 51 KiB |
|
@ -0,0 +1,174 @@
|
|||
package filesharing
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
app2 "cwtch.im/cwtch/app"
|
||||
"cwtch.im/cwtch/app/utils"
|
||||
"cwtch.im/cwtch/event"
|
||||
"cwtch.im/cwtch/functionality/filesharing"
|
||||
"cwtch.im/cwtch/model"
|
||||
"cwtch.im/cwtch/model/attr"
|
||||
"cwtch.im/cwtch/peer"
|
||||
"cwtch.im/cwtch/protocol/connections"
|
||||
"cwtch.im/cwtch/protocol/files"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"git.openprivacy.ca/openprivacy/connectivity/tor"
|
||||
"git.openprivacy.ca/openprivacy/log"
|
||||
mrand "math/rand"
|
||||
"os"
|
||||
"os/user"
|
||||
"path"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func waitForPeerPeerConnection(t *testing.T, peera peer.CwtchPeer, peerb peer.CwtchPeer) {
|
||||
for {
|
||||
state, ok := peera.GetPeerState(peerb.GetOnion())
|
||||
if ok {
|
||||
//log.Infof("Waiting for Peer %v to peer with peer: %v - state: %v\n", peera.GetProfile().Name, peerb.GetProfile().Name, state)
|
||||
if state == connections.FAILED {
|
||||
t.Fatalf("%v could not connect to %v", peera.GetOnion(), peerb.GetOnion())
|
||||
}
|
||||
if state != connections.AUTHENTICATED {
|
||||
fmt.Printf("peer %v waiting connect to peer %v, currently: %v\n", peera.GetOnion(), peerb.GetOnion(), connections.ConnectionStateName[state])
|
||||
time.Sleep(time.Second * 5)
|
||||
continue
|
||||
} else {
|
||||
peerAName, _ := peera.GetAttribute(attr.GetLocalScope("name"))
|
||||
peerBName, _ := peerb.GetAttribute(attr.GetLocalScope("name"))
|
||||
fmt.Printf("%v CONNECTED and AUTHED to %v\n", peerAName, peerBName)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestFileSharing(t *testing.T) {
|
||||
|
||||
numGoRoutinesStart := runtime.NumGoroutine()
|
||||
|
||||
os.RemoveAll("cwtch.out.png")
|
||||
os.RemoveAll("cwtch.out.png.manifest")
|
||||
|
||||
log.SetLevel(log.LevelDebug)
|
||||
|
||||
os.Mkdir("tordir", 0700)
|
||||
dataDir := path.Join("tordir", "tor")
|
||||
os.MkdirAll(dataDir, 0700)
|
||||
|
||||
// we don't need real randomness for the port, just to avoid a possible conflict...
|
||||
mrand.Seed(int64(time.Now().Nanosecond()))
|
||||
socksPort := mrand.Intn(1000) + 9051
|
||||
controlPort := mrand.Intn(1000) + 9052
|
||||
|
||||
// generate a random password
|
||||
key := make([]byte, 64)
|
||||
_, err := rand.Read(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tor.NewTorrc().WithSocksPort(socksPort).WithOnionTrafficOnly().WithHashedPassword(base64.StdEncoding.EncodeToString(key)).WithControlPort(controlPort).Build("tordir/tor/torrc")
|
||||
acn, err := tor.NewTorACNWithAuth("./tordir", path.Join("..", "..", "tor"), controlPort, tor.HashedPasswordAuthenticator{Password: base64.StdEncoding.EncodeToString(key)})
|
||||
if err != nil {
|
||||
t.Fatalf("Could not start Tor: %v", err)
|
||||
}
|
||||
|
||||
app := app2.NewApp(acn, "./storage")
|
||||
|
||||
usr, _ := user.Current()
|
||||
cwtchDir := path.Join(usr.HomeDir, ".cwtch")
|
||||
os.Mkdir(cwtchDir, 0700)
|
||||
os.RemoveAll(path.Join(cwtchDir, "testing"))
|
||||
os.Mkdir(path.Join(cwtchDir, "testing"), 0700)
|
||||
|
||||
fmt.Println("Creating Alice...")
|
||||
app.CreatePeer("alice", "asdfasdf")
|
||||
|
||||
fmt.Println("Creating Bob...")
|
||||
app.CreatePeer("bob", "asdfasdf")
|
||||
|
||||
alice := utils.WaitGetPeer(app, "alice")
|
||||
bob := utils.WaitGetPeer(app, "bob")
|
||||
|
||||
alice.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer})
|
||||
bob.AutoHandleEvents([]event.Type{event.PeerStateChange, event.NewRetValMessageFromPeer, event.ManifestReceived})
|
||||
|
||||
queueOracle := event.NewQueue()
|
||||
app.GetEventBus(bob.GetOnion()).Subscribe(event.FileDownloaded, queueOracle)
|
||||
|
||||
app.LaunchPeers()
|
||||
|
||||
waitTime := time.Duration(30) * time.Second
|
||||
t.Logf("** Waiting for Alice, Bob to connect with onion network... (%v)\n", waitTime)
|
||||
time.Sleep(waitTime)
|
||||
|
||||
bob.AddContact("alice?", alice.GetOnion(), model.AuthApproved)
|
||||
alice.PeerWithOnion(bob.GetOnion())
|
||||
|
||||
fmt.Println("Waiting for alice and Bob to peer...")
|
||||
waitForPeerPeerConnection(t, alice, bob)
|
||||
|
||||
fmt.Println("Alice and Bob are Connected!!")
|
||||
|
||||
filesharingFunctionality, _ := filesharing.FunctionalityGate(map[string]bool{"filesharing": true})
|
||||
|
||||
err = filesharingFunctionality.ShareFile("cwtch.png", alice, bob.GetOnion())
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error!: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the messages to arrive...
|
||||
time.Sleep(time.Second * 10)
|
||||
|
||||
for _, message := range bob.GetContact(alice.GetOnion()).Timeline.GetMessages() {
|
||||
|
||||
var messageWrapper model.MessageWrapper
|
||||
json.Unmarshal([]byte(message.Message), &messageWrapper)
|
||||
|
||||
if messageWrapper.Overlay == model.OverlayFileSharing {
|
||||
var fileMessageOverlay filesharing.OverlayMessage
|
||||
err := json.Unmarshal([]byte(messageWrapper.Data), &fileMessageOverlay)
|
||||
|
||||
if err == nil {
|
||||
filesharingFunctionality.DownloadFile(bob, alice.GetOnion(), "cwtch.out.png", "cwtch.out.png.manifest", fmt.Sprintf("%s.%s", fileMessageOverlay.Hash, fileMessageOverlay.Nonce))
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Found message from Alice: %v", message.Message)
|
||||
}
|
||||
|
||||
// Wait for the file downloaded event
|
||||
ev := queueOracle.Next()
|
||||
if ev.EventType != event.FileDownloaded {
|
||||
t.Fatalf("Expected file download event")
|
||||
}
|
||||
|
||||
manifest, err := files.CreateManifest("cwtch.out.png")
|
||||
if hex.EncodeToString(manifest.RootHash) != "8f0ed73bbb30db45b6a740b1251cae02945f48e4f991464d5f3607685c45dcd136a325dab2e5f6429ce2b715e602b20b5b16bf7438fb6235fefe912adcedb5fd" {
|
||||
t.Fatalf("file hash does not match expected %x: ", manifest.RootHash)
|
||||
}
|
||||
|
||||
queueOracle.Shutdown()
|
||||
app.Shutdown()
|
||||
acn.Close()
|
||||
|
||||
numGoRoutinesPostACN := runtime.NumGoroutine()
|
||||
|
||||
// Printing out the current goroutines
|
||||
// Very useful if we are leaking any.
|
||||
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||
|
||||
if numGoRoutinesStart != numGoRoutinesPostACN {
|
||||
t.Errorf("Number of GoRoutines at start (%v) does not match number of goRoutines after cleanup of peers and servers (%v), clean up failed, leak detected!", numGoRoutinesStart, numGoRoutinesPostACN)
|
||||
}
|
||||
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -9,6 +9,7 @@ go test -race ${1} -coverprofile=storage.v0.cover.out -v ./storage/v0
|
|||
go test -race ${1} -coverprofile=storage.v1.cover.out -v ./storage/v1
|
||||
go test -race ${1} -coverprofile=storage.cover.out -v ./storage
|
||||
go test -race ${1} -coverprofile=peer.connections.cover.out -v ./protocol/connections
|
||||
go test -race ${1} -coverprofile=peer.filesharing.cover.out -v ./protocol/files
|
||||
go test -race ${1} -coverprofile=peer.cover.out -v ./peer
|
||||
echo "mode: set" > coverage.out && cat *.cover.out | grep -v mode: | sort -r | \
|
||||
awk '{if($1 != last) {print $0;last=$1}}' >> coverage.out
|
||||
|
|
Loading…
Reference in New Issue