diff --git a/Makefile b/Makefile
index 0c1bf19248..2048213a52 100644
--- a/Makefile
+++ b/Makefile
@@ -43,6 +43,7 @@ $(eval $(call makemock, pkg/events, Callbacks, eventsmoc
$(eval $(call makemock, pkg/identity, Plugin, identitymocks))
$(eval $(call makemock, pkg/identity, Callbacks, identitymocks))
$(eval $(call makemock, pkg/dataexchange, Plugin, dataexchangemocks))
+$(eval $(call makemock, pkg/dataexchange, DXEvent, dataexchangemocks))
$(eval $(call makemock, pkg/dataexchange, Callbacks, dataexchangemocks))
$(eval $(call makemock, pkg/tokens, Plugin, tokenmocks))
$(eval $(call makemock, pkg/tokens, Callbacks, tokenmocks))
diff --git a/docs/architecture/ping_pong_txflow.md b/docs/architecture/ping_pong_txflow.md
index bc262cb3fe..bfd17b087a 100644
--- a/docs/architecture/ping_pong_txflow.md
+++ b/docs/architecture/ping_pong_txflow.md
@@ -50,7 +50,7 @@ This is deliberately a simple flow, and all kinds of additional layers might wel
## Broadcast Public Description of Binary Data Asset (Member 1)
-- Upload BLOB of the actual data
+- Upload Blob of the actual data
- Returns a hash of the payload
- Upload JSON containing the public index data
- Includes the hash of the full payload
diff --git a/docs/gettingstarted/broadcast_data.md b/docs/gettingstarted/broadcast_data.md
index 5982805718..12ab5dabf6 100644
--- a/docs/gettingstarted/broadcast_data.md
+++ b/docs/gettingstarted/broadcast_data.md
@@ -24,7 +24,7 @@ nav_order: 4
- Can be sent in-line, uploaded in advanced, or received from other parties
- Can include smaller JSON payloads suitable for database storage
- These can be verified against a `datatype`
- - Can include references to large (multi megabyte/gigabyte) BLOB data
+ - Can include references to large (multi megabyte/gigabyte) Blob data
- Sequenced via the blockchain
- The blockchain does not contain any data, just a hash pin
- Batched for efficiency
@@ -125,7 +125,7 @@ Here we make two API calls.
2. Broadcast a message referring to that data
-- The BLOB attachment gets published to shared storage
+- The Blob attachment gets published to shared storage
- This happens the first time a broadcast happens on a data attachment
- A pin goes to the blockchain
- The metadata goes into a batch with the message
@@ -144,7 +144,7 @@ curl -sLo - https://github.com/hyperledger/firefly/raw/main/docs/firefly_logo.pn
http://localhost:5000/api/v1/namespaces/default/data
```
-### Example data response from BLOB upload
+### Example data response from Blob upload
Status: `200 OK` - your data is uploaded to your local FireFly node
diff --git a/docs/gettingstarted/private_send.md b/docs/gettingstarted/private_send.md
index a0873e1e45..a630949c9b 100644
--- a/docs/gettingstarted/private_send.md
+++ b/docs/gettingstarted/private_send.md
@@ -24,7 +24,7 @@ nav_order: 4
- Can be sent in-line, uploaded in advanced, or received from other parties
- Can include smaller JSON payloads suitable for database storage
- These can be verified against a `datatype`
- - Can include references to large (multi megabyte/gigabyte) BLOB data
+ - Can include references to large (multi megabyte/gigabyte) Blob data
- A `group` specifies who has visibility to the data
- The author must be included in the group - auto-added if omitted
- Can be specified in-line in the message by listing recipients directly
@@ -210,7 +210,7 @@ Here we make two API calls.
2. Privately send a message referring to that data
-- The BLOB is sent privately to each party
+- The Blob is sent privately to each party
- A pin goes to the blockchain
- The metadata goes into a batch with the message
@@ -228,7 +228,7 @@ curl -sLo - https://github.com/hyperledger/firefly/raw/main/docs/firefly_logo.pn
http://localhost:5000/api/v1/namespaces/default/data
```
-### Example data response from BLOB upload
+### Example data response from Blob upload
Status: `200 OK` - your data is uploaded to your local FireFly node
diff --git a/docs/gettingstarted/query_messages.md b/docs/gettingstarted/query_messages.md
index 19a50f7a24..ed7c76ff31 100644
--- a/docs/gettingstarted/query_messages.md
+++ b/docs/gettingstarted/query_messages.md
@@ -34,7 +34,7 @@ This builds on the APIs to query and filter messages, described below
### Example 1: Query confirmed messages
These are the messages ready to be processed in your application.
-All data associated with the message (including BLOB attachments) is available,
+All data associated with the message (including Blob attachments) is available,
and if they are sequenced by the blockchain, then those blockchain transactions
are complete.
diff --git a/docs/images/ping_pong.svg b/docs/images/ping_pong.svg
index 36df9989ac..46941b54fb 100644
--- a/docs/images/ping_pong.svg
+++ b/docs/images/ping_pong.svg
@@ -1344,7 +1344,7 @@
id="text7051">Upload BLOB of the actual dataUpload Blob of the actual dataBLOBBlobBLOBBlobBLOBBlobdata + BLOB URLdata + Blob URLBLOBBlobBLOBBlob 0 {
diff --git a/internal/apiserver/route_get_data_blob_test.go b/internal/apiserver/route_get_data_blob_test.go
index ed0693b743..982e25e59e 100644
--- a/internal/apiserver/route_get_data_blob_test.go
+++ b/internal/apiserver/route_get_data_blob_test.go
@@ -37,7 +37,7 @@ func TestGetDataBlob(t *testing.T) {
res := httptest.NewRecorder()
blobHash := fftypes.NewRandB32()
- mdm.On("DownloadBLOB", mock.Anything, "mynamespace", "abcd1234").
+ mdm.On("DownloadBlob", mock.Anything, "mynamespace", "abcd1234").
Return(&fftypes.Blob{
Hash: blobHash,
Size: 12345,
diff --git a/internal/apiserver/route_post_data.go b/internal/apiserver/route_post_data.go
index 1e83986d4e..33798acf5c 100644
--- a/internal/apiserver/route_post_data.go
+++ b/internal/apiserver/route_post_data.go
@@ -74,7 +74,7 @@ var postData = &oapispec.Route{
}
data.Value = fftypes.JSONAnyPtr(metadata)
}
- output, err = getOr(r.Ctx).Data().UploadBLOB(r.Ctx, r.PP["ns"], data, r.Part, strings.EqualFold(r.FP["autometa"], "true"))
+ output, err = getOr(r.Ctx).Data().UploadBlob(r.Ctx, r.PP["ns"], data, r.Part, strings.EqualFold(r.FP["autometa"], "true"))
return output, err
},
}
diff --git a/internal/apiserver/route_post_data_test.go b/internal/apiserver/route_post_data_test.go
index 8916b1b0ac..0698fc2619 100644
--- a/internal/apiserver/route_post_data_test.go
+++ b/internal/apiserver/route_post_data_test.go
@@ -67,7 +67,7 @@ func TestPostDataBinary(t *testing.T) {
res := httptest.NewRecorder()
- mdm.On("UploadBLOB", mock.Anything, "ns1", mock.AnythingOfType("*fftypes.DataRefOrValue"), mock.AnythingOfType("*fftypes.Multipart"), false).
+ mdm.On("UploadBlob", mock.Anything, "ns1", mock.AnythingOfType("*fftypes.DataRefOrValue"), mock.AnythingOfType("*fftypes.Multipart"), false).
Return(&fftypes.Data{}, nil)
r.ServeHTTP(res, req)
@@ -107,7 +107,7 @@ func TestPostDataBinaryObjAutoMeta(t *testing.T) {
res := httptest.NewRecorder()
- mdm.On("UploadBLOB", mock.Anything, "ns1", mock.MatchedBy(func(d *fftypes.DataRefOrValue) bool {
+ mdm.On("UploadBlob", mock.Anything, "ns1", mock.MatchedBy(func(d *fftypes.DataRefOrValue) bool {
assert.Equal(t, `{"filename":"anything"}`, string(*d.Value))
assert.Equal(t, fftypes.ValidatorTypeJSON, d.Validator)
assert.Equal(t, "fileinfo", d.Datatype.Name)
@@ -141,7 +141,7 @@ func TestPostDataBinaryStringMetadata(t *testing.T) {
res := httptest.NewRecorder()
- mdm.On("UploadBLOB", mock.Anything, "ns1", mock.MatchedBy(func(d *fftypes.DataRefOrValue) bool {
+ mdm.On("UploadBlob", mock.Anything, "ns1", mock.MatchedBy(func(d *fftypes.DataRefOrValue) bool {
assert.Equal(t, `"string metadata"`, string(*d.Value))
assert.Equal(t, "", string(d.Validator))
assert.Nil(t, d.Datatype)
@@ -174,7 +174,7 @@ func TestPostDataTrailingMetadata(t *testing.T) {
res := httptest.NewRecorder()
- mdm.On("UploadBLOB", mock.Anything, "ns1", mock.Anything, mock.AnythingOfType("*fftypes.Multipart"), false).
+ mdm.On("UploadBlob", mock.Anything, "ns1", mock.Anything, mock.AnythingOfType("*fftypes.Multipart"), false).
Return(&fftypes.Data{}, nil)
r.ServeHTTP(res, req)
@@ -201,7 +201,7 @@ func TestPostDataBinaryMissing(t *testing.T) {
res := httptest.NewRecorder()
- mdm.On("UploadBLOB", mock.Anything, "ns1", mock.AnythingOfType("*fftypes.DataRefOrValue"), mock.AnythingOfType("*fftypes.Multipart"), false).
+ mdm.On("UploadBlob", mock.Anything, "ns1", mock.AnythingOfType("*fftypes.DataRefOrValue"), mock.AnythingOfType("*fftypes.Multipart"), false).
Return(&fftypes.Data{}, nil)
r.ServeHTTP(res, req)
@@ -221,7 +221,7 @@ func TestPostDataBadForm(t *testing.T) {
res := httptest.NewRecorder()
- mdm.On("UploadBLOB", mock.Anything, "ns1", mock.AnythingOfType("*fftypes.DataRefOrValue"), mock.AnythingOfType("*fftypes.Multipart"), false).
+ mdm.On("UploadBlob", mock.Anything, "ns1", mock.AnythingOfType("*fftypes.DataRefOrValue"), mock.AnythingOfType("*fftypes.Multipart"), false).
Return(&fftypes.Data{}, nil)
r.ServeHTTP(res, req)
diff --git a/internal/blockchain/ethereum/ethereum.go b/internal/blockchain/ethereum/ethereum.go
index 703ab9cb68..e5aa7fd142 100644
--- a/internal/blockchain/ethereum/ethereum.go
+++ b/internal/blockchain/ethereum/ethereum.go
@@ -389,7 +389,7 @@ func (e *Ethereum) handleContractEvent(ctx context.Context, msgJSON fftypes.JSON
return e.callbacks.BlockchainEvent(event)
}
-func (e *Ethereum) handleReceipt(ctx context.Context, reply fftypes.JSONObject) error {
+func (e *Ethereum) handleReceipt(ctx context.Context, reply fftypes.JSONObject) {
l := log.L(ctx)
headers := reply.GetObject("headers")
@@ -399,19 +399,19 @@ func (e *Ethereum) handleReceipt(ctx context.Context, reply fftypes.JSONObject)
message := reply.GetString("errorMessage")
if requestID == "" || replyType == "" {
l.Errorf("Reply cannot be processed - missing fields: %+v", reply)
- return nil // Swallow this and move on
+ return
}
operationID, err := fftypes.ParseUUID(ctx, requestID)
if err != nil {
l.Errorf("Reply cannot be processed - bad ID: %+v", reply)
- return nil // Swallow this and move on
+ return
}
updateType := fftypes.OpStatusSucceeded
if replyType != "TransactionSuccess" {
updateType = fftypes.OpStatusFailed
}
l.Infof("Ethconnect '%s' reply: request=%s tx=%s message=%s", replyType, requestID, txHash, message)
- return e.callbacks.BlockchainOpUpdate(e, operationID, updateType, txHash, message, reply)
+ e.callbacks.BlockchainOpUpdate(e, operationID, updateType, txHash, message, reply)
}
func (e *Ethereum) buildEventLocationString(msgJSON fftypes.JSONObject) string {
@@ -483,7 +483,7 @@ func (e *Ethereum) eventLoop() {
err = e.wsconn.Send(ctx, ack)
}
case map[string]interface{}:
- err = e.handleReceipt(ctx, fftypes.JSONObject(msgTyped))
+ e.handleReceipt(ctx, fftypes.JSONObject(msgTyped))
default:
l.Errorf("Message unexpected: %+v", msgTyped)
continue
diff --git a/internal/blockchain/ethereum/ethereum_test.go b/internal/blockchain/ethereum/ethereum_test.go
index 44607b9375..a6b2b547f1 100644
--- a/internal/blockchain/ethereum/ethereum_test.go
+++ b/internal/blockchain/ethereum/ethereum_test.go
@@ -1067,6 +1067,7 @@ func TestEventLoopContextCancelled(t *testing.T) {
wsm.On("Close").Return()
e.closed = make(chan struct{})
e.eventLoop() // we're simply looking for it exiting
+ wsm.AssertExpectations(t)
}
func TestEventLoopReceiveClosed(t *testing.T) {
@@ -1079,19 +1080,24 @@ func TestEventLoopReceiveClosed(t *testing.T) {
wsm.On("Close").Return()
e.closed = make(chan struct{})
e.eventLoop() // we're simply looking for it exiting
+ wsm.AssertExpectations(t)
}
func TestEventLoopSendClosed(t *testing.T) {
e, cancel := newTestEthereum()
- cancel()
+ s := make(chan []byte, 1)
+ s <- []byte(`[]`)
r := make(chan []byte)
wsm := e.wsconn.(*wsmocks.WSClient)
- close(r)
- wsm.On("Receive").Return((<-chan []byte)(r))
- wsm.On("Send", mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
+ wsm.On("Receive").Return((<-chan []byte)(s))
+ wsm.On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+ go cancel()
+ close(r)
+ }).Return(fmt.Errorf("pop"))
wsm.On("Close").Return()
e.closed = make(chan struct{})
e.eventLoop() // we're simply looking for it exiting
+ wsm.AssertExpectations(t)
}
func TestHandleReceiptTXSuccess(t *testing.T) {
@@ -1139,8 +1145,7 @@ func TestHandleReceiptTXSuccess(t *testing.T) {
err := json.Unmarshal(data.Bytes(), &reply)
assert.NoError(t, err)
- err = e.handleReceipt(context.Background(), reply)
- assert.NoError(t, err)
+ e.handleReceipt(context.Background(), reply)
}
@@ -1189,7 +1194,7 @@ func TestHandleBadPayloadsAndThenReceiptFailure(t *testing.T) {
<-done
}
-func TestHandleReceiptNoRequestID(t *testing.T) {
+func TestHandleMsgBatchBadData(t *testing.T) {
em := &blockchainmocks.Callbacks{}
wsm := &wsmocks.WSClient{}
e := &Ethereum{
@@ -1203,8 +1208,7 @@ func TestHandleReceiptNoRequestID(t *testing.T) {
data := fftypes.JSONAnyPtr(`{}`)
err := json.Unmarshal(data.Bytes(), &reply)
assert.NoError(t, err)
- err = e.handleReceipt(context.Background(), reply)
- assert.NoError(t, err)
+ e.handleReceipt(context.Background(), reply)
}
func TestHandleReceiptBadRequestID(t *testing.T) {
@@ -1221,8 +1225,7 @@ func TestHandleReceiptBadRequestID(t *testing.T) {
data := fftypes.JSONAnyPtr(`{"headers":{"requestId":"1","type":"TransactionSuccess"}}`)
err := json.Unmarshal(data.Bytes(), &reply)
assert.NoError(t, err)
- err = e.handleReceipt(context.Background(), reply)
- assert.NoError(t, err)
+ e.handleReceipt(context.Background(), reply)
}
func TestFormatNil(t *testing.T) {
diff --git a/internal/blockchain/fabric/fabric.go b/internal/blockchain/fabric/fabric.go
index c466ae19d9..19a5d589ec 100644
--- a/internal/blockchain/fabric/fabric.go
+++ b/internal/blockchain/fabric/fabric.go
@@ -370,7 +370,7 @@ func (f *Fabric) handleContractEvent(ctx context.Context, msgJSON fftypes.JSONOb
return f.callbacks.BlockchainEvent(event)
}
-func (f *Fabric) handleReceipt(ctx context.Context, reply fftypes.JSONObject) error {
+func (f *Fabric) handleReceipt(ctx context.Context, reply fftypes.JSONObject) {
l := log.L(ctx)
headers := reply.GetObject("headers")
@@ -380,19 +380,19 @@ func (f *Fabric) handleReceipt(ctx context.Context, reply fftypes.JSONObject) er
message := reply.GetString("errorMessage")
if requestID == "" || replyType == "" {
l.Errorf("Reply cannot be processed: %+v", reply)
- return nil // Swallow this and move on
+ return
}
operationID, err := fftypes.ParseUUID(ctx, requestID)
if err != nil {
l.Errorf("Reply cannot be processed - bad ID: %+v", reply)
- return nil // Swallow this and move on
+ return
}
updateType := fftypes.OpStatusSucceeded
if replyType != "TransactionSuccess" {
updateType = fftypes.OpStatusFailed
}
l.Infof("Fabconnect '%s' reply tx=%s (request=%s) %s", replyType, txHash, requestID, message)
- return f.callbacks.BlockchainOpUpdate(f, operationID, updateType, txHash, message, reply)
+ f.callbacks.BlockchainOpUpdate(f, operationID, updateType, txHash, message, reply)
}
func (f *Fabric) handleMessageBatch(ctx context.Context, messages []interface{}) error {
@@ -460,7 +460,7 @@ func (f *Fabric) eventLoop() {
err = f.wsconn.Send(ctx, ack)
}
case map[string]interface{}:
- err = f.handleReceipt(ctx, fftypes.JSONObject(msgTyped))
+ f.handleReceipt(ctx, fftypes.JSONObject(msgTyped))
default:
l.Errorf("Message unexpected: %+v", msgTyped)
continue
diff --git a/internal/blockchain/fabric/fabric_test.go b/internal/blockchain/fabric/fabric_test.go
index 993c200ab0..7192826040 100644
--- a/internal/blockchain/fabric/fabric_test.go
+++ b/internal/blockchain/fabric/fabric_test.go
@@ -889,15 +889,19 @@ func TestEventLoopReceiveClosed(t *testing.T) {
func TestEventLoopSendClosed(t *testing.T) {
e, cancel := newTestFabric()
- cancel()
+ s := make(chan []byte, 1)
+ s <- []byte(`[]`)
r := make(chan []byte)
wsm := e.wsconn.(*wsmocks.WSClient)
- close(r)
- wsm.On("Receive").Return((<-chan []byte)(r))
+ wsm.On("Receive").Return((<-chan []byte)(s))
wsm.On("Close").Return()
- wsm.On("Send", mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
+ wsm.On("Send", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Run(func(args mock.Arguments) {
+ go cancel()
+ close(r)
+ })
e.closed = make(chan struct{})
e.eventLoop() // we're simply looking for it exiting
+ wsm.AssertExpectations(t)
}
func TestEventLoopUnexpectedMessage(t *testing.T) {
@@ -979,8 +983,7 @@ func TestHandleReceiptTXSuccess(t *testing.T) {
err := json.Unmarshal(data, &reply)
assert.NoError(t, err)
- err = e.handleReceipt(context.Background(), reply)
- assert.NoError(t, err)
+ e.handleReceipt(context.Background(), reply)
}
@@ -998,8 +1001,7 @@ func TestHandleReceiptNoRequestID(t *testing.T) {
data := []byte(`{}`)
err := json.Unmarshal(data, &reply)
assert.NoError(t, err)
- err = e.handleReceipt(context.Background(), reply)
- assert.NoError(t, err)
+ e.handleReceipt(context.Background(), reply)
}
func TestHandleReceiptBadRequestID(t *testing.T) {
@@ -1028,8 +1030,7 @@ func TestHandleReceiptBadRequestID(t *testing.T) {
err := json.Unmarshal(data, &reply)
assert.NoError(t, err)
- err = e.handleReceipt(context.Background(), reply)
- assert.NoError(t, err)
+ e.handleReceipt(context.Background(), reply)
}
func TestHandleReceiptFailedTx(t *testing.T) {
@@ -1068,8 +1069,7 @@ func TestHandleReceiptFailedTx(t *testing.T) {
err := json.Unmarshal(data, &reply)
assert.NoError(t, err)
- err = e.handleReceipt(context.Background(), reply)
- assert.NoError(t, err)
+ e.handleReceipt(context.Background(), reply)
}
func TestFormatNil(t *testing.T) {
diff --git a/internal/broadcast/operations.go b/internal/broadcast/operations.go
index 6d0ba25d6b..6b75dfe600 100644
--- a/internal/broadcast/operations.go
+++ b/internal/broadcast/operations.go
@@ -148,7 +148,7 @@ func (bm *broadcastManager) uploadBatch(ctx context.Context, data uploadBatchDat
func (bm *broadcastManager) uploadBlob(ctx context.Context, data uploadBlobData) (outputs fftypes.JSONObject, complete bool, err error) {
// Stream from the local data exchange ...
- reader, err := bm.exchange.DownloadBLOB(ctx, data.Blob.PayloadRef)
+ reader, err := bm.exchange.DownloadBlob(ctx, data.Blob.PayloadRef)
if err != nil {
return nil, false, i18n.WrapError(ctx, err, i18n.MsgDownloadBlobFailed, data.Blob.PayloadRef)
}
diff --git a/internal/broadcast/operations_test.go b/internal/broadcast/operations_test.go
index 91d14f33a1..8e96acb151 100644
--- a/internal/broadcast/operations_test.go
+++ b/internal/broadcast/operations_test.go
@@ -274,7 +274,7 @@ func TestPrepareAndRunUploadBlob(t *testing.T) {
mdi.On("GetDataByID", mock.Anything, data.ID, false).Return(data, nil)
mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(blob, nil)
mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil)
- mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, nil)
+ mdx.On("DownloadBlob", context.Background(), mock.Anything).Return(reader, nil)
mdi.On("UpdateData", context.Background(), data.ID, mock.MatchedBy(func(update database.Update) bool {
info, _ := update.Finalize()
assert.Equal(t, 1, len(info.SetOperations))
@@ -440,7 +440,7 @@ func TestRunOperationUploadBlobUpdateFail(t *testing.T) {
mdi := bm.database.(*databasemocks.Plugin)
reader := ioutil.NopCloser(strings.NewReader("some data"))
- mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, nil)
+ mdx.On("DownloadBlob", context.Background(), mock.Anything).Return(reader, nil)
mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil)
mdi.On("UpdateData", context.Background(), data.ID, mock.Anything).Return(fmt.Errorf("pop"))
@@ -473,7 +473,7 @@ func TestRunOperationUploadBlobUploadFail(t *testing.T) {
mdx := bm.exchange.(*dataexchangemocks.Plugin)
reader := ioutil.NopCloser(strings.NewReader("some data"))
- mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, nil)
+ mdx.On("DownloadBlob", context.Background(), mock.Anything).Return(reader, nil)
mps.On("UploadData", context.Background(), mock.Anything).Return("", fmt.Errorf("pop"))
_, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob))
@@ -504,7 +504,7 @@ func TestRunOperationUploadBlobDownloadFail(t *testing.T) {
mdx := bm.exchange.(*dataexchangemocks.Plugin)
reader := ioutil.NopCloser(strings.NewReader("some data"))
- mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, fmt.Errorf("pop"))
+ mdx.On("DownloadBlob", context.Background(), mock.Anything).Return(reader, fmt.Errorf("pop"))
_, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob))
diff --git a/internal/config/config.go b/internal/config/config.go
index dfa5d847b3..bb40c94227 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -70,6 +70,18 @@ var (
BatchRetryInitDelay = rootKey("batch.retry.initDelay")
// BatchRetryMaxDelay is the maximum delay between retry attempts
BatchRetryMaxDelay = rootKey("batch.retry.maxDelay")
+ // BlobReceiverRetryInitDelay is the initial retry delay
+ BlobReceiverRetryInitDelay = rootKey("blobreceiver.retry.initialDelay")
+ // BlobReceiverRetryMaxDelay is the maximum retry delay
+ BlobReceiverRetryMaxDelay = rootKey("blobreceiver.retry.maxDelay")
+ // BlobReceiverRetryFactor is the backoff factor to use for retries
+ BlobReceiverRetryFactor = rootKey("blobreceiver.retry.factor")
+	// BlobReceiverWorkerCount is the number of blob receiver workers
+ BlobReceiverWorkerCount = rootKey("blobreceiver.worker.count")
+	// BlobReceiverWorkerBatchTimeout is the maximum amount of time to wait for more work, before processing a batch
+ BlobReceiverWorkerBatchTimeout = rootKey("blobreceiver.worker.batchTimeout")
+	// BlobReceiverWorkerBatchMaxInserts is the maximum number of database inserts to include in a single batch
+ BlobReceiverWorkerBatchMaxInserts = rootKey("blobreceiver.worker.batchMaxInserts")
// BlockchainEventCacheSize size of cache for blockchain events
BlockchainEventCacheSize = rootKey("blockchainevent.cache.size")
// BlockchainEventCacheTTL time to live of cache for blockchain events
@@ -356,6 +368,12 @@ func Reset() {
viper.SetDefault(string(BatchRetryInitDelay), "250ms")
viper.SetDefault(string(BatchRetryMaxDelay), "30s")
viper.SetDefault(string(BatchRetryMaxDelay), "30s")
+ viper.SetDefault(string(BlobReceiverRetryInitDelay), "250ms")
+ viper.SetDefault(string(BlobReceiverRetryMaxDelay), "1m")
+ viper.SetDefault(string(BlobReceiverRetryFactor), 2.0)
+ viper.SetDefault(string(BlobReceiverWorkerBatchTimeout), "50ms")
+ viper.SetDefault(string(BlobReceiverWorkerCount), 5)
+ viper.SetDefault(string(BlobReceiverWorkerBatchMaxInserts), 200)
viper.SetDefault(string(BroadcastBatchAgentTimeout), "2m")
viper.SetDefault(string(BroadcastBatchSize), 200)
viper.SetDefault(string(BroadcastBatchPayloadLimit), "800Kb")
diff --git a/internal/data/blobstore.go b/internal/data/blobstore.go
index e85da59457..9a1508891e 100644
--- a/internal/data/blobstore.go
+++ b/internal/data/blobstore.go
@@ -38,7 +38,7 @@ type blobStore struct {
exchange dataexchange.Plugin
}
-func (bs *blobStore) uploadVerifyBLOB(ctx context.Context, ns string, id *fftypes.UUID, reader io.Reader) (hash *fftypes.Bytes32, written int64, payloadRef string, err error) {
+func (bs *blobStore) uploadVerifyBlob(ctx context.Context, ns string, id *fftypes.UUID, reader io.Reader) (hash *fftypes.Bytes32, written int64, payloadRef string, err error) {
hashCalc := sha256.New()
dxReader, dx := io.Pipe()
storeAndHash := io.MultiWriter(hashCalc, dx)
@@ -47,12 +47,12 @@ func (bs *blobStore) uploadVerifyBLOB(ctx context.Context, ns string, id *fftype
go func() {
var err error
written, err = io.Copy(storeAndHash, reader)
- log.L(ctx).Debugf("Upload BLOB streamed %d bytes (err=%v)", written, err)
+ log.L(ctx).Debugf("Upload Blob streamed %d bytes (err=%v)", written, err)
_ = dx.Close()
copyDone <- err
}()
- payloadRef, uploadHash, uploadSize, dxErr := bs.exchange.UploadBLOB(ctx, ns, *id, dxReader)
+ payloadRef, uploadHash, uploadSize, dxErr := bs.exchange.UploadBlob(ctx, ns, *id, dxReader)
dxReader.Close()
copyErr := <-copyDone
if dxErr != nil {
@@ -63,7 +63,7 @@ func (bs *blobStore) uploadVerifyBLOB(ctx context.Context, ns string, id *fftype
}
hash = fftypes.HashResult(hashCalc)
- log.L(ctx).Debugf("Upload BLOB size=%d hashes: calculated=%s upload=%s (expected=%v) size=%d", written, hash, uploadHash, uploadSize, written)
+ log.L(ctx).Debugf("Upload Blob size=%d hashes: calculated=%s upload=%s (expected=%v) size=%d", written, hash, uploadHash, uploadSize, written)
if !uploadHash.Equals(hash) {
return nil, -1, "", i18n.NewError(ctx, i18n.MsgDXBadHash, uploadHash, hash)
@@ -76,7 +76,7 @@ func (bs *blobStore) uploadVerifyBLOB(ctx context.Context, ns string, id *fftype
}
-func (bs *blobStore) UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, mpart *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) {
+func (bs *blobStore) UploadBlob(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, mpart *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) {
data := &fftypes.Data{
ID: fftypes.NewUUID(),
@@ -91,7 +91,7 @@ func (bs *blobStore) UploadBLOB(ctx context.Context, ns string, inData *fftypes.
data.Namespace = ns
data.Created = fftypes.Now()
- hash, blobSize, payloadRef, err := bs.uploadVerifyBLOB(ctx, ns, data.ID, mpart.Data)
+ hash, blobSize, payloadRef, err := bs.uploadVerifyBlob(ctx, ns, data.ID, mpart.Data)
if err != nil {
return nil, err
}
@@ -123,7 +123,7 @@ func (bs *blobStore) UploadBLOB(ctx context.Context, ns string, inData *fftypes.
if err != nil {
return nil, err
}
- log.L(ctx).Infof("Uploaded BLOB blobhash=%s hash=%s (%s)", data.Blob.Hash, data.Hash, units.HumanSizeWithPrecision(float64(blobSize), 2))
+ log.L(ctx).Infof("Uploaded Blob blobhash=%s hash=%s (%s)", data.Blob.Hash, data.Hash, units.HumanSizeWithPrecision(float64(blobSize), 2))
err = bs.database.RunAsGroup(ctx, func(ctx context.Context) error {
err := bs.database.UpsertData(ctx, data, database.UpsertOptimizationNew)
@@ -139,7 +139,7 @@ func (bs *blobStore) UploadBLOB(ctx context.Context, ns string, inData *fftypes.
return data, nil
}
-func (bs *blobStore) DownloadBLOB(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error) {
+func (bs *blobStore) DownloadBlob(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error) {
if err := fftypes.ValidateFFNameField(ctx, ns, "namespace"); err != nil {
return nil, nil, err
@@ -168,6 +168,6 @@ func (bs *blobStore) DownloadBLOB(ctx context.Context, ns, dataID string) (*ffty
return nil, nil, i18n.NewError(ctx, i18n.MsgBlobNotFound, data.Blob.Hash)
}
- reader, err := bs.exchange.DownloadBLOB(ctx, blob.PayloadRef)
+ reader, err := bs.exchange.DownloadBlob(ctx, blob.PayloadRef)
return blob, reader, err
}
diff --git a/internal/data/blobstore_test.go b/internal/data/blobstore_test.go
index 7a9e2e6786..a0b18415ae 100644
--- a/internal/data/blobstore_test.go
+++ b/internal/data/blobstore_test.go
@@ -57,7 +57,7 @@ func TestUploadBlobOk(t *testing.T) {
dxID := make(chan fftypes.UUID, 1)
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything)
+ dxUpload := mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything)
dxUpload.RunFn = func(a mock.Arguments) {
readBytes, err := ioutil.ReadAll(a[3].(io.Reader))
assert.Nil(t, err)
@@ -68,7 +68,7 @@ func TestUploadBlobOk(t *testing.T) {
dxUpload.ReturnArguments = mock.Arguments{fmt.Sprintf("ns1/%s", uuid), &hash, int64(len(b)), err}
}
- data, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader(b)}, false)
+ data, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader(b)}, false)
assert.NoError(t, err)
// Check the hashes and other details of the data
@@ -99,7 +99,7 @@ func TestUploadBlobAutoMetaOk(t *testing.T) {
dxID := make(chan fftypes.UUID, 1)
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything)
+ dxUpload := mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything)
dxUpload.RunFn = func(a mock.Arguments) {
readBytes, err := ioutil.ReadAll(a[3].(io.Reader))
assert.Nil(t, err)
@@ -109,7 +109,7 @@ func TestUploadBlobAutoMetaOk(t *testing.T) {
dxUpload.ReturnArguments = mock.Arguments{fmt.Sprintf("ns1/%s", uuid), &hash, int64(len(readBytes)), err}
}
- data, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{
+ data, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{
Value: fftypes.JSONAnyPtr(`{"custom": "value1"}`),
}, &fftypes.Multipart{
Data: bytes.NewReader([]byte(`hello`)),
@@ -134,7 +134,7 @@ func TestUploadBlobBadValidator(t *testing.T) {
dxID := make(chan fftypes.UUID, 1)
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything)
+ dxUpload := mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything)
dxUpload.RunFn = func(a mock.Arguments) {
readBytes, err := ioutil.ReadAll(a[3].(io.Reader))
assert.Nil(t, err)
@@ -144,7 +144,7 @@ func TestUploadBlobBadValidator(t *testing.T) {
dxUpload.ReturnArguments = mock.Arguments{fmt.Sprintf("ns1/%s", uuid), &hash, int64(len(readBytes)), err}
}
- _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{
+ _, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{
Value: fftypes.JSONAnyPtr(`{"custom": "value1"}`),
Validator: "wrong",
}, &fftypes.Multipart{
@@ -164,13 +164,13 @@ func TestUploadBlobReadFail(t *testing.T) {
defer cancel()
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", fftypes.NewRandB32(), int64(0), nil)
+ dxUpload := mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything).Return("", fftypes.NewRandB32(), int64(0), nil)
dxUpload.RunFn = func(a mock.Arguments) {
_, err := ioutil.ReadAll(a[3].(io.Reader))
assert.NoError(t, err)
}
- _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: iotest.ErrReader(fmt.Errorf("pop"))}, false)
+ _, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: iotest.ErrReader(fmt.Errorf("pop"))}, false)
assert.Regexp(t, "FF10217.*pop", err)
}
@@ -181,9 +181,9 @@ func TestUploadBlobWriteFailDoesNotRead(t *testing.T) {
defer cancel()
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", nil, int64(0), fmt.Errorf("pop"))
+ mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything).Return("", nil, int64(0), fmt.Errorf("pop"))
- _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(`any old data`))}, false)
+ _, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(`any old data`))}, false)
assert.Regexp(t, "pop", err)
}
@@ -195,13 +195,13 @@ func TestUploadBlobHashMismatchCalculated(t *testing.T) {
b := []byte(`any old data`)
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", fftypes.NewRandB32(), int64(12345), nil)
+ dxUpload := mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything).Return("", fftypes.NewRandB32(), int64(12345), nil)
dxUpload.RunFn = func(a mock.Arguments) {
_, err := ioutil.ReadAll(a[3].(io.Reader))
assert.Nil(t, err)
}
- _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(b))}, false)
+ _, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(b))}, false)
assert.Regexp(t, "FF10238", err)
}
@@ -214,13 +214,13 @@ func TestUploadBlobSizeMismatch(t *testing.T) {
var hash fftypes.Bytes32 = sha256.Sum256(b)
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", &hash, int64(12345), nil)
+ dxUpload := mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything).Return("", &hash, int64(12345), nil)
dxUpload.RunFn = func(a mock.Arguments) {
_, err := ioutil.ReadAll(a[3].(io.Reader))
assert.Nil(t, err)
}
- _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(b))}, false)
+ _, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(b))}, false)
assert.Regexp(t, "FF10323", err)
}
@@ -233,7 +233,7 @@ func TestUploadBlobUpsertFail(t *testing.T) {
var hash fftypes.Bytes32 = sha256.Sum256(b)
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", &hash, int64(len(b)), nil)
+ dxUpload := mdx.On("UploadBlob", ctx, "ns1", mock.Anything, mock.Anything).Return("", &hash, int64(len(b)), nil)
dxUpload.RunFn = func(a mock.Arguments) {
_, err := ioutil.ReadAll(a[3].(io.Reader))
assert.Nil(t, err)
@@ -241,7 +241,7 @@ func TestUploadBlobUpsertFail(t *testing.T) {
mdi := dm.database.(*databasemocks.Plugin)
mdi.On("RunAsGroup", mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
- _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(b))}, false)
+ _, err := dm.UploadBlob(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(b))}, false)
assert.Regexp(t, "pop", err)
}
@@ -268,11 +268,11 @@ func TestDownloadBlobOk(t *testing.T) {
}, nil)
mdx := dm.exchange.(*dataexchangemocks.Plugin)
- mdx.On("DownloadBLOB", ctx, "ns1/blob1").Return(
+ mdx.On("DownloadBlob", ctx, "ns1/blob1").Return(
ioutil.NopCloser(bytes.NewReader([]byte("some blob"))),
nil)
- blob, reader, err := dm.DownloadBLOB(ctx, "ns1", dataID.String())
+ blob, reader, err := dm.DownloadBlob(ctx, "ns1", dataID.String())
assert.NoError(t, err)
assert.Equal(t, blobHash.String(), blob.Hash.String())
b, err := ioutil.ReadAll(reader)
@@ -299,7 +299,7 @@ func TestDownloadBlobNotFound(t *testing.T) {
}, nil)
mdi.On("GetBlobMatchingHash", ctx, blobHash).Return(nil, nil)
- _, _, err := dm.DownloadBLOB(ctx, "ns1", dataID.String())
+ _, _, err := dm.DownloadBlob(ctx, "ns1", dataID.String())
assert.Regexp(t, "FF10239", err)
}
@@ -322,7 +322,7 @@ func TestDownloadBlobLookupErr(t *testing.T) {
}, nil)
mdi.On("GetBlobMatchingHash", ctx, blobHash).Return(nil, fmt.Errorf("pop"))
- _, _, err := dm.DownloadBLOB(ctx, "ns1", dataID.String())
+ _, _, err := dm.DownloadBlob(ctx, "ns1", dataID.String())
assert.Regexp(t, "pop", err)
}
@@ -341,7 +341,7 @@ func TestDownloadBlobNoBlob(t *testing.T) {
Blob: &fftypes.BlobRef{},
}, nil)
- _, _, err := dm.DownloadBLOB(ctx, "ns1", dataID.String())
+ _, _, err := dm.DownloadBlob(ctx, "ns1", dataID.String())
assert.Regexp(t, "FF10241", err)
}
@@ -360,7 +360,7 @@ func TestDownloadBlobNSMismatch(t *testing.T) {
Blob: &fftypes.BlobRef{},
}, nil)
- _, _, err := dm.DownloadBLOB(ctx, "ns1", dataID.String())
+ _, _, err := dm.DownloadBlob(ctx, "ns1", dataID.String())
assert.Regexp(t, "FF10143", err)
}
@@ -375,7 +375,7 @@ func TestDownloadBlobDataLookupErr(t *testing.T) {
mdi := dm.database.(*databasemocks.Plugin)
mdi.On("GetDataByID", ctx, dataID, false).Return(nil, fmt.Errorf("pop"))
- _, _, err := dm.DownloadBLOB(ctx, "ns1", dataID.String())
+ _, _, err := dm.DownloadBlob(ctx, "ns1", dataID.String())
assert.Regexp(t, "pop", err)
}
@@ -387,7 +387,7 @@ func TestDownloadBlobBadNS(t *testing.T) {
dataID := fftypes.NewUUID()
- _, _, err := dm.DownloadBLOB(ctx, "!wrong", dataID.String())
+ _, _, err := dm.DownloadBlob(ctx, "!wrong", dataID.String())
assert.Regexp(t, "FF10131.*namespace", err)
}
@@ -397,7 +397,7 @@ func TestDownloadBlobBadID(t *testing.T) {
dm, ctx, cancel := newTestDataManager(t)
defer cancel()
- _, _, err := dm.DownloadBLOB(ctx, "ns1", "!uuid")
+ _, _, err := dm.DownloadBlob(ctx, "ns1", "!uuid")
assert.Regexp(t, "FF10142", err)
}
diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go
index c001f5adcc..3e5e64bf15 100644
--- a/internal/data/data_manager.go
+++ b/internal/data/data_manager.go
@@ -46,8 +46,8 @@ type Manager interface {
VerifyNamespaceExists(ctx context.Context, ns string) error
UploadJSON(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (*fftypes.Data, error)
- UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error)
- DownloadBLOB(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error)
+ UploadBlob(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error)
+ DownloadBlob(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error)
HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error)
WaitStop()
}
diff --git a/internal/database/sqlcommon/blob_sql.go b/internal/database/sqlcommon/blob_sql.go
index f71ba7893c..2d98b6d8a4 100644
--- a/internal/database/sqlcommon/blob_sql.go
+++ b/internal/database/sqlcommon/blob_sql.go
@@ -47,24 +47,65 @@ func (s *SQLCommon) InsertBlob(ctx context.Context, blob *fftypes.Blob) (err err
}
defer s.rollbackTx(ctx, tx, autoCommit)
- sequence, err := s.insertTx(ctx, tx,
- sq.Insert("blobs").
- Columns(blobColumns...).
- Values(
- blob.Hash,
- blob.PayloadRef,
- blob.Peer,
- blob.Created,
- blob.Size,
- ),
+ err = s.attemptBlobInsert(ctx, tx, blob)
+ if err != nil {
+ return err
+ }
+
+ return s.commitTx(ctx, tx, autoCommit)
+}
+
+func (s *SQLCommon) setBlobInsertValues(query sq.InsertBuilder, blob *fftypes.Blob) sq.InsertBuilder {
+ return query.Values(
+ blob.Hash,
+ blob.PayloadRef,
+ blob.Peer,
+ blob.Created,
+ blob.Size,
+ )
+}
+
+func (s *SQLCommon) attemptBlobInsert(ctx context.Context, tx *txWrapper, blob *fftypes.Blob) (err error) {
+ blob.Sequence, err = s.insertTx(ctx, tx,
+ s.setBlobInsertValues(sq.Insert("blobs").Columns(blobColumns...), blob),
nil, // no change events for blobs
)
+ return err
+}
+
+func (s *SQLCommon) InsertBlobs(ctx context.Context, blobs []*fftypes.Blob) (err error) {
+
+ ctx, tx, autoCommit, err := s.beginOrUseTx(ctx)
if err != nil {
return err
}
- blob.Sequence = sequence
+ defer s.rollbackTx(ctx, tx, autoCommit)
+
+ if s.features.MultiRowInsert {
+ query := sq.Insert("blobs").Columns(blobColumns...)
+ for _, blob := range blobs {
+ query = s.setBlobInsertValues(query, blob)
+ }
+ sequences := make([]int64, len(blobs))
+ err := s.insertTxRows(ctx, tx, query,
+ nil, /* no change events for blobs */
+ sequences,
+ true /* we want the caller to be able to retry with individual upserts */)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Fall back to individual inserts grouped in a TX
+ for _, blob := range blobs {
+ err := s.attemptBlobInsert(ctx, tx, blob)
+ if err != nil {
+ return err
+ }
+ }
+ }
return s.commitTx(ctx, tx, autoCommit)
+
}
func (s *SQLCommon) blobResult(ctx context.Context, row *sql.Rows) (*fftypes.Blob, error) {
diff --git a/internal/database/sqlcommon/blob_sql_test.go b/internal/database/sqlcommon/blob_sql_test.go
index be1cb439dc..d2ce813756 100644
--- a/internal/database/sqlcommon/blob_sql_test.go
+++ b/internal/database/sqlcommon/blob_sql_test.go
@@ -79,7 +79,7 @@ func TestBlobsE2EWithDB(t *testing.T) {
}
-func TestUpsertBlobFailBegin(t *testing.T) {
+func TestInsertBlobFailBegin(t *testing.T) {
s, mock := newMockProvider().init()
mock.ExpectBegin().WillReturnError(fmt.Errorf("pop"))
err := s.InsertBlob(context.Background(), &fftypes.Blob{})
@@ -87,7 +87,7 @@ func TestUpsertBlobFailBegin(t *testing.T) {
assert.NoError(t, mock.ExpectationsWereMet())
}
-func TestUpsertBlobFailInsert(t *testing.T) {
+func TestInsertBlobFailInsert(t *testing.T) {
s, mock := newMockProvider().init()
mock.ExpectBegin()
mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop"))
@@ -97,7 +97,7 @@ func TestUpsertBlobFailInsert(t *testing.T) {
assert.NoError(t, mock.ExpectationsWereMet())
}
-func TestUpsertBlobFailCommit(t *testing.T) {
+func TestInsertBlobFailCommit(t *testing.T) {
s, mock := newMockProvider().init()
mock.ExpectBegin()
mock.ExpectExec("INSERT .*").WillReturnResult(sqlmock.NewResult(1, 1))
@@ -107,6 +107,59 @@ func TestUpsertBlobFailCommit(t *testing.T) {
assert.NoError(t, mock.ExpectationsWereMet())
}
+func TestInsertBlobsBeginFail(t *testing.T) {
+ s, mock := newMockProvider().init()
+ mock.ExpectBegin().WillReturnError(fmt.Errorf("pop"))
+ err := s.InsertBlobs(context.Background(), []*fftypes.Blob{})
+ assert.Regexp(t, "FF10114", err)
+ assert.NoError(t, mock.ExpectationsWereMet())
+ s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertBlobsMultiRowOK(t *testing.T) {
+ s, mock := newMockProvider().init()
+ s.features.MultiRowInsert = true
+ s.fakePSQLInsert = true
+
+ blob1 := &fftypes.Blob{Hash: fftypes.NewRandB32(), PayloadRef: "pay1"}
+ blob2 := &fftypes.Blob{Hash: fftypes.NewRandB32(), PayloadRef: "pay2"}
+
+ mock.ExpectBegin()
+ mock.ExpectQuery("INSERT.*").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}).
+ AddRow(int64(1001)).
+ AddRow(int64(1002)),
+ )
+ mock.ExpectCommit()
+ err := s.InsertBlobs(context.Background(), []*fftypes.Blob{blob1, blob2})
+ assert.NoError(t, err)
+ assert.NoError(t, mock.ExpectationsWereMet())
+ s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertBlobsMultiRowFail(t *testing.T) {
+ s, mock := newMockProvider().init()
+ s.features.MultiRowInsert = true
+ s.fakePSQLInsert = true
+ blob1 := &fftypes.Blob{Hash: fftypes.NewRandB32(), PayloadRef: "pay1"}
+ mock.ExpectBegin()
+ mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+ err := s.InsertBlobs(context.Background(), []*fftypes.Blob{blob1})
+ assert.Regexp(t, "FF10116", err)
+ assert.NoError(t, mock.ExpectationsWereMet())
+ s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertBlobsSingleRowFail(t *testing.T) {
+ s, mock := newMockProvider().init()
+ blob1 := &fftypes.Blob{Hash: fftypes.NewRandB32(), PayloadRef: "pay1"}
+ mock.ExpectBegin()
+ mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+ err := s.InsertBlobs(context.Background(), []*fftypes.Blob{blob1})
+ assert.Regexp(t, "FF10116", err)
+ assert.NoError(t, mock.ExpectationsWereMet())
+ s.callbacks.AssertExpectations(t)
+}
+
func TestGetBlobByIDSelectFail(t *testing.T) {
s, mock := newMockProvider().init()
mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop"))
diff --git a/internal/dataexchange/ffdx/dxevent.go b/internal/dataexchange/ffdx/dxevent.go
new file mode 100644
index 0000000000..53fbb07f39
--- /dev/null
+++ b/internal/dataexchange/ffdx/dxevent.go
@@ -0,0 +1,183 @@
+// Copyright © 2022 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ffdx
+
+import (
+ "github.com/hyperledger/firefly/internal/i18n"
+ "github.com/hyperledger/firefly/internal/log"
+ "github.com/hyperledger/firefly/pkg/dataexchange"
+ "github.com/hyperledger/firefly/pkg/fftypes"
+)
+
+type wsEvent struct {
+ Type msgType `json:"type"`
+ EventID string `json:"id"`
+ Sender string `json:"sender"`
+ Recipient string `json:"recipient"`
+ RequestID string `json:"requestId"`
+ Path string `json:"path"`
+ Message string `json:"message"`
+ Hash string `json:"hash"`
+ Size int64 `json:"size"`
+ Error string `json:"error"`
+ Manifest string `json:"manifest"`
+ Info fftypes.JSONObject `json:"info"`
+}
+
+type dxEvent struct {
+ ffdx *FFDX
+ id string
+ dxType dataexchange.DXEventType
+ messageReceived *dataexchange.MessageReceived
+ privateBlobReceived *dataexchange.PrivateBlobReceived
+ transferResult *dataexchange.TransferResult
+}
+
+func (e *dxEvent) ID() string {
+ return e.id
+}
+
+func (e *dxEvent) Type() dataexchange.DXEventType {
+ return e.dxType
+}
+
+func (e *dxEvent) AckWithManifest(manifest string) {
+ select {
+ case e.ffdx.ackChannel <- &ack{
+ eventID: e.id,
+ manifest: manifest,
+ }:
+ case <-e.ffdx.ctx.Done():
+ log.L(e.ffdx.ctx).Debugf("Ack received after close: %s", e.id)
+ }
+}
+
+func (e *dxEvent) Ack() {
+ e.AckWithManifest("")
+}
+
+func (e *dxEvent) MessageReceived() *dataexchange.MessageReceived {
+ return e.messageReceived
+}
+
+func (e *dxEvent) PrivateBlobReceived() *dataexchange.PrivateBlobReceived {
+ return e.privateBlobReceived
+}
+
+func (e *dxEvent) TransferResult() *dataexchange.TransferResult {
+ return e.transferResult
+}
+
+func (h *FFDX) dispatchEvent(msg *wsEvent) {
+ var err error
+ e := &dxEvent{ffdx: h, id: msg.EventID}
+ switch msg.Type {
+ case messageFailed:
+ e.dxType = dataexchange.DXEventTypeTransferResult
+ e.transferResult = &dataexchange.TransferResult{
+ TrackingID: msg.RequestID,
+ Status: fftypes.OpStatusFailed,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Error: msg.Error,
+ Info: msg.Info,
+ },
+ }
+ case messageDelivered:
+ status := fftypes.OpStatusSucceeded
+ if h.capabilities.Manifest {
+ status = fftypes.OpStatusPending
+ }
+ e.dxType = dataexchange.DXEventTypeTransferResult
+ e.transferResult = &dataexchange.TransferResult{
+ TrackingID: msg.RequestID,
+ Status: status,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Info: msg.Info,
+ },
+ }
+ case messageAcknowledged:
+ e.dxType = dataexchange.DXEventTypeTransferResult
+ e.transferResult = &dataexchange.TransferResult{
+ TrackingID: msg.RequestID,
+ Status: fftypes.OpStatusSucceeded,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Manifest: msg.Manifest,
+ Info: msg.Info,
+ },
+ }
+ case messageReceived:
+ e.dxType = dataexchange.DXEventTypeMessageReceived
+ e.messageReceived = &dataexchange.MessageReceived{
+ PeerID: msg.Sender,
+ Data: []byte(msg.Message),
+ }
+ case blobFailed:
+ e.dxType = dataexchange.DXEventTypeTransferResult
+ e.transferResult = &dataexchange.TransferResult{
+ TrackingID: msg.RequestID,
+ Status: fftypes.OpStatusFailed,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Error: msg.Error,
+ Info: msg.Info,
+ },
+ }
+ case blobDelivered:
+ status := fftypes.OpStatusSucceeded
+ if h.capabilities.Manifest {
+ status = fftypes.OpStatusPending
+ }
+ e.dxType = dataexchange.DXEventTypeTransferResult
+ e.transferResult = &dataexchange.TransferResult{
+ TrackingID: msg.RequestID,
+ Status: status,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Info: msg.Info,
+ },
+ }
+ case blobReceived:
+ var hash *fftypes.Bytes32
+ hash, err = fftypes.ParseBytes32(h.ctx, msg.Hash)
+ if err == nil {
+ e.dxType = dataexchange.DXEventTypePrivateBlobReceived
+ e.privateBlobReceived = &dataexchange.PrivateBlobReceived{
+ PeerID: msg.Sender,
+ Hash: *hash,
+ Size: msg.Size,
+ PayloadRef: msg.Path,
+ }
+ }
+ case blobAcknowledged:
+ e.dxType = dataexchange.DXEventTypeTransferResult
+ e.transferResult = &dataexchange.TransferResult{
+ TrackingID: msg.RequestID,
+ Status: fftypes.OpStatusSucceeded,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Hash: msg.Hash,
+ Info: msg.Info,
+ },
+ }
+ default:
+ err = i18n.NewError(h.ctx, i18n.MsgUnpexectedDXMessageType, msg.Type)
+ }
+ // If we couldn't dispatch the event we received, we still ack it
+ if err != nil {
+ log.L(h.ctx).Warnf("Failed to dispatch DX event: %s", err)
+ e.Ack()
+ } else {
+ h.callbacks.DXEvent(e)
+ }
+}
diff --git a/internal/dataexchange/ffdx/ffdx.go b/internal/dataexchange/ffdx/ffdx.go
index be412a6fea..58bcf2114a 100644
--- a/internal/dataexchange/ffdx/ffdx.go
+++ b/internal/dataexchange/ffdx/ffdx.go
@@ -46,20 +46,7 @@ type FFDX struct {
initialized bool
initMutex sync.Mutex
nodes []fftypes.JSONObject
-}
-
-type wsEvent struct {
- Type msgType `json:"type"`
- Sender string `json:"sender"`
- Recipient string `json:"recipient"`
- RequestID string `json:"requestId"`
- Path string `json:"path"`
- Message string `json:"message"`
- Hash string `json:"hash"`
- Size int64 `json:"size"`
- Error string `json:"error"`
- Manifest string `json:"manifest"`
- Info fftypes.JSONObject `json:"info"`
+ ackChannel chan *ack
}
const (
@@ -104,6 +91,7 @@ type transferBlob struct {
type wsAck struct {
Action string `json:"action"`
+ ID string `json:"id"`
+	Manifest string `json:"manifest,omitempty"` // an opaque manifest from FireFly core, propagated by DX to TransferResult when this DX supports delivery acknowledgements
}
@@ -111,6 +99,11 @@ type dxStatus struct {
Status string `json:"status"`
}
+type ack struct {
+ eventID string
+ manifest string
+}
+
func (h *FFDX) Name() string {
return "ffdx"
}
@@ -118,6 +111,7 @@ func (h *FFDX) Name() string {
func (h *FFDX) Init(ctx context.Context, prefix config.Prefix, nodes []fftypes.JSONObject, callbacks dataexchange.Callbacks) (err error) {
h.ctx = log.WithLogField(ctx, "dx", "https")
h.callbacks = callbacks
+ h.ackChannel = make(chan *ack)
h.needsInit = prefix.GetBool(DataExchangeInitEnabled)
@@ -139,6 +133,7 @@ func (h *FFDX) Init(ctx context.Context, prefix config.Prefix, nodes []fftypes.J
return err
}
go h.eventLoop()
+ go h.ackLoop()
return nil
}
@@ -216,7 +211,7 @@ func (h *FFDX) AddPeer(ctx context.Context, peer fftypes.JSONObject) (err error)
return nil
}
-func (h *FFDX) UploadBLOB(ctx context.Context, ns string, id fftypes.UUID, content io.Reader) (payloadRef string, hash *fftypes.Bytes32, size int64, err error) {
+func (h *FFDX) UploadBlob(ctx context.Context, ns string, id fftypes.UUID, content io.Reader) (payloadRef string, hash *fftypes.Bytes32, size int64, err error) {
payloadRef = fmt.Sprintf("%s/%s", ns, &id)
var upload uploadBlob
res, err := h.client.R().SetContext(ctx).
@@ -233,7 +228,7 @@ func (h *FFDX) UploadBLOB(ctx context.Context, ns string, id fftypes.UUID, conte
return payloadRef, hash, upload.Size, nil
}
-func (h *FFDX) DownloadBLOB(ctx context.Context, payloadRef string) (content io.ReadCloser, err error) {
+func (h *FFDX) DownloadBlob(ctx context.Context, payloadRef string) (content io.ReadCloser, err error) {
res, err := h.client.R().SetContext(ctx).
SetDoNotParseResponse(true).
Get(fmt.Sprintf("/api/v1/blobs/%s", payloadRef))
@@ -266,7 +261,7 @@ func (h *FFDX) SendMessage(ctx context.Context, opID *fftypes.UUID, peerID strin
return nil
}
-func (h *FFDX) TransferBLOB(ctx context.Context, opID *fftypes.UUID, peerID, payloadRef string) (err error) {
+func (h *FFDX) TransferBlob(ctx context.Context, opID *fftypes.UUID, peerID, payloadRef string) (err error) {
if err := h.checkInitialized(ctx); err != nil {
return err
}
@@ -286,7 +281,7 @@ func (h *FFDX) TransferBLOB(ctx context.Context, opID *fftypes.UUID, peerID, pay
return nil
}
-func (h *FFDX) CheckBLOBReceived(ctx context.Context, peerID, ns string, id fftypes.UUID) (hash *fftypes.Bytes32, size int64, err error) {
+func (h *FFDX) CheckBlobReceived(ctx context.Context, peerID, ns string, id fftypes.UUID) (hash *fftypes.Bytes32, size int64, err error) {
var responseData responseWithRequestID
res, err := h.client.R().SetContext(ctx).
SetResult(&responseData).
@@ -310,6 +305,28 @@ func (h *FFDX) CheckBLOBReceived(ctx context.Context, peerID, ns string, id ffty
return hash, size, nil
}
+func (h *FFDX) ackLoop() {
+ for {
+ select {
+ case <-h.ctx.Done():
+ log.L(h.ctx).Debugf("Ack loop exiting")
+ return
+ case ack := <-h.ackChannel:
+ // Send the ack
+ ackBytes, _ := json.Marshal(&wsAck{
+ Action: "ack",
+ ID: ack.eventID,
+ Manifest: ack.manifest,
+ })
+ err := h.wsconn.Send(h.ctx, ackBytes)
+ if err != nil {
+ // Note we only get the error in the case we're closing down, so no need to retry
+ log.L(h.ctx).Warnf("Ack loop send failed: %s", err)
+ }
+ }
+ }
+}
+
func (h *FFDX) eventLoop() {
defer h.wsconn.Close()
l := log.L(h.ctx).WithField("role", "event-loop")
@@ -333,72 +350,7 @@ func (h *FFDX) eventLoop() {
continue // Swallow this and move on
}
l.Debugf("Received %s event from DX sender=%s", msg.Type, msg.Sender)
- var manifest string
- switch msg.Type {
- case messageFailed:
- err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{
- Error: msg.Error,
- Info: msg.Info,
- })
- case messageDelivered:
- status := fftypes.OpStatusSucceeded
- if h.capabilities.Manifest {
- status = fftypes.OpStatusPending
- }
- err = h.callbacks.TransferResult(msg.RequestID, status, fftypes.TransportStatusUpdate{
- Info: msg.Info,
- })
- case messageAcknowledged:
- err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{
- Manifest: msg.Manifest,
- Info: msg.Info,
- })
- case messageReceived:
- manifest, err = h.callbacks.MessageReceived(msg.Sender, []byte(msg.Message))
- case blobFailed:
- err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{
- Error: msg.Error,
- Info: msg.Info,
- })
- case blobDelivered:
- status := fftypes.OpStatusSucceeded
- if h.capabilities.Manifest {
- status = fftypes.OpStatusPending
- }
- err = h.callbacks.TransferResult(msg.RequestID, status, fftypes.TransportStatusUpdate{
- Info: msg.Info,
- })
- case blobReceived:
- var hash *fftypes.Bytes32
- hash, err = fftypes.ParseBytes32(ctx, msg.Hash)
- if err != nil {
- l.Errorf("Invalid hash received in DX event: '%s'", msg.Hash)
- err = nil // still confirm the message
- } else {
- err = h.callbacks.PrivateBLOBReceived(msg.Sender, *hash, msg.Size, msg.Path)
- }
- case blobAcknowledged:
- err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{
- Hash: msg.Hash,
- Info: msg.Info,
- })
- default:
- l.Errorf("Message unexpected: %s", msg.Type)
- }
-
- // Send the ack - as long as we didn't fail processing (which should only happen in core
- // if core itself is shutting down)
- if err == nil {
- ackBytes, _ := json.Marshal(&wsAck{
- Action: "commit",
- Manifest: manifest,
- })
- err = h.wsconn.Send(ctx, ackBytes)
- }
- if err != nil {
- l.Errorf("Event loop exiting: %s", err)
- return
- }
+ h.dispatchEvent(&msg)
}
}
}
diff --git a/internal/dataexchange/ffdx/ffdx_test.go b/internal/dataexchange/ffdx/ffdx_test.go
index 8548a31d01..2abe59a6b3 100644
--- a/internal/dataexchange/ffdx/ffdx_test.go
+++ b/internal/dataexchange/ffdx/ffdx_test.go
@@ -30,6 +30,7 @@ import (
"github.com/hyperledger/firefly/internal/restclient"
"github.com/hyperledger/firefly/mocks/dataexchangemocks"
"github.com/hyperledger/firefly/mocks/wsmocks"
+ "github.com/hyperledger/firefly/pkg/dataexchange"
"github.com/hyperledger/firefly/pkg/fftypes"
"github.com/hyperledger/firefly/pkg/wsclient"
"github.com/jarcoal/httpmock"
@@ -59,12 +60,14 @@ func newTestFFDX(t *testing.T, manifestEnabled bool) (h *FFDX, toServer, fromSer
nodes := make([]fftypes.JSONObject, 0)
h.InitPrefix(utConfPrefix)
- err := h.Init(context.Background(), utConfPrefix, nodes, &dataexchangemocks.Callbacks{})
+ dxCtx, dxCancel := context.WithCancel(context.Background())
+ err := h.Init(dxCtx, utConfPrefix, nodes, &dataexchangemocks.Callbacks{})
assert.NoError(t, err)
assert.Equal(t, "ffdx", h.Name())
assert.NotNil(t, h.Capabilities())
return h, toServer, fromServer, httpURL, func() {
cancel()
+ dxCancel()
httpmock.DeactivateAndReset()
}
}
@@ -88,6 +91,18 @@ func TestInitMissingURL(t *testing.T) {
assert.Regexp(t, "FF10138", err)
}
+func acker() func(args mock.Arguments) {
+ return func(args mock.Arguments) {
+ args[0].(dataexchange.DXEvent).Ack()
+ }
+}
+
+func manifestAcker(manifest string) func(args mock.Arguments) {
+ return func(args mock.Arguments) {
+ args[0].(dataexchange.DXEvent).AckWithManifest(manifest)
+ }
+}
+
func TestGetEndpointInfo(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -162,7 +177,7 @@ func TestAddPeerError(t *testing.T) {
assert.Regexp(t, "FF10229", err)
}
-func TestUploadBLOB(t *testing.T) {
+func TestUploadBlob(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -185,14 +200,14 @@ func TestUploadBLOB(t *testing.T) {
return res, nil
})
- payloadRef, hashReturned, sizeReturned, err := h.UploadBLOB(context.Background(), "ns1", *u, bytes.NewReader([]byte(`{}`)))
+ payloadRef, hashReturned, sizeReturned, err := h.UploadBlob(context.Background(), "ns1", *u, bytes.NewReader([]byte(`{}`)))
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("ns1/%s", u.String()), payloadRef)
assert.Equal(t, *hash, *hashReturned)
assert.Equal(t, int64(12345), sizeReturned)
}
-func TestUploadBLOBBadHash(t *testing.T) {
+func TestUploadBlobBadHash(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -210,11 +225,11 @@ func TestUploadBLOBBadHash(t *testing.T) {
return res, nil
})
- _, _, _, err := h.UploadBLOB(context.Background(), "ns1", *u, bytes.NewReader([]byte(`{}`)))
+ _, _, _, err := h.UploadBlob(context.Background(), "ns1", *u, bytes.NewReader([]byte(`{}`)))
assert.Regexp(t, "FF10237", err)
}
-func TestUploadBLOBError(t *testing.T) {
+func TestUploadBlobError(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -222,11 +237,11 @@ func TestUploadBLOBError(t *testing.T) {
httpmock.RegisterResponder("PUT", fmt.Sprintf("%s/api/v1/blobs/ns1/%s", httpURL, u),
httpmock.NewJsonResponderOrPanic(500, fftypes.JSONObject{}))
- _, _, _, err := h.UploadBLOB(context.Background(), "ns1", *u, bytes.NewReader([]byte(`{}`)))
+ _, _, _, err := h.UploadBlob(context.Background(), "ns1", *u, bytes.NewReader([]byte(`{}`)))
assert.Regexp(t, "FF10229", err)
}
-func TestCheckBLOBReceivedOk(t *testing.T) {
+func TestCheckBlobReceivedOk(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -245,13 +260,13 @@ func TestCheckBLOBReceivedOk(t *testing.T) {
return res, nil
})
- hashReturned, size, err := h.CheckBLOBReceived(context.Background(), "peer1", "ns1", *u)
+ hashReturned, size, err := h.CheckBlobReceived(context.Background(), "peer1", "ns1", *u)
assert.NoError(t, err)
assert.Equal(t, *hash, *hashReturned)
assert.Equal(t, int64(size), size)
}
-func TestCheckBLOBReceivedBadHash(t *testing.T) {
+func TestCheckBlobReceivedBadHash(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -268,11 +283,11 @@ func TestCheckBLOBReceivedBadHash(t *testing.T) {
return res, nil
})
- _, _, err := h.CheckBLOBReceived(context.Background(), "peer1", "ns1", *u)
+ _, _, err := h.CheckBlobReceived(context.Background(), "peer1", "ns1", *u)
assert.Regexp(t, "FF10237", err)
}
-func TestCheckBLOBReceivedBadSize(t *testing.T) {
+func TestCheckBlobReceivedBadSize(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -291,11 +306,11 @@ func TestCheckBLOBReceivedBadSize(t *testing.T) {
return res, nil
})
- _, _, err := h.CheckBLOBReceived(context.Background(), "peer1", "ns1", *u)
+ _, _, err := h.CheckBlobReceived(context.Background(), "peer1", "ns1", *u)
assert.Regexp(t, "FF10237", err)
}
-func TestCheckBLOBReceivedNotFound(t *testing.T) {
+func TestCheckBlobReceivedNotFound(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -309,12 +324,12 @@ func TestCheckBLOBReceivedNotFound(t *testing.T) {
return res, nil
})
- hashReturned, _, err := h.CheckBLOBReceived(context.Background(), "peer1", "ns1", *u)
+ hashReturned, _, err := h.CheckBlobReceived(context.Background(), "peer1", "ns1", *u)
assert.NoError(t, err)
assert.Nil(t, hashReturned)
}
-func TestCheckBLOBReceivedError(t *testing.T) {
+func TestCheckBlobReceivedError(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -328,11 +343,11 @@ func TestCheckBLOBReceivedError(t *testing.T) {
return res, nil
})
- _, _, err := h.CheckBLOBReceived(context.Background(), "peer1", "ns1", *u)
+ _, _, err := h.CheckBlobReceived(context.Background(), "peer1", "ns1", *u)
assert.Regexp(t, "FF10229", err)
}
-func TestDownloadBLOB(t *testing.T) {
+func TestDownloadBlob(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -341,21 +356,21 @@ func TestDownloadBLOB(t *testing.T) {
httpmock.RegisterResponder("GET", fmt.Sprintf("%s/api/v1/blobs/ns1/%s", httpURL, u),
httpmock.NewBytesResponder(200, []byte(`some data`)))
- rc, err := h.DownloadBLOB(context.Background(), fmt.Sprintf("ns1/%s", u))
+ rc, err := h.DownloadBlob(context.Background(), fmt.Sprintf("ns1/%s", u))
assert.NoError(t, err)
b, err := ioutil.ReadAll(rc)
rc.Close()
assert.Equal(t, `some data`, string(b))
}
-func TestDownloadBLOBError(t *testing.T) {
+func TestDownloadBlobError(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
httpmock.RegisterResponder("GET", fmt.Sprintf("%s/api/v1/blobs/bad", httpURL),
httpmock.NewJsonResponderOrPanic(500, fftypes.JSONObject{}))
- _, err := h.DownloadBLOB(context.Background(), "bad")
+ _, err := h.DownloadBlob(context.Background(), "bad")
assert.Regexp(t, "FF10229", err)
}
@@ -382,7 +397,7 @@ func TestSendMessageError(t *testing.T) {
assert.Regexp(t, "FF10229", err)
}
-func TestTransferBLOB(t *testing.T) {
+func TestTransferBlob(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
@@ -390,18 +405,18 @@ func TestTransferBLOB(t *testing.T) {
httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/transfers", httpURL),
httpmock.NewJsonResponderOrPanic(200, fftypes.JSONObject{}))
- err := h.TransferBLOB(context.Background(), fftypes.NewUUID(), "peer1", "ns1/id1")
+ err := h.TransferBlob(context.Background(), fftypes.NewUUID(), "peer1", "ns1/id1")
assert.NoError(t, err)
}
-func TestTransferBLOBError(t *testing.T) {
+func TestTransferBlobError(t *testing.T) {
h, _, _, httpURL, done := newTestFFDX(t, false)
defer done()
httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/transfers", httpURL),
httpmock.NewJsonResponderOrPanic(500, fftypes.JSONObject{}))
- err := h.TransferBLOB(context.Background(), fftypes.NewUUID(), "peer1", "ns1/id1")
+ err := h.TransferBlob(context.Background(), fftypes.NewUUID(), "peer1", "ns1/id1")
assert.Regexp(t, "FF10229", err)
}
@@ -413,72 +428,107 @@ func TestEvents(t *testing.T) {
err := h.Start()
assert.NoError(t, err)
- fromServer <- `!}` // ignored
- fromServer <- `{}` // ignored
+ fromServer <- `!}` // ignored without ack
+ fromServer <- `{"id":"0"}` // ignored with ack
msg := <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
+ assert.Equal(t, `{"action":"ack","id":"0"}`, string(msg))
mcb := h.callbacks.(*dataexchangemocks.Callbacks)
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusFailed, mock.MatchedBy(func(ts fftypes.TransportStatusUpdate) bool {
- return "pop" == ts.Error
- })).Return(nil)
- fromServer <- `{"type":"message-failed","requestID":"tx12345","error":"pop"}`
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "1" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().TrackingID == "tx12345" &&
+ ev.TransferResult().Status == fftypes.OpStatusFailed &&
+ ev.TransferResult().Error == "pop"
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"1","type":"message-failed","requestID":"tx12345","error":"pop"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
-
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, mock.Anything).Return(nil)
- fromServer <- `{"type":"message-delivered","requestID":"tx12345"}`
+ assert.Equal(t, `{"action":"ack","id":"1"}`, string(msg))
+
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "2" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().TrackingID == "tx12345" &&
+ ev.TransferResult().Status == fftypes.OpStatusSucceeded
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"2","type":"message-delivered","requestID":"tx12345"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
-
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, mock.MatchedBy(func(ts fftypes.TransportStatusUpdate) bool {
- return ts.Manifest == `{"manifest":true}` && ts.Info.String() == `{"signatures":"and stuff"}`
- })).Return(nil)
- fromServer <- `{"type":"message-acknowledged","requestID":"tx12345","info":{"signatures":"and stuff"},"manifest":"{\"manifest\":true}"}`
+ assert.Equal(t, `{"action":"ack","id":"2"}`, string(msg))
+
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "3" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().TrackingID == "tx12345" &&
+ ev.TransferResult().Status == fftypes.OpStatusSucceeded &&
+ ev.TransferResult().Manifest == `{"manifest":true}` &&
+ ev.TransferResult().Info.String() == `{"signatures":"and stuff"}`
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"3","type":"message-acknowledged","requestID":"tx12345","info":{"signatures":"and stuff"},"manifest":"{\"manifest\":true}"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
-
- mcb.On("MessageReceived", "peer1", []byte("message1")).Return(`{"manifest":true}`, nil)
- fromServer <- `{"type":"message-received","sender":"peer1","message":"message1"}`
+ assert.Equal(t, `{"action":"ack","id":"3"}`, string(msg))
+
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "4" &&
+ ev.Type() == dataexchange.DXEventTypeMessageReceived &&
+ ev.MessageReceived().PeerID == "peer1" &&
+ string(ev.MessageReceived().Data) == "message1"
+ })).Run(manifestAcker(`{"manifest":true}`)).Return(nil)
+ fromServer <- `{"id":"4","type":"message-received","sender":"peer1","message":"message1"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit","manifest":"{\"manifest\":true}"}`, string(msg))
-
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusFailed, mock.MatchedBy(func(ts fftypes.TransportStatusUpdate) bool {
- return "pop" == ts.Error
- })).Return(nil)
- fromServer <- `{"type":"blob-failed","requestID":"tx12345","error":"pop"}`
+ assert.Equal(t, `{"action":"ack","id":"4","manifest":"{\"manifest\":true}"}`, string(msg))
+
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "5" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().TrackingID == "tx12345" &&
+ ev.TransferResult().Status == fftypes.OpStatusFailed &&
+ ev.TransferResult().Error == "pop"
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"5","type":"blob-failed","requestID":"tx12345","error":"pop"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
-
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, mock.Anything).Return(nil)
- fromServer <- `{"type":"blob-delivered","requestID":"tx12345"}`
+ assert.Equal(t, `{"action":"ack","id":"5"}`, string(msg))
+
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "6" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().TrackingID == "tx12345" &&
+ ev.TransferResult().Status == fftypes.OpStatusSucceeded &&
+ ev.TransferResult().Info.String() == `{"some":"details"}`
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"6","type":"blob-delivered","requestID":"tx12345","info":{"some":"details"}}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
+ assert.Equal(t, `{"action":"ack","id":"6"}`, string(msg))
- fromServer <- `{"type":"blob-received","sender":"peer1","path":"ns1/! not a UUID"}`
+ fromServer <- `{"id":"7","type":"blob-received","sender":"peer1","path":"ns1/! not a UUID"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
+ assert.Equal(t, `{"action":"ack","id":"7"}`, string(msg))
u := fftypes.NewUUID()
- fromServer <- fmt.Sprintf(`{"type":"blob-received","sender":"peer1","path":"ns1/%s","hash":"!wrong","size":-1}`, u.String())
+ fromServer <- fmt.Sprintf(`{"id":"8","type":"blob-received","sender":"peer1","path":"ns1/%s","hash":"!wrong","size":-1}`, u.String())
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
+ assert.Equal(t, `{"action":"ack","id":"8"}`, string(msg))
hash := fftypes.NewRandB32()
- mcb.On("PrivateBLOBReceived", mock.Anything, mock.MatchedBy(func(b32 fftypes.Bytes32) bool {
- return b32 == *hash
- }), int64(12345), fmt.Sprintf("ns1/%s", u.String())).Return(nil)
- fromServer <- fmt.Sprintf(`{"type":"blob-received","sender":"peer1","path":"ns1/%s","hash":"%s","size":12345}`, u.String(), hash.String())
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "9" &&
+ ev.Type() == dataexchange.DXEventTypePrivateBlobReceived &&
+ ev.PrivateBlobReceived().Hash.Equals(hash)
+ })).Run(acker()).Return(nil)
+ fromServer <- fmt.Sprintf(`{"id":"9","type":"blob-received","sender":"peer1","path":"ns1/%s","hash":"%s","size":12345}`, u.String(), hash.String())
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
-
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, mock.MatchedBy(func(ts fftypes.TransportStatusUpdate) bool {
- return ts.Manifest == `{"manifest":true}` && ts.Info.String() == `{"signatures":"and stuff"}`
- })).Return(nil)
- fromServer <- `{"type":"blob-acknowledged","requestID":"tx12345","info":{"signatures":"and stuff"},"manifest":"{\"manifest\":true}"}`
+ assert.Equal(t, `{"action":"ack","id":"9"}`, string(msg))
+
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "10" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().TrackingID == "tx12345" &&
+ ev.TransferResult().Status == fftypes.OpStatusSucceeded &&
+ ev.TransferResult().Info.String() == `{"signatures":"and stuff"}`
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"10","type":"blob-acknowledged","requestID":"tx12345","info":{"signatures":"and stuff"},"manifest":"{\"manifest\":true}"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
+ assert.Equal(t, `{"action":"ack","id":"10"}`, string(msg))
mcb.AssertExpectations(t)
}
@@ -491,22 +541,30 @@ func TestEventsWithManifest(t *testing.T) {
err := h.Start()
assert.NoError(t, err)
- fromServer <- `!}` // ignored
- fromServer <- `{}` // ignored
+ fromServer <- `!}` // ignored without ack
+ fromServer <- `{"id":"0"}` // ignored with ack
msg := <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
+ assert.Equal(t, `{"action":"ack","id":"0"}`, string(msg))
mcb := h.callbacks.(*dataexchangemocks.Callbacks)
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusPending, mock.Anything).Return(nil)
- fromServer <- `{"type":"message-delivered","requestID":"tx12345"}`
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "1" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().Status == fftypes.OpStatusPending
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"1","type":"message-delivered","requestID":"tx12345"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
-
- mcb.On("TransferResult", "tx12345", fftypes.OpStatusPending, mock.Anything).Return(nil)
- fromServer <- `{"type":"blob-delivered","requestID":"tx12345"}`
+ assert.Equal(t, `{"action":"ack","id":"1"}`, string(msg))
+
+ mcb.On("DXEvent", mock.MatchedBy(func(ev dataexchange.DXEvent) bool {
+ return ev.ID() == "2" &&
+ ev.Type() == dataexchange.DXEventTypeTransferResult &&
+ ev.TransferResult().Status == fftypes.OpStatusPending
+ })).Run(acker()).Return(nil)
+ fromServer <- `{"id":"2","type":"blob-delivered","requestID":"tx12345"}`
msg = <-toServer
- assert.Equal(t, `{"action":"commit"}`, string(msg))
+ assert.Equal(t, `{"action":"ack","id":"2"}`, string(msg))
mcb.AssertExpectations(t)
}
@@ -529,17 +587,21 @@ func TestEventLoopReceiveClosed(t *testing.T) {
func TestEventLoopSendClosed(t *testing.T) {
dxc := &dataexchangemocks.Callbacks{}
wsm := &wsmocks.WSClient{}
+ ctx, cancelCtx := context.WithCancel(context.Background())
h := &FFDX{
- ctx: context.Background(),
- callbacks: dxc,
- wsconn: wsm,
+ ctx: ctx,
+ callbacks: dxc,
+ wsconn: wsm,
+ ackChannel: make(chan *ack, 1),
+ }
+ h.ackChannel <- &ack{
+ eventID: "12345",
}
- r := make(chan []byte, 1)
- r <- []byte(`{}`)
wsm.On("Close").Return()
- wsm.On("Receive").Return((<-chan []byte)(r))
- wsm.On("Send", mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
- h.eventLoop() // we're simply looking for it exiting
+ wsm.On("Send", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Run(func(args mock.Arguments) {
+ cancelCtx()
+ })
+ h.ackLoop() // we're simply looking for it exiting
}
func TestEventLoopClosedContext(t *testing.T) {
@@ -627,7 +689,7 @@ func TestDXUninitialized(t *testing.T) {
err = h.AddPeer(context.Background(), fftypes.JSONObject{})
assert.Regexp(t, "FF10342", err)
- err = h.TransferBLOB(context.Background(), fftypes.NewUUID(), "peer1", "ns1/id1")
+ err = h.TransferBlob(context.Background(), fftypes.NewUUID(), "peer1", "ns1/id1")
assert.Regexp(t, "FF10342", err)
err = h.SendMessage(context.Background(), fftypes.NewUUID(), "peer1", []byte(`some data`))
diff --git a/internal/events/aggregator.go b/internal/events/aggregator.go
index 364f830362..f8d4bc29bd 100644
--- a/internal/events/aggregator.go
+++ b/internal/events/aggregator.go
@@ -591,44 +591,3 @@ func (ag *aggregator) resolveBlobs(ctx context.Context, data fftypes.DataArray)
return true, nil
}
-
-func (ag *aggregator) rewindForBlobArrival(ctx context.Context, blobHash *fftypes.Bytes32) error {
-
- batchIDs := make(map[fftypes.UUID]bool)
-
- // We need to work out what pins potentially are unblocked by the arrival of this data
-
- // Find any data associated with this blob
- var data []*fftypes.DataRef
- filter := database.DataQueryFactory.NewFilter(ctx).Eq("blob.hash", blobHash)
- data, _, err := ag.database.GetDataRefs(ctx, filter)
- if err != nil {
- return err
- }
-
- // Find the messages assocated with that data
- var messages []*fftypes.Message
- for _, data := range data {
- fb := database.MessageQueryFactory.NewFilter(ctx)
- filter := fb.And(fb.Eq("confirmed", nil))
- messages, _, err = ag.database.GetMessagesForData(ctx, data.ID, filter)
- if err != nil {
- return err
- }
- }
-
- // Find the unique batch IDs for all the messages
- for _, msg := range messages {
- if msg.BatchID != nil {
- batchIDs[*msg.BatchID] = true
- }
- }
-
- // Initiate rewinds for all the batchIDs that are potentially completed by the arrival of this data
- for bid := range batchIDs {
- var batchID = bid // cannot use the address of the loop var
- log.L(ag.ctx).Infof("Batch '%s' contains reference to received blob %s", &bid, blobHash)
- ag.rewindBatches <- batchID
- }
- return nil
-}
diff --git a/internal/events/batch_pin_complete_test.go b/internal/events/batch_pin_complete_test.go
index 18c763dd39..8b7cd37109 100644
--- a/internal/events/batch_pin_complete_test.go
+++ b/internal/events/batch_pin_complete_test.go
@@ -179,7 +179,7 @@ func TestBatchPinCompleteOkPrivate(t *testing.T) {
// Call through to persistBatch - the hash of our batch will be invalid,
// which is swallowed without error as we cannot retry (it is logged of course)
- fn := mdi.Calls[0].Arguments[1].(func(ctx context.Context) error)
+ fn := mdi.Calls[1].Arguments[1].(func(ctx context.Context) error)
err = fn(context.Background())
assert.NoError(t, err)
diff --git a/internal/events/blob_receiver.go b/internal/events/blob_receiver.go
new file mode 100644
index 0000000000..be298e210d
--- /dev/null
+++ b/internal/events/blob_receiver.go
@@ -0,0 +1,282 @@
+// Copyright © 2022 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package events
+
+import (
+ "context"
+ "database/sql/driver"
+ "fmt"
+ "time"
+
+ "github.com/hyperledger/firefly/internal/config"
+ "github.com/hyperledger/firefly/internal/log"
+ "github.com/hyperledger/firefly/internal/retry"
+ "github.com/hyperledger/firefly/pkg/database"
+ "github.com/hyperledger/firefly/pkg/fftypes"
+)
+
+type blobNotification struct {
+ blob *fftypes.Blob
+ onComplete func()
+}
+
+type blobReceiverBatch struct {
+ notifications []*blobNotification
+ timeoutContext context.Context
+ timeoutCancel func()
+}
+
+// blobReceiver accepts notifications of received blobs and, when the database supports concurrency, batches them across worker goroutines for efficient insertion and batch rewind.
+type blobReceiver struct {
+ ctx context.Context
+ aggregator *aggregator
+ cancelFunc func()
+ database database.Plugin
+ workQueue chan *blobNotification
+ workersDone []chan struct{}
+ conf blobReceiverConf
+ closed bool
+ retry *retry.Retry
+}
+
+type blobReceiverConf struct {
+ workerCount int
+ batchTimeout time.Duration
+ maxInserts int
+}
+
+func newBlobReceiver(ctx context.Context, ag *aggregator) *blobReceiver {
+ br := &blobReceiver{
+ aggregator: ag,
+ database: ag.database,
+ conf: blobReceiverConf{
+ workerCount: config.GetInt(config.BlobReceiverWorkerCount),
+ batchTimeout: config.GetDuration(config.BlobReceiverWorkerBatchTimeout),
+ maxInserts: config.GetInt(config.BlobReceiverWorkerBatchMaxInserts),
+ },
+ retry: &retry.Retry{
+ InitialDelay: config.GetDuration(config.BlobReceiverRetryInitDelay),
+ MaximumDelay: config.GetDuration(config.BlobReceiverRetryMaxDelay),
+ Factor: config.GetFloat64(config.BlobReceiverRetryFactor),
+ },
+ }
+ br.ctx, br.cancelFunc = context.WithCancel(ctx)
+ if !ag.database.Capabilities().Concurrency {
+ log.L(ctx).Infof("Database plugin not configured for concurrency. Batched blob receiver updates disabled")
+ br.conf.workerCount = 0
+ }
+ return br
+}
+
+func (br *blobReceiver) blobReceived(ctx context.Context, notification *blobNotification) {
+ if br.conf.workerCount > 0 {
+ select {
+ case br.workQueue <- notification:
+ log.L(ctx).Debugf("Dispatched blob notification %s", notification.blob.Hash)
+ case <-br.ctx.Done():
+ log.L(ctx).Debugf("Not submitting received blob due to cancelled context")
+ }
+ return
+ }
+ // Otherwise do it in-line on this context
+ err := br.handleBlobNotificationsRetry(ctx, []*blobNotification{notification})
+ if err != nil {
+ log.L(ctx).Warnf("Exiting while updating operation: %s", err)
+ }
+}
+
+func (br *blobReceiver) initQueues() {
+ br.workQueue = make(chan *blobNotification)
+ br.workersDone = make([]chan struct{}, br.conf.workerCount)
+ for i := 0; i < br.conf.workerCount; i++ {
+ br.workersDone[i] = make(chan struct{})
+ }
+}
+
+func (br *blobReceiver) start() {
+ if br.conf.workerCount > 0 {
+ br.initQueues()
+ for i := 0; i < br.conf.workerCount; i++ {
+ go br.blobReceiverLoop(i)
+ }
+ }
+}
+
+func (br *blobReceiver) stop() {
+ br.closed = true
+ br.cancelFunc()
+ for _, workerDone := range br.workersDone {
+ <-workerDone
+ }
+}
+
+func (br *blobReceiver) blobReceiverLoop(index int) {
+ defer close(br.workersDone[index])
+
+ ctx := log.WithLogField(br.ctx, "blobreceiver", fmt.Sprintf("brcvr_%.3d", index))
+
+ var batch *blobReceiverBatch
+ for !br.closed {
+ var timeoutContext context.Context
+ var timedOut bool
+ if batch != nil {
+ timeoutContext = batch.timeoutContext
+ } else {
+ timeoutContext = ctx
+ }
+ select {
+ case work := <-br.workQueue:
+ if batch == nil {
+ batch = &blobReceiverBatch{}
+ batch.timeoutContext, batch.timeoutCancel = context.WithTimeout(ctx, br.conf.batchTimeout)
+ }
+ batch.notifications = append(batch.notifications, work)
+ case <-timeoutContext.Done():
+ timedOut = true
+ }
+
+ if batch != nil && (timedOut || len(batch.notifications) >= br.conf.maxInserts) {
+ batch.timeoutCancel()
+ err := br.handleBlobNotificationsRetry(ctx, batch.notifications)
+ if err != nil {
+ log.L(ctx).Debugf("Blob receiver worker exiting: %s", err)
+ return
+ }
+ batch = nil
+ }
+ }
+}
+
+func (br *blobReceiver) handleBlobNotificationsRetry(ctx context.Context, notifications []*blobNotification) error {
+ // We process the event in a retry loop (which will break only if the context is closed), so that
+ // we only confirm consumption of the event to the plugin once we've processed it.
+ err := br.retry.Do(ctx, "blob reference insert", func(attempt int) (retry bool, err error) {
+ return true, br.database.RunAsGroup(ctx, func(ctx context.Context) error {
+ return br.handleBlobNotifications(ctx, notifications)
+ })
+ })
+ // We only get an error here if we're exiting
+ if err != nil {
+ return err
+ }
+ // Notify all callbacks we completed
+ for _, notification := range notifications {
+ if notification.onComplete != nil {
+ notification.onComplete()
+ }
+ }
+ return nil
+}
+
+func (br *blobReceiver) insertNewBlobs(ctx context.Context, notifications []*blobNotification) ([]driver.Value, error) {
+
+ allHashes := make([]driver.Value, len(notifications))
+ for i, n := range notifications {
+ allHashes[i] = n.blob.Hash
+ }
+
+ // We want just one record in our DB for each entry in DX, so make the logic idempotent.
+ // Note that we do create a record for each separate receipt of data on a new payload ref,
+ // even if the hash of that data is the same.
+ fb := database.BlobQueryFactory.NewFilter(ctx)
+ filter := fb.In("hash", allHashes)
+ existingBlobs, _, err := br.database.GetBlobs(ctx, filter)
+ if err != nil {
+ return nil, err
+ }
+ newBlobs := make([]*fftypes.Blob, 0, len(existingBlobs))
+ newHashes := make([]driver.Value, 0, len(existingBlobs))
+ for _, notification := range notifications {
+ foundExisting := false
+ // Check for duplicates in the DB
+ for _, existing := range existingBlobs {
+ if existing.Hash.Equals(notification.blob.Hash) && existing.PayloadRef == notification.blob.PayloadRef {
+ foundExisting = true
+ break
+ }
+ }
+ // Check for duplicates in the notifications
+ for _, inBatch := range newBlobs {
+ if inBatch.Hash.Equals(notification.blob.Hash) && inBatch.PayloadRef == notification.blob.PayloadRef {
+ foundExisting = true
+ break
+ }
+ }
+ if !foundExisting {
+ newBlobs = append(newBlobs, notification.blob)
+ newHashes = append(newHashes, notification.blob.Hash)
+ }
+ }
+
+ // Insert the new blobs
+ if len(newBlobs) > 0 {
+ err = br.database.InsertBlobs(ctx, newBlobs)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return newHashes, nil
+
+}
+
+func (br *blobReceiver) handleBlobNotifications(ctx context.Context, notifications []*blobNotification) error {
+
+ l := log.L(br.ctx)
+
+ // Determine what blobs are new
+ newHashes, err := br.insertNewBlobs(ctx, notifications)
+ if err != nil {
+ return err
+ }
+ if len(newHashes) == 0 {
+ return nil
+ }
+
+ // We need to work out what pins potentially are unblocked by the arrival of this data
+ batchIDs := make(map[fftypes.UUID]bool)
+
+ // Find any data associated with this blob
+ var data []*fftypes.DataRef
+ filter := database.DataQueryFactory.NewFilter(ctx).In("blob.hash", newHashes)
+ data, _, err = br.database.GetDataRefs(ctx, filter)
+ if err != nil {
+ return err
+ }
+
+	// Find the messages associated with that data
+ for _, data := range data {
+ fb := database.MessageQueryFactory.NewFilter(ctx)
+ filter := fb.And(fb.Eq("confirmed", nil))
+ messages, _, err := br.database.GetMessagesForData(ctx, data.ID, filter)
+ if err != nil {
+ return err
+ }
+ // Find the unique batch IDs for all the messages
+ for _, msg := range messages {
+ if msg.BatchID != nil {
+ l.Debugf("Message %s in batch %s contains data %s reference to blob", msg.Header.ID, msg.BatchID, data.ID)
+ batchIDs[*msg.BatchID] = true
+ }
+ }
+ }
+
+ // Initiate rewinds for all the batchIDs that are potentially completed by the arrival of this data
+ for batchID := range batchIDs {
+ br.aggregator.rewindBatches <- batchID
+ }
+ return nil
+}
diff --git a/internal/events/blob_receiver_test.go b/internal/events/blob_receiver_test.go
new file mode 100644
index 0000000000..e834e79484
--- /dev/null
+++ b/internal/events/blob_receiver_test.go
@@ -0,0 +1,129 @@
+// Copyright © 2022 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package events
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hyperledger/firefly/mocks/databasemocks"
+ "github.com/hyperledger/firefly/pkg/fftypes"
+ "github.com/stretchr/testify/mock"
+)
+
+func TestBlobReceiverBackgroundDispatchOK(t *testing.T) {
+
+ em, cancel := newTestEventManagerWithDBConcurrency(t)
+ defer cancel()
+ em.blobReceiver.start()
+
+ dataID := fftypes.NewUUID()
+ batchID := fftypes.NewUUID()
+
+ mdi := em.database.(*databasemocks.Plugin)
+ mdi.On("GetBlobs", mock.Anything, mock.Anything).Return([]*fftypes.Blob{}, nil, nil)
+ mdi.On("InsertBlobs", mock.Anything, mock.Anything).Return(nil, nil)
+ mdi.On("GetDataRefs", mock.Anything, mock.Anything).Return(fftypes.DataRefs{
+ {ID: dataID},
+ }, nil, nil)
+ mdi.On("GetMessagesForData", mock.Anything, dataID, mock.Anything).Return([]*fftypes.Message{
+ {Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, BatchID: batchID},
+ }, nil, nil)
+
+ blobHash := fftypes.NewRandB32()
+ done := make(chan struct{})
+ em.blobReceiver.blobReceived(em.ctx, &blobNotification{
+ blob: &fftypes.Blob{
+ Hash: blobHash,
+ },
+ })
+ em.blobReceiver.blobReceived(em.ctx, &blobNotification{
+ blob: &fftypes.Blob{
+ Hash: blobHash, // de-dup'd
+ },
+ onComplete: func() {
+ close(done)
+ },
+ })
+ <-done
+
+ mdi.AssertExpectations(t)
+ em.blobReceiver.stop()
+
+}
+
+func TestBlobReceiverBackgroundDispatchCancelled(t *testing.T) {
+
+ em, cancel := newTestEventManagerWithDBConcurrency(t)
+ cancel()
+ em.blobReceiver.start()
+
+ em.blobReceiver.blobReceived(em.ctx, &blobNotification{
+ blob: &fftypes.Blob{
+ Hash: fftypes.NewRandB32(),
+ },
+ })
+ em.blobReceiver.stop()
+
+}
+
+func TestBlobReceiverBackgroundDispatchFail(t *testing.T) {
+
+ em, cancel := newTestEventManagerWithDBConcurrency(t)
+ em.blobReceiver.start()
+
+ done := make(chan struct{})
+ mdi := em.database.(*databasemocks.Plugin)
+ mdi.On("GetBlobs", mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")).Run(func(args mock.Arguments) {
+ cancel()
+ close(done)
+ })
+
+ em.blobReceiver.blobReceived(em.ctx, &blobNotification{
+ blob: &fftypes.Blob{
+ Hash: fftypes.NewRandB32(),
+ },
+ })
+ <-done
+
+ mdi.AssertExpectations(t)
+ em.blobReceiver.stop()
+
+}
+
+func TestBlobReceiverDispatchDup(t *testing.T) {
+
+ em, cancel := newTestEventManager(t)
+ defer cancel()
+
+ blobHash := fftypes.NewRandB32()
+
+ mdi := em.database.(*databasemocks.Plugin)
+ mdi.On("GetBlobs", mock.Anything, mock.Anything).Return([]*fftypes.Blob{
+ {Hash: blobHash, PayloadRef: "payload1"},
+ }, nil, nil)
+
+ em.blobReceiver.blobReceived(em.ctx, &blobNotification{
+ blob: &fftypes.Blob{
+ Hash: blobHash,
+ PayloadRef: "payload1",
+ },
+ })
+
+ mdi.AssertExpectations(t)
+
+}
diff --git a/internal/events/dx_callbacks.go b/internal/events/dx_callbacks.go
index a9bf766cb2..e16c11d9cb 100644
--- a/internal/events/dx_callbacks.go
+++ b/internal/events/dx_callbacks.go
@@ -27,38 +27,6 @@ import (
"github.com/hyperledger/firefly/pkg/fftypes"
)
-func (em *eventManager) MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) (manifest string, err error) {
-
- l := log.L(em.ctx)
-
- // De-serializae the transport wrapper
- var wrapper *fftypes.TransportWrapper
- err = json.Unmarshal(data, &wrapper)
- if err != nil {
- l.Errorf("Invalid transmission from %s peer '%s': %s", dx.Name(), peerID, err)
- return "", nil
- }
- if wrapper.Batch == nil {
- l.Errorf("Invalid transmission: nil batch")
- return "", nil
- }
- l.Infof("Private batch received from %s peer '%s' (len=%d)", dx.Name(), peerID, len(data))
-
- if wrapper.Batch.Payload.TX.Type == fftypes.TransactionTypeUnpinned {
- valid, err := em.definitions.EnsureLocalGroup(em.ctx, wrapper.Group)
- if err != nil {
- return "", err
- }
- if !valid {
- l.Errorf("Invalid transmission: invalid group")
- return "", nil
- }
- }
-
- manifestString, err := em.privateBatchReceived(peerID, wrapper.Batch)
- return manifestString, err
-}
-
// Check data exchange peer the data came from, has been registered to the org listed in the batch.
// Note the on-chain identity check is performed separately by the aggregator (across broadcast and private consistently).
func (em *eventManager) checkReceivedOffchainIdentity(ctx context.Context, peerID, author string) (node *fftypes.Identity, err error) {
@@ -108,13 +76,24 @@ func (em *eventManager) checkReceivedOffchainIdentity(ctx context.Context, peerI
return node, nil
}
-func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch) (manifest string, err error) {
+func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch, wrapperGroup *fftypes.Group) (manifest string, err error) {
// Retry for persistence errors (not validation errors)
err = em.retry.Do(em.ctx, "private batch received", func(attempt int) (bool, error) {
return true, em.database.RunAsGroup(em.ctx, func(ctx context.Context) error {
l := log.L(ctx)
+ if wrapperGroup != nil && batch.Payload.TX.Type == fftypes.TransactionTypeUnpinned {
+ valid, err := em.definitions.EnsureLocalGroup(ctx, wrapperGroup)
+ if err != nil {
+ return err
+ }
+ if !valid {
+ l.Errorf("Invalid transmission: invalid group: %+v", wrapperGroup)
+ return nil
+ }
+ }
+
node, err := em.checkReceivedOffchainIdentity(ctx, peerID, batch.Author)
if err != nil {
return err
@@ -140,8 +119,11 @@ func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch
return nil
})
})
+ if err != nil {
+ return "", err
+ }
// Poke the aggregator to do its stuff - after we have committed the transaction so the pins are visible
- if err == nil && batch.Payload.TX.Type == fftypes.TransactionTypeBatchPin {
+ if batch.Payload.TX.Type == fftypes.TransactionTypeBatchPin {
log.L(em.ctx).Debugf("Rewinding for persisted private batch %s", batch.ID)
em.aggregator.rewindBatches <- *batch.ID
}
@@ -185,35 +167,69 @@ func (em *eventManager) markUnpinnedMessagesConfirmed(ctx context.Context, batch
return nil
}
-func (em *eventManager) PrivateBLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
+func (em *eventManager) DXEvent(dx dataexchange.Plugin, event dataexchange.DXEvent) {
+ switch event.Type() {
+ case dataexchange.DXEventTypePrivateBlobReceived:
+ em.privateBlobReceived(dx, event)
+ case dataexchange.DXEventTypeMessageReceived:
+ // Batches are significant items of work in their own right, so get dispatched to their own routines
+ go em.messageReceived(dx, event)
+ default:
+ log.L(em.ctx).Errorf("Invalid DX event type from %s: %d", dx.Name(), event.Type())
+ event.Ack() // still ack
+ }
+}
+
+func (em *eventManager) messageReceived(dx dataexchange.Plugin, event dataexchange.DXEvent) {
l := log.L(em.ctx)
- l.Infof("Blob received event from data exchange %s: Peer='%s' Hash='%v' PayloadRef='%s'", dx.Name(), peerID, &hash, payloadRef)
- if peerID == "" || len(peerID) > 256 || payloadRef == "" || len(payloadRef) > 1024 {
- l.Errorf("Invalid blob received event from data exhange: Peer='%s' Hash='%v' PayloadRef='%s'", peerID, &hash, payloadRef)
- return nil // we consume the event still
+ mr := event.MessageReceived()
+
+	// De-serialize the transport wrapper
+ var wrapper *fftypes.TransportWrapper
+ err := json.Unmarshal(mr.Data, &wrapper)
+ if err != nil {
+ l.Errorf("Invalid transmission from %s peer '%s': %s", dx.Name(), mr.PeerID, err)
+ event.AckWithManifest("")
+ return
+ }
+ if wrapper.Batch == nil {
+ l.Errorf("Invalid transmission: nil batch")
+ event.AckWithManifest("")
+ return
}
+ l.Infof("Private batch received from %s peer '%s' (len=%d)", dx.Name(), mr.PeerID, len(mr.Data))
- return em.blobReceivedCommon(peerID, hash, size, payloadRef)
+ manifestString, err := em.privateBatchReceived(mr.PeerID, wrapper.Batch, wrapper.Group)
+ if err != nil {
+ l.Warnf("Exited while persisting batch: %s", err)
+ // We do NOT ack here as we broke out of the retry
+ return
+ }
+ event.AckWithManifest(manifestString)
}
-func (em *eventManager) blobReceivedCommon(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
- // We process the event in a retry loop (which will break only if the context is closed), so that
- // we only confirm consumption of the event to the plugin once we've processed it.
- return em.retry.Do(em.ctx, "blob reference insert", func(attempt int) (retry bool, err error) {
- return true, em.database.RunAsGroup(em.ctx, func(ctx context.Context) error {
- // Insert the blob into the detabase
- err := em.database.InsertBlob(ctx, &fftypes.Blob{
- Peer: peerID,
- PayloadRef: payloadRef,
- Hash: &hash,
- Size: size,
- Created: fftypes.Now(),
- })
- if err != nil {
- return err
- }
- return em.aggregator.rewindForBlobArrival(ctx, &hash)
- })
+func (em *eventManager) privateBlobReceived(dx dataexchange.Plugin, event dataexchange.DXEvent) {
+ br := event.PrivateBlobReceived()
+ log.L(em.ctx).Infof("Blob received event from data exchange %s: Peer='%s' Hash='%v' PayloadRef='%s'", dx.Name(), br.PeerID, &br.Hash, br.PayloadRef)
+
+ if br.PeerID == "" || len(br.PeerID) > 256 || br.PayloadRef == "" || len(br.PayloadRef) > 1024 {
+ log.L(em.ctx).Errorf("Invalid blob received event from data exhange: Peer='%s' Hash='%v' PayloadRef='%s'", br.PeerID, &br.Hash, br.PayloadRef)
+ event.Ack() // Still confirm the event
+ return
+ }
+
+ // Dispatch to the blob receiver for efficient batch DB operations
+ em.blobReceiver.blobReceived(em.ctx, &blobNotification{
+ blob: &fftypes.Blob{
+ Peer: br.PeerID,
+ PayloadRef: br.PayloadRef,
+ Hash: &br.Hash,
+ Size: br.Size,
+ Created: fftypes.Now(),
+ },
+ onComplete: func() {
+ event.Ack()
+ },
})
}
diff --git a/internal/events/dx_callbacks_test.go b/internal/events/dx_callbacks_test.go
index 8d3fe90e31..54b611933e 100644
--- a/internal/events/dx_callbacks_test.go
+++ b/internal/events/dx_callbacks_test.go
@@ -28,6 +28,7 @@ import (
"github.com/hyperledger/firefly/mocks/definitionsmocks"
"github.com/hyperledger/firefly/mocks/identitymanagermocks"
"github.com/hyperledger/firefly/pkg/database"
+ "github.com/hyperledger/firefly/pkg/dataexchange"
"github.com/hyperledger/firefly/pkg/fftypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -79,11 +80,64 @@ func newTestNode(name string, owner *fftypes.Identity) *fftypes.Identity {
return identity
}
+func newMessageReceivedNoAck(peerID string, data []byte) *dataexchangemocks.DXEvent {
+ mde := &dataexchangemocks.DXEvent{}
+ mde.On("MessageReceived").Return(&dataexchange.MessageReceived{
+ PeerID: peerID,
+ Data: data,
+ })
+ mde.On("Type").Return(dataexchange.DXEventTypeMessageReceived).Maybe()
+ return mde
+}
+
+func newMessageReceived(peerID string, data []byte, expectedManifest string) *dataexchangemocks.DXEvent {
+ mde := newMessageReceivedNoAck(peerID, data)
+ mde.On("AckWithManifest", expectedManifest).Return()
+ return mde
+}
+
+func newPrivateBlobReceivedNoAck(peerID string, hash *fftypes.Bytes32, size int64, payloadRef string) *dataexchangemocks.DXEvent {
+ mde := &dataexchangemocks.DXEvent{}
+ mde.On("PrivateBlobReceived").Return(&dataexchange.PrivateBlobReceived{
+ PeerID: peerID,
+ Hash: *hash,
+ Size: size,
+ PayloadRef: payloadRef,
+ })
+ mde.On("Type").Return(dataexchange.DXEventTypePrivateBlobReceived).Maybe()
+ return mde
+}
+
+func newPrivateBlobReceived(peerID string, hash *fftypes.Bytes32, size int64, payloadRef string) *dataexchangemocks.DXEvent {
+ mde := newPrivateBlobReceivedNoAck(peerID, hash, size, payloadRef)
+ mde.On("Ack").Return()
+ return mde
+}
+
+func TestUnknownEvent(t *testing.T) {
+ em, cancel := newTestEventManager(t)
+ defer cancel()
+
+ done := make(chan struct{})
+ mdx := &dataexchangemocks.Plugin{}
+ mdx.On("Name").Return("utdx").Maybe()
+ mde := &dataexchangemocks.DXEvent{}
+ mde.On("Type").Return(dataexchange.DXEventType(99)).Maybe()
+ mde.On("Ack").Run(func(args mock.Arguments) {
+ close(done)
+ })
+ em.DXEvent(mdx, mde)
+ <-done
+
+ mde.AssertExpectations(t)
+ mdx.AssertExpectations(t)
+}
+
func TestPinnedReceiveOK(t *testing.T) {
em, cancel := newTestEventManager(t)
defer cancel()
- _, b := sampleBatchTransfer(t, fftypes.TransactionTypeBatchPin)
+ batch, b := sampleBatchTransfer(t, fftypes.TransactionTypeBatchPin)
org1 := newTestOrg("org1")
node1 := newTestNode("node1", org1)
@@ -102,10 +156,15 @@ func TestPinnedReceiveOK(t *testing.T) {
mdm := em.data.(*datamocks.Manager)
mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return()
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.NotNil(t, m)
+ done := make(chan struct{})
+ mde := newMessageReceivedNoAck("peer1", b)
+ mde.On("AckWithManifest", batch.Payload.Manifest(batch.ID).String()).Run(func(args mock.Arguments) {
+ close(done)
+ })
+ em.DXEvent(mdx, mde)
+ <-done
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
mdm.AssertExpectations(t)
@@ -136,10 +195,10 @@ func TestMessageReceiveOkBadBatchIgnored(t *testing.T) {
}).Return(node1, nil)
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(org1, false, nil)
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.Empty(t, m)
+ mde := newMessageReceived("peer1", b, "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdx.AssertExpectations(t)
mim.AssertExpectations(t)
}
@@ -162,10 +221,12 @@ func TestMessageReceivePersistBatchError(t *testing.T) {
}).Return(node1, nil)
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(org1, false, nil)
mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(fmt.Errorf("pop"))
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
mim.AssertExpectations(t)
@@ -177,9 +238,11 @@ func TestMessageReceivedBadData(t *testing.T) {
mdx := &dataexchangemocks.Plugin{}
mdx.On("Name").Return("utdx")
- m, err := em.MessageReceived(mdx, "peer1", []byte(`!{}`))
- assert.NoError(t, err)
- assert.Empty(t, m)
+
+ mde := newMessageReceived("peer1", []byte(`!{}`), "")
+ em.messageReceived(mdx, mde)
+
+ mde.AssertExpectations(t)
}
@@ -189,12 +252,13 @@ func TestMessageReceivedUnknownType(t *testing.T) {
mdx := &dataexchangemocks.Plugin{}
mdx.On("Name").Return("utdx")
- m, err := em.MessageReceived(mdx, "peer1", []byte(`{
+
+ mde := newMessageReceived("peer1", []byte(`{
"type": "unknown"
- }`))
- assert.NoError(t, err)
- assert.Empty(t, m)
+ }`), "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
}
func TestMessageReceivedNilBatch(t *testing.T) {
@@ -203,12 +267,13 @@ func TestMessageReceivedNilBatch(t *testing.T) {
mdx := &dataexchangemocks.Plugin{}
mdx.On("Name").Return("utdx")
- m, err := em.MessageReceived(mdx, "peer1", []byte(`{
+
+ mde := newMessageReceived("peer1", []byte(`{
"type": "batch"
- }`))
- assert.NoError(t, err)
- assert.Empty(t, m)
+ }`), "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
}
func TestMessageReceivedNilMessage(t *testing.T) {
@@ -217,12 +282,13 @@ func TestMessageReceivedNilMessage(t *testing.T) {
mdx := &dataexchangemocks.Plugin{}
mdx.On("Name").Return("utdx")
- m, err := em.MessageReceived(mdx, "peer1", []byte(`{
+
+ mde := newMessageReceived("peer1", []byte(`{
"type": "message"
- }`))
- assert.NoError(t, err)
- assert.Empty(t, m)
+ }`), "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
}
func TestMessageReceivedNilGroup(t *testing.T) {
@@ -231,12 +297,14 @@ func TestMessageReceivedNilGroup(t *testing.T) {
mdx := &dataexchangemocks.Plugin{}
mdx.On("Name").Return("utdx")
- m, err := em.MessageReceived(mdx, "peer1", []byte(`{
+
+ mde := newMessageReceived("peer1", []byte(`{
"type": "message",
"message": {}
- }`))
- assert.NoError(t, err)
- assert.Empty(t, m)
+ }`), "")
+ em.messageReceived(mdx, mde)
+
+ mde.AssertExpectations(t)
}
func TestMessageReceiveNodeLookupError(t *testing.T) {
@@ -256,9 +324,12 @@ func TestMessageReceiveNodeLookupError(t *testing.T) {
mdx := &dataexchangemocks.Plugin{}
mdx.On("Name").Return("utdx")
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+
+ mde.AssertExpectations(t)
}
func TestMessageReceiveGetCandidateOrgFail(t *testing.T) {
@@ -278,10 +349,12 @@ func TestMessageReceiveGetCandidateOrgFail(t *testing.T) {
Value: "peer1",
}).Return(node1, nil)
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(nil, true, fmt.Errorf("pop"))
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
}
@@ -303,10 +376,10 @@ func TestMessageReceiveGetCandidateOrgNotFound(t *testing.T) {
Value: "peer1",
}).Return(node1, nil)
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(nil, false, nil)
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.Empty(t, m)
+ mde := newMessageReceived("peer1", b, "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
}
@@ -328,15 +401,15 @@ func TestMessageReceiveGetCandidateOrgNotMatch(t *testing.T) {
Value: "peer1",
}).Return(node1, nil)
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(newTestOrg("org2"), false, nil)
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.Empty(t, m)
+ mde := newMessageReceived("peer1", b, "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
}
-func TestPrivateBLOBReceivedTriggersRewindOk(t *testing.T) {
+func TestPrivateBlobReceivedTriggersRewindOk(t *testing.T) {
em, cancel := newTestEventManager(t)
defer cancel()
hash := fftypes.NewRandB32()
@@ -347,7 +420,8 @@ func TestPrivateBLOBReceivedTriggersRewindOk(t *testing.T) {
mdx.On("Name").Return("utdx")
mdi := em.database.(*databasemocks.Plugin)
- mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil)
+ mdi.On("GetBlobs", em.ctx, mock.Anything).Return([]*fftypes.Blob{}, nil, nil)
+ mdi.On("InsertBlobs", em.ctx, mock.Anything).Return(nil)
mdi.On("GetDataRefs", em.ctx, mock.Anything).Return(fftypes.DataRefs{
{ID: dataID},
}, nil, nil)
@@ -355,27 +429,34 @@ func TestPrivateBLOBReceivedTriggersRewindOk(t *testing.T) {
{BatchID: batchID},
}, nil, nil)
- err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
- assert.NoError(t, err)
+ done := make(chan struct{})
+ mde := newPrivateBlobReceivedNoAck("peer1", hash, 12345, "ns1/path1")
+ mde.On("Ack").Run(func(args mock.Arguments) {
+ close(done)
+ })
+ em.DXEvent(mdx, mde)
+ <-done
bid := <-em.aggregator.rewindBatches
assert.Equal(t, *batchID, bid)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
}
-func TestPrivateBLOBReceivedBadEvent(t *testing.T) {
+func TestPrivateBlobReceivedBadEvent(t *testing.T) {
em, cancel := newTestEventManager(t)
defer cancel()
mdx := &dataexchangemocks.Plugin{}
mdx.On("Name").Return("utdx")
- err := em.PrivateBLOBReceived(mdx, "", fftypes.Bytes32{}, 12345, "")
- assert.NoError(t, err)
+ mde := newPrivateBlobReceived("", fftypes.NewRandB32(), 12345, "")
+ em.privateBlobReceived(mdx, mde)
+ mde.AssertExpectations(t)
}
-func TestPrivateBLOBReceivedGetMessagesFail(t *testing.T) {
+func TestPrivateBlobReceivedGetMessagesFail(t *testing.T) {
em, cancel := newTestEventManager(t)
cancel() // retryable error
hash := fftypes.NewRandB32()
@@ -385,19 +466,22 @@ func TestPrivateBLOBReceivedGetMessagesFail(t *testing.T) {
mdx.On("Name").Return("utdx")
mdi := em.database.(*databasemocks.Plugin)
- mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil)
+ mdi.On("GetBlobs", em.ctx, mock.Anything).Return([]*fftypes.Blob{}, nil, nil)
+ mdi.On("InsertBlobs", em.ctx, mock.Anything).Return(nil)
mdi.On("GetDataRefs", em.ctx, mock.Anything).Return(fftypes.DataRefs{
{ID: dataID},
}, nil, nil)
mdi.On("GetMessagesForData", em.ctx, dataID, mock.Anything).Return(nil, nil, fmt.Errorf("pop"))
- err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
- assert.Regexp(t, "FF10158", err)
+ // no ack as we are simulating termination mid retry
+ mde := newPrivateBlobReceivedNoAck("peer1", hash, 12345, "ns1/path1")
+ em.privateBlobReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
}
-func TestPrivateBLOBReceivedGetDataRefsFail(t *testing.T) {
+func TestPrivateBlobReceivedGetDataRefsFail(t *testing.T) {
em, cancel := newTestEventManager(t)
cancel() // retryable error
hash := fftypes.NewRandB32()
@@ -406,16 +490,39 @@ func TestPrivateBLOBReceivedGetDataRefsFail(t *testing.T) {
mdx.On("Name").Return("utdx")
mdi := em.database.(*databasemocks.Plugin)
- mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil)
+ mdi.On("GetBlobs", em.ctx, mock.Anything).Return([]*fftypes.Blob{}, nil, nil)
+ mdi.On("InsertBlobs", em.ctx, mock.Anything).Return(nil)
mdi.On("GetDataRefs", em.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop"))
- err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
- assert.Regexp(t, "FF10158", err)
+ // no ack as we are simulating termination mid retry
+ mde := newPrivateBlobReceivedNoAck("peer1", hash, 12345, "ns1/path1")
+ em.privateBlobReceived(mdx, mde)
+
+ mde.AssertExpectations(t)
+ mdi.AssertExpectations(t)
+}
+
+func TestPrivateBlobReceivedInsertBlobFails(t *testing.T) {
+ em, cancel := newTestEventManager(t)
+ cancel() // retryable error
+ hash := fftypes.NewRandB32()
+
+ mdx := &dataexchangemocks.Plugin{}
+ mdx.On("Name").Return("utdx")
+
+ mdi := em.database.(*databasemocks.Plugin)
+ mdi.On("GetBlobs", em.ctx, mock.Anything).Return([]*fftypes.Blob{}, nil, nil)
+ mdi.On("InsertBlobs", em.ctx, mock.Anything).Return(fmt.Errorf("pop"))
+
+ // no ack as we are simulating termination mid retry
+ mde := newPrivateBlobReceivedNoAck("peer1", hash, 12345, "ns1/path1")
+ em.privateBlobReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
}
-func TestPrivateBLOBReceivedInsertBlobFails(t *testing.T) {
+func TestPrivateBlobReceivedGetBlobsFails(t *testing.T) {
em, cancel := newTestEventManager(t)
cancel() // retryable error
hash := fftypes.NewRandB32()
@@ -424,11 +531,13 @@ func TestPrivateBLOBReceivedInsertBlobFails(t *testing.T) {
mdx.On("Name").Return("utdx")
mdi := em.database.(*databasemocks.Plugin)
- mdi.On("InsertBlob", em.ctx, mock.Anything).Return(fmt.Errorf("pop"))
+ mdi.On("GetBlobs", em.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop"))
- err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
- assert.Regexp(t, "FF10158", err)
+ // no ack as we are simulating termination mid retry
+ mde := newPrivateBlobReceivedNoAck("peer1", hash, 12345, "ns1/path1")
+ em.privateBlobReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
}
@@ -456,10 +565,11 @@ func TestMessageReceiveMessageIdentityFail(t *testing.T) {
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(org2, false, nil)
mim.On("CachedIdentityLookupByID", em.ctx, org2.Parent).Return(nil, fmt.Errorf("pop"))
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdx.AssertExpectations(t)
mim.AssertExpectations(t)
}
@@ -488,10 +598,10 @@ func TestMessageReceiveMessageIdentityParentNotFound(t *testing.T) {
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(org2, false, nil)
mim.On("CachedIdentityLookupByID", em.ctx, org2.Parent).Return(nil, nil)
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.Empty(t, m)
+ mde := newMessageReceived("peer1", b, "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdx.AssertExpectations(t)
mim.AssertExpectations(t)
}
@@ -521,10 +631,10 @@ func TestMessageReceiveMessageIdentityIncorrect(t *testing.T) {
mim.On("CachedIdentityLookupMustExist", em.ctx, "signingOrg").Return(org2, false, nil)
mim.On("CachedIdentityLookupByID", em.ctx, org2.Parent).Return(org3, nil)
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.Empty(t, m)
+ mde := newMessageReceived("peer1", b, "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdx.AssertExpectations(t)
mim.AssertExpectations(t)
}
@@ -555,10 +665,11 @@ func TestMessageReceiveMessagePersistMessageFail(t *testing.T) {
mdi.On("InsertMessages", em.ctx, mock.Anything).Return(fmt.Errorf("optimization fail"))
mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop"))
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
}
@@ -588,10 +699,11 @@ func TestMessageReceiveMessagePersistDataFail(t *testing.T) {
mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(fmt.Errorf("optimization miss"))
mdi.On("UpsertData", em.ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop"))
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
}
@@ -600,7 +712,7 @@ func TestMessageReceiveUnpinnedBatchOk(t *testing.T) {
em, cancel := newTestEventManager(t)
cancel() // to avoid infinite retry
- _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned)
+ batch, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned)
mdi := em.database.(*databasemocks.Plugin)
mdx := &dataexchangemocks.Plugin{}
@@ -625,10 +737,10 @@ func TestMessageReceiveUnpinnedBatchOk(t *testing.T) {
mdm := em.data.(*datamocks.Manager)
mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return()
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.NotEmpty(t, m)
+ mde := newMessageReceived("peer1", b, batch.Payload.Manifest(batch.ID).String())
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
mdm.AssertExpectations(t)
@@ -662,10 +774,11 @@ func TestMessageReceiveUnpinnedBatchConfirmMessagesFail(t *testing.T) {
mdm := em.data.(*datamocks.Manager)
mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return()
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
mdm.AssertExpectations(t)
@@ -700,10 +813,11 @@ func TestMessageReceiveUnpinnedBatchPersistEventFail(t *testing.T) {
mdm := em.data.(*datamocks.Manager)
mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return()
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "FF10158", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
mdm.AssertExpectations(t)
@@ -722,10 +836,11 @@ func TestMessageReceiveMessageEnsureLocalGroupFail(t *testing.T) {
msh := em.definitions.(*definitionsmocks.DefinitionHandlers)
msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(false, fmt.Errorf("pop"))
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.Regexp(t, "pop", err)
- assert.Empty(t, m)
+ // no ack as we are simulating termination mid retry
+ mde := newMessageReceivedNoAck("peer1", b)
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
}
@@ -743,10 +858,10 @@ func TestMessageReceiveMessageEnsureLocalGroupReject(t *testing.T) {
msh := em.definitions.(*definitionsmocks.DefinitionHandlers)
msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(false, nil)
- m, err := em.MessageReceived(mdx, "peer1", b)
- assert.NoError(t, err)
- assert.Empty(t, m)
+ mde := newMessageReceived("peer1", b, "")
+ em.messageReceived(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
mdx.AssertExpectations(t)
}
diff --git a/internal/events/event_manager.go b/internal/events/event_manager.go
index cd137d96eb..4cffd7ba22 100644
--- a/internal/events/event_manager.go
+++ b/internal/events/event_manager.go
@@ -65,12 +65,11 @@ type EventManager interface {
BlockchainEvent(event *blockchain.EventWithSubscription) error
// Bound dataexchange callbacks
- PrivateBLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error
- MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) (manifest string, err error)
+ DXEvent(dx dataexchange.Plugin, event dataexchange.DXEvent)
// Bound sharedstorage callbacks
SharedStorageBatchDownloaded(ss sharedstorage.Plugin, ns, payloadRef string, data []byte) (*fftypes.UUID, error)
- SharedStorageBLOBDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) error
+ SharedStorageBlobDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string)
// Bound token callbacks
TokenPoolCreated(ti tokens.Plugin, pool *tokens.TokenPool) error
@@ -97,6 +96,7 @@ type eventManager struct {
messaging privatemessaging.Manager
assets assets.Manager
sharedDownload shareddownload.Manager
+ blobReceiver *blobReceiver
newEventNotifier *eventNotifier
newPinNotifier *eventNotifier
opCorrelationRetries int
@@ -142,6 +142,7 @@ func NewEventManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, si shar
}
ie, _ := eifactory.GetPlugin(ctx, system.SystemEventsTransport)
em.internalEvents = ie.(*system.Events)
+ em.blobReceiver = newBlobReceiver(ctx, em.aggregator)
var err error
if em.subManager, err = newSubscriptionManager(ctx, di, dm, newEventNotifier, dh, txHelper); err != nil {
@@ -155,6 +156,7 @@ func (em *eventManager) Start() (err error) {
err = em.subManager.start()
if err == nil {
em.aggregator.start()
+ em.blobReceiver.start()
}
return err
}
@@ -185,6 +187,7 @@ func (em *eventManager) ChangeEvents() chan<- *fftypes.ChangeEvent {
func (em *eventManager) WaitStop() {
em.subManager.close()
+ em.blobReceiver.stop()
<-em.aggregator.eventPoller.closed
}
diff --git a/internal/events/event_manager_test.go b/internal/events/event_manager_test.go
index 4ff3a71529..a976d165a0 100644
--- a/internal/events/event_manager_test.go
+++ b/internal/events/event_manager_test.go
@@ -38,7 +38,9 @@ import (
"github.com/hyperledger/firefly/mocks/sharedstoragemocks"
"github.com/hyperledger/firefly/mocks/sysmessagingmocks"
"github.com/hyperledger/firefly/mocks/txcommonmocks"
+ "github.com/hyperledger/firefly/pkg/database"
"github.com/hyperledger/firefly/pkg/fftypes"
+ "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
@@ -46,15 +48,22 @@ import (
var testNodeID = fftypes.NewUUID()
func newTestEventManager(t *testing.T) (*eventManager, func()) {
- return newTestEventManagerCommon(t, false)
+ return newTestEventManagerCommon(t, false, false)
}
func newTestEventManagerWithMetrics(t *testing.T) (*eventManager, func()) {
- return newTestEventManagerCommon(t, true)
+ return newTestEventManagerCommon(t, true, false)
}
-func newTestEventManagerCommon(t *testing.T, metrics bool) (*eventManager, func()) {
+func newTestEventManagerWithDBConcurrency(t *testing.T) (*eventManager, func()) {
+ return newTestEventManagerCommon(t, false, true)
+}
+
+func newTestEventManagerCommon(t *testing.T, metrics, dbconcurrency bool) (*eventManager, func()) {
config.Reset()
+ config.Set(config.BlobReceiverWorkerCount, 1)
+ config.Set(config.BlobReceiverWorkerBatchTimeout, "1s")
+ logrus.SetLevel(logrus.DebugLevel)
ctx, cancel := context.WithCancel(context.Background())
mdi := &databasemocks.Plugin{}
mbi := &blockchainmocks.Plugin{}
@@ -77,17 +86,23 @@ func newTestEventManagerCommon(t *testing.T, metrics bool) (*eventManager, func(
mni.On("GetNodeUUID", mock.Anything).Return(testNodeID).Maybe()
met.On("Name").Return("ut").Maybe()
mbi.On("VerifierType").Return(fftypes.VerifierTypeEthAddress).Maybe()
+ mdi.On("Capabilities").Return(&database.Capabilities{Concurrency: dbconcurrency}).Maybe()
emi, err := NewEventManager(ctx, mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mdd, mmi, txHelper)
em := emi.(*eventManager)
em.txHelper = &txcommonmocks.Helper{}
- rag := mdi.On("RunAsGroup", em.ctx, mock.Anything).Maybe()
- rag.RunFn = func(a mock.Arguments) {
- rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))}
- }
+ mockRunAsGroupPassthrough(mdi)
assert.NoError(t, err)
return em, cancel
}
+func mockRunAsGroupPassthrough(mdi *databasemocks.Plugin) {
+ rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe()
+ rag.RunFn = func(a mock.Arguments) {
+ fn := a[1].(func(context.Context) error)
+ rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))}
+ }
+}
+
func TestStartStop(t *testing.T) {
em, cancel := newTestEventManager(t)
mdi := em.database.(*databasemocks.Plugin)
@@ -129,6 +144,7 @@ func TestStartStopBadTransports(t *testing.T) {
msd := &shareddownloadmocks.Manager{}
mm := &metricsmocks.Manager{}
txHelper := txcommon.NewTransactionHelper(mdi, mdm)
+ mdi.On("Capabilities").Return(&database.Capabilities{Concurrency: false}).Maybe()
mbi.On("VerifierType").Return(fftypes.VerifierTypeEthAddress)
_, err := NewEventManager(context.Background(), mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, msd, mm, txHelper)
assert.Regexp(t, "FF10172", err)
diff --git a/internal/events/ss_callbacks.go b/internal/events/ss_callbacks.go
index 46ae6ba564..57908da97f 100644
--- a/internal/events/ss_callbacks.go
+++ b/internal/events/ss_callbacks.go
@@ -63,9 +63,18 @@ func (em *eventManager) SharedStorageBatchDownloaded(ss sharedstorage.Plugin, ns
return batch.ID, nil
}
-func (em *eventManager) SharedStorageBLOBDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) error {
+func (em *eventManager) SharedStorageBlobDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) {
l := log.L(em.ctx)
l.Infof("Blob received event from public storage %s: Hash='%v'", ss.Name(), hash)
- return em.blobReceivedCommon("", hash, size, payloadRef)
+ // Dispatch to the blob receiver for efficient batch DB operations
+ blobHash := hash
+ em.blobReceiver.blobReceived(em.ctx, &blobNotification{
+ blob: &fftypes.Blob{
+ PayloadRef: payloadRef,
+ Hash: &blobHash,
+ Size: size,
+ Created: fftypes.Now(),
+ },
+ })
}
diff --git a/internal/events/ss_callbacks_test.go b/internal/events/ss_callbacks_test.go
index 9c46fac42c..cf9307890b 100644
--- a/internal/events/ss_callbacks_test.go
+++ b/internal/events/ss_callbacks_test.go
@@ -115,7 +115,7 @@ func TestSharedStorageBatchDownloadedBadData(t *testing.T) {
}
-func TestSharedStorageBLOBDownloadedOk(t *testing.T) {
+func TestSharedStorageBlobDownloadedOk(t *testing.T) {
em, cancel := newTestEventManager(t)
defer cancel()
@@ -126,7 +126,8 @@ func TestSharedStorageBLOBDownloadedOk(t *testing.T) {
mdi := em.database.(*databasemocks.Plugin)
mss := em.sharedstorage.(*sharedstoragemocks.Plugin)
mss.On("Name").Return("utsd")
- mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil, nil)
+ mdi.On("GetBlobs", em.ctx, mock.Anything).Return([]*fftypes.Blob{}, nil, nil)
+ mdi.On("InsertBlobs", em.ctx, mock.Anything).Return(nil, nil)
mdi.On("GetDataRefs", em.ctx, mock.Anything).Return(fftypes.DataRefs{
{ID: dataID},
}, nil, nil)
@@ -134,8 +135,7 @@ func TestSharedStorageBLOBDownloadedOk(t *testing.T) {
{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, BatchID: batchID},
}, nil, nil)
- err := em.SharedStorageBLOBDownloaded(mss, *fftypes.NewRandB32(), 12345, "payload1")
- assert.NoError(t, err)
+ em.SharedStorageBlobDownloaded(mss, *fftypes.NewRandB32(), 12345, "payload1")
assert.Equal(t, *batchID, <-em.aggregator.rewindBatches)
diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go
index c2342d9519..118a05a966 100644
--- a/internal/i18n/en_translations.go
+++ b/internal/i18n/en_translations.go
@@ -295,4 +295,5 @@ var (
MsgDownloadBatchMaxBytes = ffm("FF10377", "Error downloading batch with reference '%s' from shared storage - maximum size limit reached")
MsgOperationDataIncorrect = ffm("FF10378", "Operation data type incorrect: %T", 400)
MsgDataMissingBlobHash = ffm("FF10379", "Blob for data %s cannot be transferred as it is missing a hash", 500)
+ MsgUnpexectedDXMessageType = ffm("FF10380", "Unexpected websocket event type from DX plugin: %s", 500)
)
diff --git a/internal/operations/manager.go b/internal/operations/manager.go
index 18e77fcdcb..7ff3268a51 100644
--- a/internal/operations/manager.go
+++ b/internal/operations/manager.go
@@ -40,8 +40,8 @@ type Manager interface {
RunOperation(ctx context.Context, op *fftypes.PreparedOperation, options ...RunOperationOption) error
RetryOperation(ctx context.Context, ns string, opID *fftypes.UUID) (*fftypes.Operation, error)
AddOrReuseOperation(ctx context.Context, op *fftypes.Operation) error
- SubmitOperationUpdate(plugin fftypes.Named, update *OperationUpdate) error
- TransferResult(dx dataexchange.Plugin, opIDString string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error
+ SubmitOperationUpdate(plugin fftypes.Named, update *OperationUpdate)
+ TransferResult(dx dataexchange.Plugin, event dataexchange.DXEvent)
Start() error
WaitStop()
}
@@ -156,35 +156,40 @@ func (om *operationsManager) RetryOperation(ctx context.Context, ns string, opID
return op, om.RunOperation(ctx, po)
}
-func (om *operationsManager) TransferResult(dx dataexchange.Plugin, opIDString string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error {
- log.L(om.ctx).Infof("Transfer result %s=%s error='%s' manifest='%s' info='%s'", opIDString, status, update.Error, update.Manifest, update.Info)
+func (om *operationsManager) TransferResult(dx dataexchange.Plugin, event dataexchange.DXEvent) {
- opID, err := fftypes.ParseUUID(om.ctx, opIDString)
+ tr := event.TransferResult()
+
+ log.L(om.ctx).Infof("Transfer result %s=%s error='%s' manifest='%s' info='%s'", tr.TrackingID, tr.Status, tr.Error, tr.Manifest, tr.Info)
+ opID, err := fftypes.ParseUUID(om.ctx, tr.TrackingID)
if err != nil {
- log.L(om.ctx).Errorf("Invalid UUID for tracking ID from DX: %s", opIDString)
- return nil
+ log.L(om.ctx).Errorf("Invalid UUID for tracking ID from DX: %s", tr.TrackingID)
+ return
}
opUpdate := &OperationUpdate{
ID: opID,
- Status: status,
+ Status: tr.Status,
VerifyManifest: dx.Capabilities().Manifest,
- ErrorMessage: update.Error,
- Output: update.Info,
+ ErrorMessage: tr.Error,
+ Output: tr.Info,
+ OnComplete: func() {
+ event.Ack()
+ },
}
// Pass manifest verification code to the background worker, for once it has loaded the operation
if opUpdate.VerifyManifest {
- if update.Manifest != "" {
+ if tr.Manifest != "" {
// For batches DX passes us a manifest to compare.
- opUpdate.DXManifest = update.Manifest
- } else if update.Hash != "" {
+ opUpdate.DXManifest = tr.Manifest
+ } else if tr.Hash != "" {
// For blobs DX passes us a hash to compare.
- opUpdate.DXHash = update.Hash
+ opUpdate.DXHash = tr.Hash
}
}
- return om.SubmitOperationUpdate(dx, opUpdate)
+ om.SubmitOperationUpdate(dx, opUpdate)
}
func (om *operationsManager) writeOperationSuccess(ctx context.Context, opID *fftypes.UUID, outputs fftypes.JSONObject) {
@@ -199,13 +204,13 @@ func (om *operationsManager) writeOperationFailure(ctx context.Context, opID *ff
}
}
-func (om *operationsManager) SubmitOperationUpdate(plugin fftypes.Named, update *OperationUpdate) error {
+func (om *operationsManager) SubmitOperationUpdate(plugin fftypes.Named, update *OperationUpdate) {
errString := ""
if update.ErrorMessage != "" {
errString = fmt.Sprintf(" error=%s", update.ErrorMessage)
}
log.L(om.ctx).Debugf("%s updating operation %s status=%s%s", plugin.Name(), update.ID, update.Status, errString)
- return om.updater.SubmitOperationUpdate(om.ctx, update)
+ om.updater.SubmitOperationUpdate(om.ctx, update)
}
func (om *operationsManager) Start() error {
diff --git a/internal/operations/manager_test.go b/internal/operations/manager_test.go
index a5aa750fd9..b2fd2155a3 100644
--- a/internal/operations/manager_test.go
+++ b/internal/operations/manager_test.go
@@ -402,12 +402,16 @@ func TestTransferResultBadUUID(t *testing.T) {
mdx.On("Capabilities").Return(&dataexchange.Capabilities{
Manifest: true,
})
- err := om.TransferResult(mdx, "wrongun", fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{
- Info: fftypes.JSONObject{"extra": "info"},
- Manifest: "Sally",
+ mde := &dataexchangemocks.DXEvent{}
+ mde.On("TransferResult").Return(&dataexchange.TransferResult{
+ TrackingID: "wrongun",
+ Status: fftypes.OpStatusSucceeded,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Info: fftypes.JSONObject{"extra": "info"},
+ Manifest: "Sally",
+ },
})
- assert.NoError(t, err)
-
+ om.TransferResult(mdx, mde)
}
func TestTransferResultManifestMismatch(t *testing.T) {
@@ -440,20 +444,27 @@ func TestTransferResultManifestMismatch(t *testing.T) {
mdx.On("Capabilities").Return(&dataexchange.Capabilities{
Manifest: true,
})
- err := om.TransferResult(mdx, opID1.String(), fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{
- Info: fftypes.JSONObject{"extra": "info"},
- Manifest: "Sally",
+ mde := &dataexchangemocks.DXEvent{}
+ mde.On("Ack").Return()
+ mde.On("TransferResult").Return(&dataexchange.TransferResult{
+ TrackingID: opID1.String(),
+ Status: fftypes.OpStatusSucceeded,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Info: fftypes.JSONObject{"extra": "info"},
+ Manifest: "Sally",
+ },
})
- assert.NoError(t, err)
+ om.TransferResult(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
}
-func TestTransferResultHashtMismatch(t *testing.T) {
+func TestTransferResultHashMismatch(t *testing.T) {
om, cancel := newTestOperations(t)
- defer cancel()
+ cancel()
om.updater.conf.workerCount = 0
opID1 := fftypes.NewUUID()
@@ -478,19 +489,26 @@ func TestTransferResultHashtMismatch(t *testing.T) {
mdx.On("Capabilities").Return(&dataexchange.Capabilities{
Manifest: true,
})
- err := om.TransferResult(mdx, opID1.String(), fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{
- Info: fftypes.JSONObject{"extra": "info"},
- Hash: "Sally",
+ mde := &dataexchangemocks.DXEvent{}
+ mde.On("Ack").Return()
+ mde.On("TransferResult").Return(&dataexchange.TransferResult{
+ TrackingID: opID1.String(),
+ Status: fftypes.OpStatusSucceeded,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Info: fftypes.JSONObject{"extra": "info"},
+ Hash: "Sally",
+ },
})
- assert.NoError(t, err)
+ om.TransferResult(mdx, mde)
+ mde.AssertExpectations(t)
mdi.AssertExpectations(t)
}
func TestTransferResultBatchLookupFail(t *testing.T) {
om, cancel := newTestOperations(t)
- defer cancel()
+ cancel()
om.updater.conf.workerCount = 0
opID1 := fftypes.NewUUID()
@@ -511,11 +529,16 @@ func TestTransferResultBatchLookupFail(t *testing.T) {
mdx.On("Capabilities").Return(&dataexchange.Capabilities{
Manifest: true,
})
- err := om.TransferResult(mdx, opID1.String(), fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{
- Info: fftypes.JSONObject{"extra": "info"},
- Manifest: "Sally",
+ mde := &dataexchangemocks.DXEvent{}
+ mde.On("TransferResult").Return(&dataexchange.TransferResult{
+ TrackingID: opID1.String(),
+ Status: fftypes.OpStatusSucceeded,
+ TransportStatusUpdate: fftypes.TransportStatusUpdate{
+ Info: fftypes.JSONObject{"extra": "info"},
+ Manifest: "Sally",
+ },
})
- assert.Regexp(t, "pop", err)
+ om.TransferResult(mdx, mde)
mdi.AssertExpectations(t)
diff --git a/internal/operations/operation_updater.go b/internal/operations/operation_updater.go
index be692f0c6a..2cb92a193f 100644
--- a/internal/operations/operation_updater.go
+++ b/internal/operations/operation_updater.go
@@ -41,6 +41,7 @@ type OperationUpdate struct {
VerifyManifest bool
DXManifest string
DXHash string
+ OnComplete func()
}
type operationUpdaterBatch struct {
@@ -100,19 +101,20 @@ func (ou *operationUpdater) pickWorker(ctx context.Context, update *OperationUpd
return ou.workQueues[worker]
}
-func (ou *operationUpdater) SubmitOperationUpdate(ctx context.Context, update *OperationUpdate) error {
+func (ou *operationUpdater) SubmitOperationUpdate(ctx context.Context, update *OperationUpdate) {
if ou.conf.workerCount > 0 {
select {
case ou.pickWorker(ctx, update) <- update:
case <-ou.ctx.Done():
- return i18n.NewError(ctx, i18n.MsgContextCanceled)
+ log.L(ctx).Debugf("Not submitting operation update due to cancelled context")
}
- return nil
+ return
}
// Otherwise do it in-line on this context
- return ou.database.RunAsGroup(ctx, func(ctx context.Context) error {
- return ou.doBatchUpdate(ctx, []*OperationUpdate{update})
- })
+ err := ou.doBatchUpdateWithRetry(ctx, []*OperationUpdate{update})
+ if err != nil {
+ log.L(ctx).Warnf("Exiting while updating operation: %s", err)
+ }
}
func (ou *operationUpdater) initQueues() {
@@ -161,11 +163,7 @@ func (ou *operationUpdater) updaterLoop(index int) {
if batch != nil && (timedOut || len(batch.updates) >= ou.conf.maxInserts) {
batch.timeoutCancel()
- err := ou.retry.Do(ctx, "operation batch update", func(attempt int) (retry bool, err error) {
- return true, ou.database.RunAsGroup(ctx, func(ctx context.Context) error {
- return ou.doBatchUpdate(ctx, batch.updates)
- })
- })
+ err := ou.doBatchUpdateWithRetry(ctx, batch.updates)
if err != nil {
log.L(ctx).Debugf("Operation update worker exiting: %s", err)
return
@@ -175,6 +173,23 @@ func (ou *operationUpdater) updaterLoop(index int) {
}
}
+func (ou *operationUpdater) doBatchUpdateWithRetry(ctx context.Context, updates []*OperationUpdate) error {
+ return ou.retry.Do(ctx, "operation update", func(attempt int) (retry bool, err error) {
+ err = ou.database.RunAsGroup(ctx, func(ctx context.Context) error {
+ return ou.doBatchUpdate(ctx, updates)
+ })
+ if err != nil {
+ return true, err
+ }
+ for _, update := range updates {
+ if update.OnComplete != nil {
+ update.OnComplete()
+ }
+ }
+ return false, nil
+ })
+}
+
func (ou *operationUpdater) doBatchUpdate(ctx context.Context, updates []*OperationUpdate) error {
// Get all the operations that match
diff --git a/internal/operations/operation_updater_test.go b/internal/operations/operation_updater_test.go
index 9caf6444bc..e43028e3dc 100644
--- a/internal/operations/operation_updater_test.go
+++ b/internal/operations/operation_updater_test.go
@@ -73,10 +73,9 @@ func TestSubmitUpdateClosed(t *testing.T) {
make(chan *OperationUpdate),
}
ou.cancelFunc()
- err := ou.SubmitOperationUpdate(ou.ctx, &OperationUpdate{
+ ou.SubmitOperationUpdate(ou.ctx, &OperationUpdate{
ID: fftypes.NewUUID(),
})
- assert.Regexp(t, "FF10158", err)
}
func TestSubmitUpdateSyncFallbackOpNotFound(t *testing.T) {
@@ -91,11 +90,10 @@ func TestSubmitUpdateSyncFallbackOpNotFound(t *testing.T) {
}).Return(nil)
mdi.On("GetOperations", customCtx, mock.Anything, mock.Anything).Return(nil, nil, nil)
- err := ou.SubmitOperationUpdate(customCtx, &OperationUpdate{
+ ou.SubmitOperationUpdate(customCtx, &OperationUpdate{
ID: fftypes.NewUUID(),
})
- assert.NoError(t, err)
mdi.AssertExpectations(t)
}
@@ -134,29 +132,25 @@ func TestSubmitUpdateWorkerE2ESuccess(t *testing.T) {
om.Start()
- err := om.SubmitOperationUpdate(&mockplug{}, &OperationUpdate{
+ om.SubmitOperationUpdate(&mockplug{}, &OperationUpdate{
ID: opID1,
Status: fftypes.OpStatusSucceeded,
BlockchainTXID: "tx12345",
})
- assert.NoError(t, err)
- err = om.SubmitOperationUpdate(&mockplug{}, &OperationUpdate{
+ om.SubmitOperationUpdate(&mockplug{}, &OperationUpdate{
ID: opID2,
Status: fftypes.OpStatusFailed,
ErrorMessage: "err1",
Output: fftypes.JSONObject{"test": true},
})
- assert.NoError(t, err)
- err = om.SubmitOperationUpdate(&mockplug{}, &OperationUpdate{
+ om.SubmitOperationUpdate(&mockplug{}, &OperationUpdate{
ID: opID3,
Status: fftypes.OpStatusFailed,
ErrorMessage: "err2",
})
- assert.NoError(t, err)
<-done
- assert.NoError(t, err)
mdi.AssertExpectations(t)
}
@@ -171,10 +165,9 @@ func TestUpdateLoopExitRetryCancelledContext(t *testing.T) {
ou.cancelFunc()
})
- err := ou.SubmitOperationUpdate(ou.ctx, &OperationUpdate{
+ ou.SubmitOperationUpdate(ou.ctx, &OperationUpdate{
ID: fftypes.NewUUID(),
})
- assert.NoError(t, err)
ou.updaterLoop(0)
diff --git a/internal/orchestrator/bound_callbacks.go b/internal/orchestrator/bound_callbacks.go
index 0020b07b11..de1d44f879 100644
--- a/internal/orchestrator/bound_callbacks.go
+++ b/internal/orchestrator/bound_callbacks.go
@@ -34,8 +34,8 @@ type boundCallbacks struct {
om operations.Manager
}
-func (bc *boundCallbacks) BlockchainOpUpdate(plugin blockchain.Plugin, operationID *fftypes.UUID, txState blockchain.TransactionStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) error {
- return bc.om.SubmitOperationUpdate(plugin, &operations.OperationUpdate{
+func (bc *boundCallbacks) BlockchainOpUpdate(plugin blockchain.Plugin, operationID *fftypes.UUID, txState blockchain.TransactionStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) {
+ bc.om.SubmitOperationUpdate(plugin, &operations.OperationUpdate{
ID: operationID,
Status: txState,
BlockchainTXID: blockchainTXID,
@@ -44,8 +44,8 @@ func (bc *boundCallbacks) BlockchainOpUpdate(plugin blockchain.Plugin, operation
})
}
-func (bc *boundCallbacks) TokenOpUpdate(plugin tokens.Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) error {
- return bc.om.SubmitOperationUpdate(plugin, &operations.OperationUpdate{
+func (bc *boundCallbacks) TokenOpUpdate(plugin tokens.Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) {
+ bc.om.SubmitOperationUpdate(plugin, &operations.OperationUpdate{
ID: operationID,
Status: txState,
BlockchainTXID: blockchainTXID,
@@ -58,16 +58,13 @@ func (bc *boundCallbacks) BatchPinComplete(batch *blockchain.BatchPin, signingKe
return bc.ei.BatchPinComplete(bc.bi, batch, signingKey)
}
-func (bc *boundCallbacks) TransferResult(trackingID string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error {
- return bc.om.TransferResult(bc.dx, trackingID, status, update)
-}
-
-func (bc *boundCallbacks) PrivateBLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
- return bc.ei.PrivateBLOBReceived(bc.dx, peerID, hash, size, payloadRef)
-}
-
-func (bc *boundCallbacks) MessageReceived(peerID string, data []byte) (manifest string, err error) {
- return bc.ei.MessageReceived(bc.dx, peerID, data)
+func (bc *boundCallbacks) DXEvent(event dataexchange.DXEvent) {
+ switch event.Type() {
+ case dataexchange.DXEventTypeTransferResult:
+ bc.om.TransferResult(bc.dx, event)
+ default:
+ bc.ei.DXEvent(bc.dx, event)
+ }
}
func (bc *boundCallbacks) TokenPoolCreated(plugin tokens.Plugin, pool *tokens.TokenPool) error {
@@ -90,6 +87,6 @@ func (bc *boundCallbacks) SharedStorageBatchDownloaded(ns, payloadRef string, da
return bc.ei.SharedStorageBatchDownloaded(bc.ss, ns, payloadRef, data)
}
-func (bc *boundCallbacks) SharedStorageBLOBDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) error {
- return bc.ei.SharedStorageBLOBDownloaded(bc.ss, hash, size, payloadRef)
+func (bc *boundCallbacks) SharedStorageBlobDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) {
+ bc.ei.SharedStorageBlobDownloaded(bc.ss, hash, size, payloadRef)
}
diff --git a/internal/orchestrator/bound_callbacks_test.go b/internal/orchestrator/bound_callbacks_test.go
index 65cb50efd2..685ba8f9b7 100644
--- a/internal/orchestrator/bound_callbacks_test.go
+++ b/internal/orchestrator/bound_callbacks_test.go
@@ -28,6 +28,7 @@ import (
"github.com/hyperledger/firefly/mocks/sharedstoragemocks"
"github.com/hyperledger/firefly/mocks/tokenmocks"
"github.com/hyperledger/firefly/pkg/blockchain"
+ "github.com/hyperledger/firefly/pkg/dataexchange"
"github.com/hyperledger/firefly/pkg/fftypes"
"github.com/hyperledger/firefly/pkg/tokens"
"github.com/stretchr/testify/assert"
@@ -61,27 +62,21 @@ func TestBoundCallbacks(t *testing.T) {
BlockchainTXID: "0xffffeeee",
ErrorMessage: "error info",
Output: info,
- }).Return(fmt.Errorf("pop"))
+ }).Return()
- err = bc.BlockchainOpUpdate(mbi, opID, fftypes.OpStatusFailed, "0xffffeeee", "error info", info)
- assert.EqualError(t, err, "pop")
+ bc.BlockchainOpUpdate(mbi, opID, fftypes.OpStatusFailed, "0xffffeeee", "error info", info)
- err = bc.TokenOpUpdate(mti, opID, fftypes.OpStatusFailed, "0xffffeeee", "error info", info)
- assert.EqualError(t, err, "pop")
+ bc.TokenOpUpdate(mti, opID, fftypes.OpStatusFailed, "0xffffeeee", "error info", info)
- mom.On("TransferResult", mdx, "tracking12345", fftypes.OpStatusFailed, mock.Anything).Return(fmt.Errorf("pop"))
- err = bc.TransferResult("tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{
- Error: "error info", Info: info,
- })
- assert.EqualError(t, err, "pop")
+ mde := &dataexchangemocks.DXEvent{}
+ mom.On("TransferResult", mdx, mde).Return()
+ mei.On("DXEvent", mdx, mde).Return()
- mei.On("PrivateBLOBReceived", mdx, "peer1", *hash, int64(12345), "ns1/id1").Return(fmt.Errorf("pop"))
- err = bc.PrivateBLOBReceived("peer1", *hash, 12345, "ns1/id1")
- assert.EqualError(t, err, "pop")
+ mde.On("Type").Return(dataexchange.DXEventTypeTransferResult).Once()
+ bc.DXEvent(mde)
- mei.On("MessageReceived", mdx, "peer1", []byte{}).Return("manifest data", fmt.Errorf("pop"))
- _, err = bc.MessageReceived("peer1", []byte{})
- assert.EqualError(t, err, "pop")
+ mde.On("Type").Return(dataexchange.DXEventTypeMessageReceived).Once()
+ bc.DXEvent(mde)
mei.On("TokenPoolCreated", mti, pool).Return(fmt.Errorf("pop"))
err = bc.TokenPoolCreated(mti, pool)
@@ -103,7 +98,6 @@ func TestBoundCallbacks(t *testing.T) {
_, err = bc.SharedStorageBatchDownloaded("ns1", "payload1", []byte(`{}`))
assert.EqualError(t, err, "pop")
- mei.On("SharedStorageBLOBDownloaded", mss, *hash, int64(12345), "payload1").Return(fmt.Errorf("pop"))
- err = bc.SharedStorageBLOBDownloaded(*hash, 12345, "payload1")
- assert.EqualError(t, err, "pop")
+ mei.On("SharedStorageBlobDownloaded", mss, *hash, int64(12345), "payload1").Return()
+ bc.SharedStorageBlobDownloaded(*hash, 12345, "payload1")
}
diff --git a/internal/privatemessaging/operations.go b/internal/privatemessaging/operations.go
index 9e62a0c04e..38b1832abe 100644
--- a/internal/privatemessaging/operations.go
+++ b/internal/privatemessaging/operations.go
@@ -127,7 +127,7 @@ func (pm *privateMessaging) PrepareOperation(ctx context.Context, op *fftypes.Op
func (pm *privateMessaging) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) {
switch data := op.Data.(type) {
case transferBlobData:
- return nil, false, pm.exchange.TransferBLOB(ctx, op.ID, data.Node.Profile.GetString("id"), data.Blob.PayloadRef)
+ return nil, false, pm.exchange.TransferBlob(ctx, op.ID, data.Node.Profile.GetString("id"), data.Blob.PayloadRef)
case batchSendData:
payload, err := json.Marshal(data.Transport)
diff --git a/internal/privatemessaging/operations_test.go b/internal/privatemessaging/operations_test.go
index 51b35dcddf..9baa4dbda1 100644
--- a/internal/privatemessaging/operations_test.go
+++ b/internal/privatemessaging/operations_test.go
@@ -55,7 +55,7 @@ func TestPrepareAndRunTransferBlob(t *testing.T) {
mdx := pm.exchange.(*dataexchangemocks.Plugin)
mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil)
mdi.On("GetBlobMatchingHash", context.Background(), blob.Hash).Return(blob, nil)
- mdx.On("TransferBLOB", context.Background(), op.ID, "peer1", "payload").Return(nil)
+ mdx.On("TransferBlob", context.Background(), op.ID, "peer1", "payload").Return(nil)
po, err := pm.PrepareOperation(context.Background(), op)
assert.NoError(t, err)
diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go
index 25c3aa562d..5bd3edca4b 100644
--- a/internal/privatemessaging/privatemessaging_test.go
+++ b/internal/privatemessaging/privatemessaging_test.go
@@ -522,7 +522,7 @@ func TestTransferBlobsOpInsertFail(t *testing.T) {
mom := pm.operations.(*operationmocks.Manager)
mdi.On("GetBlobMatchingHash", pm.ctx, mock.Anything).Return(&fftypes.Blob{PayloadRef: "blob/1"}, nil)
- mdx.On("TransferBLOB", pm.ctx, mock.Anything, "peer1", "blob/1").Return(nil)
+ mdx.On("TransferBlob", pm.ctx, mock.Anything, "peer1", "blob/1").Return(nil)
mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop"))
_, err := pm.prepareBlobTransfers(pm.ctx, fftypes.DataArray{
diff --git a/internal/shareddownload/download_manager.go b/internal/shareddownload/download_manager.go
index 56caec6682..74c02a83b7 100644
--- a/internal/shareddownload/download_manager.go
+++ b/internal/shareddownload/download_manager.go
@@ -72,7 +72,7 @@ type downloadWork struct {
type Callbacks interface {
SharedStorageBatchDownloaded(ns string, payloadRef string, data []byte) (batchID *fftypes.UUID, err error)
- SharedStorageBLOBDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) error
+ SharedStorageBlobDownloaded(hash fftypes.Bytes32, size int64, payloadRef string)
}
func NewDownloadManager(ctx context.Context, di database.Plugin, ss sharedstorage.Plugin, dx dataexchange.Plugin, om operations.Manager, cb Callbacks) (Manager, error) {
diff --git a/internal/shareddownload/download_manager_test.go b/internal/shareddownload/download_manager_test.go
index 24bb4a017c..5747ab6818 100644
--- a/internal/shareddownload/download_manager_test.go
+++ b/internal/shareddownload/download_manager_test.go
@@ -128,7 +128,8 @@ func TestDownloadBlobWithRetryOk(t *testing.T) {
mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil)
mdx := dm.dataexchange.(*dataexchangemocks.Plugin)
- mdx.On("UploadBLOB", mock.Anything, "ns1", *dataID, mock.Anything).Return("privateRef1", blobHash, int64(12345), nil)
+ mdx.On("UploadBlob", mock.Anything, "ns1", *dataID, mock.Anything).Return("", nil, int64(-1), fmt.Errorf("pop")).Twice()
+ mdx.On("UploadBlob", mock.Anything, "ns1", *dataID, mock.Anything).Return("privateRef1", blobHash, int64(12345), nil)
called := make(chan struct{})
@@ -136,7 +137,7 @@ func TestDownloadBlobWithRetryOk(t *testing.T) {
mdi.On("InsertOperation", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
args[2].(database.PostCompletionHook)()
}).Return(nil)
- mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusPending, "pop", mock.Anything).Return(nil)
+ mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusPending, mock.Anything, mock.Anything).Return(nil)
mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusSucceeded, "", fftypes.JSONObject{
"hash": blobHash,
"size": int64(12345),
@@ -146,8 +147,7 @@ func TestDownloadBlobWithRetryOk(t *testing.T) {
}).Return(nil).Once()
mci := dm.callbacks.(*shareddownloadmocks.Callbacks)
- mci.On("SharedStorageBLOBDownloaded", *blobHash, int64(12345), "privateRef1").Return(fmt.Errorf("pop")).Twice()
- mci.On("SharedStorageBLOBDownloaded", *blobHash, int64(12345), "privateRef1").Return(nil)
+ mci.On("SharedStorageBlobDownloaded", *blobHash, int64(12345), "privateRef1").Return()
err := dm.InitiateDownloadBlob(dm.ctx, "ns1", txID, dataID, "ref1")
assert.NoError(t, err)
diff --git a/internal/shareddownload/operations.go b/internal/shareddownload/operations.go
index 51139a20ad..e6202ad3bb 100644
--- a/internal/shareddownload/operations.go
+++ b/internal/shareddownload/operations.go
@@ -152,17 +152,14 @@ func (dm *downloadManager) downloadBlob(ctx context.Context, data downloadBlobDa
defer reader.Close()
// ... to data exchange
- dxPayloadRef, hash, blobSize, err := dm.dataexchange.UploadBLOB(ctx, data.Namespace, *data.DataID, reader)
+ dxPayloadRef, hash, blobSize, err := dm.dataexchange.UploadBlob(ctx, data.Namespace, *data.DataID, reader)
if err != nil {
return nil, false, i18n.WrapError(ctx, err, i18n.MsgDownloadSharedFailed, data.PayloadRef)
}
log.L(ctx).Infof("Transferred blob '%s' (%s) from shared storage '%s' to local data exchange '%s'", hash, units.HumanSizeWithPrecision(float64(blobSize), 2), data.PayloadRef, dxPayloadRef)
// then callback to store metadata
- err = dm.callbacks.SharedStorageBLOBDownloaded(*hash, blobSize, dxPayloadRef)
- if err != nil {
- return nil, false, err
- }
+ dm.callbacks.SharedStorageBlobDownloaded(*hash, blobSize, dxPayloadRef)
return getDownloadBlobOutputs(hash, blobSize, dxPayloadRef), true, nil
}
diff --git a/internal/shareddownload/operations_test.go b/internal/shareddownload/operations_test.go
index 1c7f7f027e..526a63d069 100644
--- a/internal/shareddownload/operations_test.go
+++ b/internal/shareddownload/operations_test.go
@@ -122,7 +122,7 @@ func TestDownloadBlobDownloadDataReadFail(t *testing.T) {
mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil)
mdx := dm.dataexchange.(*dataexchangemocks.Plugin)
- mdx.On("UploadBLOB", mock.Anything, "ns1", mock.Anything, reader).Return("", nil, int64(-1), fmt.Errorf("pop"))
+ mdx.On("UploadBlob", mock.Anything, "ns1", mock.Anything, reader).Return("", nil, int64(-1), fmt.Errorf("pop"))
_, _, err := dm.downloadBlob(dm.ctx, downloadBlobData{
Namespace: "ns1",
diff --git a/internal/tokens/fftokens/fftokens.go b/internal/tokens/fftokens/fftokens.go
index d37ad442bc..eb954ff72c 100644
--- a/internal/tokens/fftokens/fftokens.go
+++ b/internal/tokens/fftokens/fftokens.go
@@ -163,7 +163,7 @@ func (ft *FFTokens) Capabilities() *tokens.Capabilities {
return ft.capabilities
}
-func (ft *FFTokens) handleReceipt(ctx context.Context, data fftypes.JSONObject) error {
+func (ft *FFTokens) handleReceipt(ctx context.Context, data fftypes.JSONObject) {
l := log.L(ctx)
requestID := data.GetString("id")
@@ -172,19 +172,19 @@ func (ft *FFTokens) handleReceipt(ctx context.Context, data fftypes.JSONObject)
transactionHash := data.GetString("transactionHash")
if requestID == "" {
l.Errorf("Reply cannot be processed - missing fields: %+v", data)
- return nil // Swallow this and move on
+ return
}
opID, err := fftypes.ParseUUID(ctx, requestID)
if err != nil {
l.Errorf("Reply cannot be processed - bad ID: %+v", data)
- return nil // Swallow this and move on
+ return
}
replyType := fftypes.OpStatusSucceeded
if !success {
replyType = fftypes.OpStatusFailed
}
l.Infof("Tokens '%s' reply: request=%s message=%s", replyType, requestID, message)
- return ft.callbacks.TokenOpUpdate(ft, opID, replyType, transactionHash, message, data)
+ ft.callbacks.TokenOpUpdate(ft, opID, replyType, transactionHash, message, data)
}
func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, data fftypes.JSONObject) (err error) {
@@ -446,7 +446,7 @@ func (ft *FFTokens) eventLoop() {
l.Debugf("Received %s event %s", msg.Event, msg.ID)
switch msg.Event {
case messageReceipt:
- err = ft.handleReceipt(ctx, msg.Data)
+ ft.handleReceipt(ctx, msg.Data)
case messageTokenPool:
err = ft.handleTokenPoolCreate(ctx, msg.Data)
case messageTokenMint:
diff --git a/manifest.json b/manifest.json
index eb03bc03ab..95331cdc1a 100644
--- a/manifest.json
+++ b/manifest.json
@@ -1,8 +1,8 @@
{
"ethconnect": {
"image": "ghcr.io/hyperledger/firefly-ethconnect",
- "tag": "v3.1.5",
- "sha": "dc7f4f9a1eb1ba608a89f54876bb84324571bdd765ac735d2fafbb5a0862cd7c"
+ "tag": "v3.1.6",
+ "sha": "7336bb15640a01eea1c440195461862b6982a24e1a33b4ea0d89a30acb8bcf7b"
},
"fabconnect": {
"image": "ghcr.io/hyperledger/firefly-fabconnect",
@@ -11,8 +11,8 @@
},
"dataexchange-https": {
"image": "ghcr.io/hyperledger/firefly-dataexchange-https",
- "tag": "v0.10.5",
- "sha": "70344c0f856be14304e4cba37c8c1620de3720262ad3d08de7ba46d633b83cbd"
+ "tag": "v0.11.0",
+ "sha": "307975fd75181bb3f8eb4510cc7f0df4b1dc8b500635ab782e9b3d547a0891fb"
},
"tokens-erc1155": {
"image": "ghcr.io/hyperledger/firefly-tokens-erc1155",
diff --git a/mocks/blockchainmocks/callbacks.go b/mocks/blockchainmocks/callbacks.go
index a7da43acab..b533fbd096 100644
--- a/mocks/blockchainmocks/callbacks.go
+++ b/mocks/blockchainmocks/callbacks.go
@@ -43,15 +43,6 @@ func (_m *Callbacks) BlockchainEvent(event *blockchain.EventWithSubscription) er
}
// BlockchainOpUpdate provides a mock function with given fields: plugin, operationID, txState, blockchainTXID, errorMessage, opOutput
-func (_m *Callbacks) BlockchainOpUpdate(plugin blockchain.Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID string, errorMessage string, opOutput fftypes.JSONObject) error {
- ret := _m.Called(plugin, operationID, txState, blockchainTXID, errorMessage, opOutput)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(blockchain.Plugin, *fftypes.UUID, fftypes.OpStatus, string, string, fftypes.JSONObject) error); ok {
- r0 = rf(plugin, operationID, txState, blockchainTXID, errorMessage, opOutput)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+func (_m *Callbacks) BlockchainOpUpdate(plugin blockchain.Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID string, errorMessage string, opOutput fftypes.JSONObject) {
+ _m.Called(plugin, operationID, txState, blockchainTXID, errorMessage, opOutput)
}
diff --git a/mocks/databasemocks/plugin.go b/mocks/databasemocks/plugin.go
index 7ee0e95c23..164ec4b121 100644
--- a/mocks/databasemocks/plugin.go
+++ b/mocks/databasemocks/plugin.go
@@ -2253,6 +2253,20 @@ func (_m *Plugin) InsertBlob(ctx context.Context, blob *fftypes.Blob) error {
return r0
}
+// InsertBlobs provides a mock function with given fields: ctx, blobs
+func (_m *Plugin) InsertBlobs(ctx context.Context, blobs []*fftypes.Blob) error {
+ ret := _m.Called(ctx, blobs)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, []*fftypes.Blob) error); ok {
+ r0 = rf(ctx, blobs)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
// InsertBlockchainEvent provides a mock function with given fields: ctx, event
func (_m *Plugin) InsertBlockchainEvent(ctx context.Context, event *fftypes.BlockchainEvent) error {
ret := _m.Called(ctx, event)
diff --git a/mocks/dataexchangemocks/callbacks.go b/mocks/dataexchangemocks/callbacks.go
index 37aaf0ed7e..0d390497a6 100644
--- a/mocks/dataexchangemocks/callbacks.go
+++ b/mocks/dataexchangemocks/callbacks.go
@@ -3,7 +3,7 @@
package dataexchangemocks
import (
- fftypes "github.com/hyperledger/firefly/pkg/fftypes"
+ dataexchange "github.com/hyperledger/firefly/pkg/dataexchange"
mock "github.com/stretchr/testify/mock"
)
@@ -12,51 +12,7 @@ type Callbacks struct {
mock.Mock
}
-// MessageReceived provides a mock function with given fields: peerID, data
-func (_m *Callbacks) MessageReceived(peerID string, data []byte) (string, error) {
- ret := _m.Called(peerID, data)
-
- var r0 string
- if rf, ok := ret.Get(0).(func(string, []byte) string); ok {
- r0 = rf(peerID, data)
- } else {
- r0 = ret.Get(0).(string)
- }
-
- var r1 error
- if rf, ok := ret.Get(1).(func(string, []byte) error); ok {
- r1 = rf(peerID, data)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// PrivateBLOBReceived provides a mock function with given fields: peerID, hash, size, payloadRef
-func (_m *Callbacks) PrivateBLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
- ret := _m.Called(peerID, hash, size, payloadRef)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(string, fftypes.Bytes32, int64, string) error); ok {
- r0 = rf(peerID, hash, size, payloadRef)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// TransferResult provides a mock function with given fields: trackingID, status, info
-func (_m *Callbacks) TransferResult(trackingID string, status fftypes.OpStatus, info fftypes.TransportStatusUpdate) error {
- ret := _m.Called(trackingID, status, info)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(string, fftypes.OpStatus, fftypes.TransportStatusUpdate) error); ok {
- r0 = rf(trackingID, status, info)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+// DXEvent provides a mock function with given fields: event
+func (_m *Callbacks) DXEvent(event dataexchange.DXEvent) {
+ _m.Called(event)
}
diff --git a/mocks/dataexchangemocks/dx_event.go b/mocks/dataexchangemocks/dx_event.go
new file mode 100644
index 0000000000..6ce27f9b25
--- /dev/null
+++ b/mocks/dataexchangemocks/dx_event.go
@@ -0,0 +1,99 @@
+// Code generated by mockery v1.0.0. DO NOT EDIT.
+
+package dataexchangemocks
+
+import (
+ dataexchange "github.com/hyperledger/firefly/pkg/dataexchange"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// DXEvent is an autogenerated mock type for the DXEvent type
+type DXEvent struct {
+ mock.Mock
+}
+
+// Ack provides a mock function with given fields:
+func (_m *DXEvent) Ack() {
+ _m.Called()
+}
+
+// AckWithManifest provides a mock function with given fields: manifest
+func (_m *DXEvent) AckWithManifest(manifest string) {
+ _m.Called(manifest)
+}
+
+// ID provides a mock function with given fields:
+func (_m *DXEvent) ID() string {
+ ret := _m.Called()
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// MessageReceived provides a mock function with given fields:
+func (_m *DXEvent) MessageReceived() *dataexchange.MessageReceived {
+ ret := _m.Called()
+
+ var r0 *dataexchange.MessageReceived
+ if rf, ok := ret.Get(0).(func() *dataexchange.MessageReceived); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*dataexchange.MessageReceived)
+ }
+ }
+
+ return r0
+}
+
+// PrivateBlobReceived provides a mock function with given fields:
+func (_m *DXEvent) PrivateBlobReceived() *dataexchange.PrivateBlobReceived {
+ ret := _m.Called()
+
+ var r0 *dataexchange.PrivateBlobReceived
+ if rf, ok := ret.Get(0).(func() *dataexchange.PrivateBlobReceived); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*dataexchange.PrivateBlobReceived)
+ }
+ }
+
+ return r0
+}
+
+// TransferResult provides a mock function with given fields:
+func (_m *DXEvent) TransferResult() *dataexchange.TransferResult {
+ ret := _m.Called()
+
+ var r0 *dataexchange.TransferResult
+ if rf, ok := ret.Get(0).(func() *dataexchange.TransferResult); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*dataexchange.TransferResult)
+ }
+ }
+
+ return r0
+}
+
+// Type provides a mock function with given fields:
+func (_m *DXEvent) Type() dataexchange.DXEventType {
+ ret := _m.Called()
+
+ var r0 dataexchange.DXEventType
+ if rf, ok := ret.Get(0).(func() dataexchange.DXEventType); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(dataexchange.DXEventType)
+ }
+
+ return r0
+}
diff --git a/mocks/dataexchangemocks/plugin.go b/mocks/dataexchangemocks/plugin.go
index 6a1209921e..143d77f98d 100644
--- a/mocks/dataexchangemocks/plugin.go
+++ b/mocks/dataexchangemocks/plugin.go
@@ -51,8 +51,8 @@ func (_m *Plugin) Capabilities() *dataexchange.Capabilities {
return r0
}
-// CheckBLOBReceived provides a mock function with given fields: ctx, peerID, ns, id
-func (_m *Plugin) CheckBLOBReceived(ctx context.Context, peerID string, ns string, id fftypes.UUID) (*fftypes.Bytes32, int64, error) {
+// CheckBlobReceived provides a mock function with given fields: ctx, peerID, ns, id
+func (_m *Plugin) CheckBlobReceived(ctx context.Context, peerID string, ns string, id fftypes.UUID) (*fftypes.Bytes32, int64, error) {
ret := _m.Called(ctx, peerID, ns, id)
var r0 *fftypes.Bytes32
@@ -81,8 +81,8 @@ func (_m *Plugin) CheckBLOBReceived(ctx context.Context, peerID string, ns strin
return r0, r1, r2
}
-// DownloadBLOB provides a mock function with given fields: ctx, payloadRef
-func (_m *Plugin) DownloadBLOB(ctx context.Context, payloadRef string) (io.ReadCloser, error) {
+// DownloadBlob provides a mock function with given fields: ctx, payloadRef
+func (_m *Plugin) DownloadBlob(ctx context.Context, payloadRef string) (io.ReadCloser, error) {
ret := _m.Called(ctx, payloadRef)
var r0 io.ReadCloser
@@ -188,8 +188,8 @@ func (_m *Plugin) Start() error {
return r0
}
-// TransferBLOB provides a mock function with given fields: ctx, opID, peerID, payloadRef
-func (_m *Plugin) TransferBLOB(ctx context.Context, opID *fftypes.UUID, peerID string, payloadRef string) error {
+// TransferBlob provides a mock function with given fields: ctx, opID, peerID, payloadRef
+func (_m *Plugin) TransferBlob(ctx context.Context, opID *fftypes.UUID, peerID string, payloadRef string) error {
ret := _m.Called(ctx, opID, peerID, payloadRef)
var r0 error
@@ -202,8 +202,8 @@ func (_m *Plugin) TransferBLOB(ctx context.Context, opID *fftypes.UUID, peerID s
return r0
}
-// UploadBLOB provides a mock function with given fields: ctx, ns, id, content
-func (_m *Plugin) UploadBLOB(ctx context.Context, ns string, id fftypes.UUID, content io.Reader) (string, *fftypes.Bytes32, int64, error) {
+// UploadBlob provides a mock function with given fields: ctx, ns, id, content
+func (_m *Plugin) UploadBlob(ctx context.Context, ns string, id fftypes.UUID, content io.Reader) (string, *fftypes.Bytes32, int64, error) {
ret := _m.Called(ctx, ns, id, content)
var r0 string
diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go
index b0190008b0..72ea527805 100644
--- a/mocks/datamocks/manager.go
+++ b/mocks/datamocks/manager.go
@@ -32,8 +32,8 @@ func (_m *Manager) CheckDatatype(ctx context.Context, ns string, datatype *fftyp
return r0
}
-// DownloadBLOB provides a mock function with given fields: ctx, ns, dataID
-func (_m *Manager) DownloadBLOB(ctx context.Context, ns string, dataID string) (*fftypes.Blob, io.ReadCloser, error) {
+// DownloadBlob provides a mock function with given fields: ctx, ns, dataID
+func (_m *Manager) DownloadBlob(ctx context.Context, ns string, dataID string) (*fftypes.Blob, io.ReadCloser, error) {
ret := _m.Called(ctx, ns, dataID)
var r0 *fftypes.Blob
@@ -231,8 +231,8 @@ func (_m *Manager) UpdateMessageStateIfCached(ctx context.Context, id *fftypes.U
_m.Called(ctx, id, state, confirmed)
}
-// UploadBLOB provides a mock function with given fields: ctx, ns, inData, blob, autoMeta
-func (_m *Manager) UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) {
+// UploadBlob provides a mock function with given fields: ctx, ns, inData, blob, autoMeta
+func (_m *Manager) UploadBlob(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) {
ret := _m.Called(ctx, ns, inData, blob, autoMeta)
var r0 *fftypes.Data
diff --git a/mocks/eventmocks/event_manager.go b/mocks/eventmocks/event_manager.go
index a16a1310a8..6912537f55 100644
--- a/mocks/eventmocks/event_manager.go
+++ b/mocks/eventmocks/event_manager.go
@@ -97,6 +97,11 @@ func (_m *EventManager) CreateUpdateDurableSubscription(ctx context.Context, sub
return r0
}
+// DXEvent provides a mock function with given fields: dx, event
+func (_m *EventManager) DXEvent(dx dataexchange.Plugin, event dataexchange.DXEvent) {
+ _m.Called(dx, event)
+}
+
// DeleteDurableSubscription provides a mock function with given fields: ctx, subDef
func (_m *EventManager) DeleteDurableSubscription(ctx context.Context, subDef *fftypes.Subscription) error {
ret := _m.Called(ctx, subDef)
@@ -127,27 +132,6 @@ func (_m *EventManager) DeletedSubscriptions() chan<- *fftypes.UUID {
return r0
}
-// MessageReceived provides a mock function with given fields: dx, peerID, data
-func (_m *EventManager) MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) (string, error) {
- ret := _m.Called(dx, peerID, data)
-
- var r0 string
- if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, []byte) string); ok {
- r0 = rf(dx, peerID, data)
- } else {
- r0 = ret.Get(0).(string)
- }
-
- var r1 error
- if rf, ok := ret.Get(1).(func(dataexchange.Plugin, string, []byte) error); ok {
- r1 = rf(dx, peerID, data)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
// NewEvents provides a mock function with given fields:
func (_m *EventManager) NewEvents() chan<- int64 {
ret := _m.Called()
@@ -196,34 +180,6 @@ func (_m *EventManager) NewSubscriptions() chan<- *fftypes.UUID {
return r0
}
-// PrivateBLOBReceived provides a mock function with given fields: dx, peerID, hash, size, payloadRef
-func (_m *EventManager) PrivateBLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
- ret := _m.Called(dx, peerID, hash, size, payloadRef)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, fftypes.Bytes32, int64, string) error); ok {
- r0 = rf(dx, peerID, hash, size, payloadRef)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// SharedStorageBLOBDownloaded provides a mock function with given fields: ss, hash, size, payloadRef
-func (_m *EventManager) SharedStorageBLOBDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) error {
- ret := _m.Called(ss, hash, size, payloadRef)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(sharedstorage.Plugin, fftypes.Bytes32, int64, string) error); ok {
- r0 = rf(ss, hash, size, payloadRef)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
// SharedStorageBatchDownloaded provides a mock function with given fields: ss, ns, payloadRef, data
func (_m *EventManager) SharedStorageBatchDownloaded(ss sharedstorage.Plugin, ns string, payloadRef string, data []byte) (*fftypes.UUID, error) {
ret := _m.Called(ss, ns, payloadRef, data)
@@ -247,6 +203,11 @@ func (_m *EventManager) SharedStorageBatchDownloaded(ss sharedstorage.Plugin, ns
return r0, r1
}
+// SharedStorageBlobDownloaded provides a mock function with given fields: ss, hash, size, payloadRef
+func (_m *EventManager) SharedStorageBlobDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) {
+ _m.Called(ss, hash, size, payloadRef)
+}
+
// Start provides a mock function with given fields:
func (_m *EventManager) Start() error {
ret := _m.Called()
diff --git a/mocks/operationmocks/manager.go b/mocks/operationmocks/manager.go
index a43a3d8efc..8b3a8f7cad 100644
--- a/mocks/operationmocks/manager.go
+++ b/mocks/operationmocks/manager.go
@@ -119,31 +119,13 @@ func (_m *Manager) Start() error {
}
// SubmitOperationUpdate provides a mock function with given fields: plugin, update
-func (_m *Manager) SubmitOperationUpdate(plugin fftypes.Named, update *operations.OperationUpdate) error {
- ret := _m.Called(plugin, update)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(fftypes.Named, *operations.OperationUpdate) error); ok {
- r0 = rf(plugin, update)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+func (_m *Manager) SubmitOperationUpdate(plugin fftypes.Named, update *operations.OperationUpdate) {
+ _m.Called(plugin, update)
}
-// TransferResult provides a mock function with given fields: dx, opIDString, status, update
-func (_m *Manager) TransferResult(dx dataexchange.Plugin, opIDString string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error {
- ret := _m.Called(dx, opIDString, status, update)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, fftypes.OpStatus, fftypes.TransportStatusUpdate) error); ok {
- r0 = rf(dx, opIDString, status, update)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+// TransferResult provides a mock function with given fields: dx, event
+func (_m *Manager) TransferResult(dx dataexchange.Plugin, event dataexchange.DXEvent) {
+ _m.Called(dx, event)
}
// WaitStop provides a mock function with given fields:
diff --git a/mocks/shareddownloadmocks/callbacks.go b/mocks/shareddownloadmocks/callbacks.go
index 973e1599f2..7448b63929 100644
--- a/mocks/shareddownloadmocks/callbacks.go
+++ b/mocks/shareddownloadmocks/callbacks.go
@@ -12,20 +12,6 @@ type Callbacks struct {
mock.Mock
}
-// SharedStorageBLOBDownloaded provides a mock function with given fields: hash, size, payloadRef
-func (_m *Callbacks) SharedStorageBLOBDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) error {
- ret := _m.Called(hash, size, payloadRef)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(fftypes.Bytes32, int64, string) error); ok {
- r0 = rf(hash, size, payloadRef)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
// SharedStorageBatchDownloaded provides a mock function with given fields: ns, payloadRef, data
func (_m *Callbacks) SharedStorageBatchDownloaded(ns string, payloadRef string, data []byte) (*fftypes.UUID, error) {
ret := _m.Called(ns, payloadRef, data)
@@ -48,3 +34,8 @@ func (_m *Callbacks) SharedStorageBatchDownloaded(ns string, payloadRef string,
return r0, r1
}
+
+// SharedStorageBlobDownloaded provides a mock function with given fields: hash, size, payloadRef
+func (_m *Callbacks) SharedStorageBlobDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) {
+ _m.Called(hash, size, payloadRef)
+}
diff --git a/mocks/tokenmocks/callbacks.go b/mocks/tokenmocks/callbacks.go
index f72c46eb5e..af90cc38b9 100644
--- a/mocks/tokenmocks/callbacks.go
+++ b/mocks/tokenmocks/callbacks.go
@@ -15,17 +15,8 @@ type Callbacks struct {
}
// TokenOpUpdate provides a mock function with given fields: plugin, operationID, txState, blockchainTXID, errorMessage, opOutput
-func (_m *Callbacks) TokenOpUpdate(plugin tokens.Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID string, errorMessage string, opOutput fftypes.JSONObject) error {
- ret := _m.Called(plugin, operationID, txState, blockchainTXID, errorMessage, opOutput)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(tokens.Plugin, *fftypes.UUID, fftypes.OpStatus, string, string, fftypes.JSONObject) error); ok {
- r0 = rf(plugin, operationID, txState, blockchainTXID, errorMessage, opOutput)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+func (_m *Callbacks) TokenOpUpdate(plugin tokens.Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID string, errorMessage string, opOutput fftypes.JSONObject) {
+ _m.Called(plugin, operationID, txState, blockchainTXID, errorMessage, opOutput)
}
// TokenPoolCreated provides a mock function with given fields: plugin, pool
diff --git a/pkg/blockchain/plugin.go b/pkg/blockchain/plugin.go
index 72ccacde7b..953cf19e72 100644
--- a/pkg/blockchain/plugin.go
+++ b/pkg/blockchain/plugin.go
@@ -84,7 +84,7 @@ type Callbacks interface {
// Only the party submitting the transaction will see this data.
//
// Error should will only be returned in shutdown scenarios
- BlockchainOpUpdate(plugin Plugin, operationID *fftypes.UUID, txState TransactionStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) error
+ BlockchainOpUpdate(plugin Plugin, operationID *fftypes.UUID, txState TransactionStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject)
// BatchPinComplete notifies on the arrival of a sequenced batch of messages, which might have been
// submitted by us, or by any other authorized party in the network.
diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go
index 330967cf2e..696646e88b 100644
--- a/pkg/database/plugin.go
+++ b/pkg/database/plugin.go
@@ -354,6 +354,9 @@ type iBlobCollection interface {
// InsertBlob - insert a blob
InsertBlob(ctx context.Context, blob *fftypes.Blob) (err error)
+ // InsertBlobs performs a batch insert of blobs assured to be new records - fails if they already exist, so caller can fall back to upsert individually
+ InsertBlobs(ctx context.Context, blobs []*fftypes.Blob) (err error)
+
// GetBlobMatchingHash - lookup first blob batching a hash
GetBlobMatchingHash(ctx context.Context, hash *fftypes.Bytes32) (message *fftypes.Blob, err error)
diff --git a/pkg/dataexchange/plugin.go b/pkg/dataexchange/plugin.go
index 1cbebd04fd..3795087580 100644
--- a/pkg/dataexchange/plugin.go
+++ b/pkg/dataexchange/plugin.go
@@ -31,7 +31,7 @@ import (
// - Security of transfer of data between participants (transport and payload authorization & encryption)
// - Reliability of transfer of data between participants (many transports can be supported - HTTPs/AMQP/MQTT etc.)
//
-// Each plugin must handle network addressing, as well as transfer of messages and BLOBs.
+// Each plugin must handle network addressing, as well as transfer of messages and Blobs.
//
// Network addressing:
// - Each node must have a "peerID" (<=256b) that uniquely identifies a node within the data exchange network
@@ -42,11 +42,11 @@ import (
// - Must drive events on the target node that contain the input data
// - No requirement to retain the data beyond the confirmation of receipt of the event at the target
//
-// BLOBS
+// Blobs
// - Can be stored and retrieved separately from their transfer
// - Transfers are initiated via reference (not in-line data)
// - Are hashed by the DX plugin using the same hashing algorithm as FireFly (SHA256)
-// - DX plugins can mainain their own internal IDs for BLOBs within the following requirements:
+// - DX plugins can maintain their own internal IDs for Blobs within the following requirements:
// - Given a namespace and ID, map to a "payloadRef" string (<1024chars) that allows that same payload to be retrieved using only that payloadRef
// - Example would be a logical filesystem path like "local/namespace/ID"
// - When data is recevied from other members in the network, be able to return the hash when provided with the remote peerID string, namespace and ID
@@ -74,34 +74,64 @@ type Plugin interface {
// AddPeer translates the configuration published by another peer, into a reference string that is used between DX and FireFly to refer to the peer
AddPeer(ctx context.Context, peer fftypes.JSONObject) (err error)
- // UploadBLOB streams a blob to storage, and returns the hash to confirm the hash calculated in Core matches the hash calculated in the plugin
- UploadBLOB(ctx context.Context, ns string, id fftypes.UUID, content io.Reader) (payloadRef string, hash *fftypes.Bytes32, size int64, err error)
+ // UploadBlob streams a blob to storage, and returns the hash to confirm the hash calculated in Core matches the hash calculated in the plugin
+ UploadBlob(ctx context.Context, ns string, id fftypes.UUID, content io.Reader) (payloadRef string, hash *fftypes.Bytes32, size int64, err error)
- // DownloadBLOB streams a received blob out of storage
- DownloadBLOB(ctx context.Context, payloadRef string) (content io.ReadCloser, err error)
+ // DownloadBlob streams a received blob out of storage
+ DownloadBlob(ctx context.Context, payloadRef string) (content io.ReadCloser, err error)
- // CheckBLOBReceived confirms that a blob with the specified hash has been received from the specified peer
- CheckBLOBReceived(ctx context.Context, peerID, ns string, id fftypes.UUID) (hash *fftypes.Bytes32, size int64, err error)
+ // CheckBlobReceived confirms that a blob with the specified hash has been received from the specified peer
+ CheckBlobReceived(ctx context.Context, peerID, ns string, id fftypes.UUID) (hash *fftypes.Bytes32, size int64, err error)
// SendMessage sends an in-line package of data to another network node.
// Should return as quickly as possible for parallelsim, then report completion asynchronously via the operation ID
SendMessage(ctx context.Context, opID *fftypes.UUID, peerID string, data []byte) (err error)
- // TransferBLOB initiates a transfer of a previoiusly stored blob to another node
- TransferBLOB(ctx context.Context, opID *fftypes.UUID, peerID string, payloadRef string) (err error)
+ // TransferBlob initiates a transfer of a previously stored blob to another node
+ TransferBlob(ctx context.Context, opID *fftypes.UUID, peerID string, payloadRef string) (err error)
}
// Callbacks is the interface provided to the data exchange plugin, to allow it to pass events back to firefly.
type Callbacks interface {
+ // DXEvent has sub-types as defined below, and can be processed and ack'd asynchronously
+ DXEvent(event DXEvent)
+}
+
+type DXEventType int
+
+// DXEvent is a single interface that can be passed for all event types
+type DXEvent interface {
+ ID() string
+ Ack()
+ AckWithManifest(manifest string)
+ Type() DXEventType
+ MessageReceived() *MessageReceived
+ PrivateBlobReceived() *PrivateBlobReceived
+ TransferResult() *TransferResult
+}
- // MessageReceived notifies of a message received from another node in the network
- MessageReceived(peerID string, data []byte) (manifest string, err error)
+const (
+ DXEventTypeMessageReceived DXEventType = iota
+ DXEventTypePrivateBlobReceived
+ DXEventTypeTransferResult
+)
- // PrivateBLOBReceived notifies of the ID of a BLOB that has been stored by DX after being received from another node in the network
- PrivateBLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error
+type MessageReceived struct {
+ PeerID string
+ Data []byte
+}
+
+type PrivateBlobReceived struct {
+ PeerID string
+ Hash fftypes.Bytes32
+ Size int64
+ PayloadRef string
+}
- // TransferResult notifies of a status update of a transfer (can have multiple status updates).
- TransferResult(trackingID string, status fftypes.OpStatus, info fftypes.TransportStatusUpdate) error
+type TransferResult struct {
+ TrackingID string
+ Status fftypes.OpStatus
+ fftypes.TransportStatusUpdate
}
// Capabilities the supported featureset of the data exchange
diff --git a/pkg/tokens/plugin.go b/pkg/tokens/plugin.go
index 6bd705fb0a..e54439b282 100644
--- a/pkg/tokens/plugin.go
+++ b/pkg/tokens/plugin.go
@@ -73,7 +73,7 @@ type Callbacks interface {
// Only the party submitting the transaction will see this data.
//
// Error should only be returned in shutdown scenarios
- TokenOpUpdate(plugin Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) error
+ TokenOpUpdate(plugin Plugin, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject)
// TokenPoolCreated notifies on the creation of a new token pool, which might have been
// submitted by us, or by any other authorized party in the network.