Commit bc0d4cf1 authored by manish's avatar manish Committed by Yacov Manevich
Browse files

[FAB-10041] Ledger: Metadata - pvt data updates



This CR covers a special case of metadata update for
private data: allowing a transaction to update only the metadata of a private data key.

Specifically,
 - Enhances the rwset builder to include the info about metadata update
   in pvt write-set
 - During validation and commit, detects whether the version of the
   already committed pvt data needs to be increased so as to match the
   version with the corresponding hashed entry

Change-Id: I70724b2cac9a12ec16d7c6ef4a3dcc075d0c1e08
Signed-off-by: manish <manish.sethi@gmail.com>
parent 0cc98742
......@@ -57,8 +57,9 @@ type nsPvtRwBuilder struct {
}
type collPvtRwBuilder struct {
collectionName string
writeMap map[string]*kvrwset.KVWrite
collectionName string
writeMap map[string]*kvrwset.KVWrite
metadataWriteMap map[string]*kvrwset.KVMetadataWrite
}
type rangeQueryKey struct {
......@@ -117,6 +118,11 @@ func (b *RWSetBuilder) AddToPvtAndHashedWriteSet(ns string, coll string, key str
// AddToHashedMetadataWriteSet adds a metadata write for a key to the hashed write-set,
// and records the bare key in the corresponding private write-set.
func (b *RWSetBuilder) AddToHashedMetadataWriteSet(ns, coll, key string, metadata map[string][]byte) {
	// The private write-set needs only the key, not the metadata itself: the metadata is
	// stored solely against the hashed key. The key is still recorded in the private
	// write-set so that the special case of a metadata-only update can be handled — there,
	// the version of the key already present in the private data store must be incremented
	// to match the version of the corresponding hashed entry.
	b.getOrCreateCollPvtRwBuilder(ns, coll).
		metadataWriteMap[key] = &kvrwset.KVMetadataWrite{Key: key, Entries: nil}
	b.getOrCreateCollHashedRwBuilder(ns, coll).
		metadataWriteMap[key] = mapToMetadataWriteHash(key, metadata)
}
......@@ -252,11 +258,14 @@ func (b *collHashRwBuilder) build() *CollHashedRwSet {
func (b *collPvtRwBuilder) build() *CollPvtRwSet {
var writeSet []*kvrwset.KVWrite
var metadataWriteSet []*kvrwset.KVMetadataWrite
util.GetValuesBySortedKeys(&(b.writeMap), &writeSet)
util.GetValuesBySortedKeys(&(b.metadataWriteMap), &metadataWriteSet)
return &CollPvtRwSet{
CollectionName: b.collectionName,
KvRwSet: &kvrwset.KVRWSet{
Writes: writeSet,
Writes: writeSet,
MetadataWrites: metadataWriteSet,
},
}
}
......@@ -326,7 +335,11 @@ func newCollHashRwBuilder(collName string) *collHashRwBuilder {
}
func newCollPvtRwBuilder(collName string) *collPvtRwBuilder {
return &collPvtRwBuilder{collName, make(map[string]*kvrwset.KVWrite)}
return &collPvtRwBuilder{
collName,
make(map[string]*kvrwset.KVWrite),
make(map[string]*kvrwset.KVMetadataWrite),
}
}
func mapToMetadataWrite(key string, m map[string][]byte) *kvrwset.KVMetadataWrite {
......
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Copyright IBM Corp. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package rwsetutil
......@@ -250,8 +240,14 @@ func TestTxSimulationResultWithMetadata(t *testing.T) {
// construct the expected pvt rwset and compare with the one present in the txSimulationResults
pvtNs1Coll1 := &kvrwset.KVRWSet{
Writes: []*kvrwset.KVWrite{newKVWrite("key1", []byte("pvt-ns1-coll1-key1-value"))},
Writes: []*kvrwset.KVWrite{newKVWrite("key1", []byte("pvt-ns1-coll1-key1-value"))},
MetadataWrites: []*kvrwset.KVMetadataWrite{{Key: "key1"}},
}
pvtNs1Coll2 := &kvrwset.KVRWSet{
MetadataWrites: []*kvrwset.KVMetadataWrite{{Key: "key1"}},
}
expectedPvtRWSet := &rwset.TxPvtReadWriteSet{
DataModel: rwset.TxReadWriteSet_KV,
NsPvtRwset: []*rwset.NsPvtReadWriteSet{
......@@ -262,12 +258,15 @@ func TestTxSimulationResultWithMetadata(t *testing.T) {
CollectionName: "coll1",
Rwset: serializeTestProtoMsg(t, pvtNs1Coll1),
},
{
CollectionName: "coll2",
Rwset: serializeTestProtoMsg(t, pvtNs1Coll2),
},
},
},
},
}
assert.Equal(t, expectedPvtRWSet, actualSimRes.PvtSimulationResults)
// construct the public and hashed rwset (which will be part of the block) and compare with the one present in the txSimulationResults
pubNs1 := &kvrwset.KVRWSet{
Reads: []*kvrwset.KVRead{NewKVRead("key1", version.NewHeight(1, 1))},
......@@ -327,7 +326,7 @@ func TestTxSimulationResultWithMetadata(t *testing.T) {
{
CollectionName: "coll2",
HashedRwset: serializeTestProtoMsg(t, hashedNs1Coll2),
PvtRwsetHash: nil,
PvtRwsetHash: util.ComputeHash(serializeTestProtoMsg(t, pvtNs1Coll2)),
},
},
}
......
......@@ -18,10 +18,17 @@ func SerializeMetadata(metadataEntries []*kvrwset.KVMetadataEntry) ([]byte, erro
}
// DeserializeMetadata deserializes metadata bytes from statedb
func DeserializeMetadata(metadataBytes []byte) ([]*kvrwset.KVMetadataEntry, error) {
func DeserializeMetadata(metadataBytes []byte) (map[string][]byte, error) {
if metadataBytes == nil {
return nil, nil
}
metadata := &kvrwset.KVMetadataWrite{}
if err := proto.Unmarshal(metadataBytes, metadata); err != nil {
return nil, err
}
return metadata.Entries, nil
m := make(map[string][]byte, len(metadata.Entries))
for _, metadataEntry := range metadata.Entries {
m[metadataEntry.Name] = metadataEntry.Value
}
return m, nil
}
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package storageutil
import (
"testing"
"github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset"
"github.com/stretchr/testify/assert"
)
// TestSerializeDeSerialize verifies that metadata entries survive a
// serialize/deserialize round trip and come back as a map keyed by entry name.
func TestSerializeDeSerialize(t *testing.T) {
	sampleMetadata := []*kvrwset.KVMetadataEntry{
		{Name: "metadata_1", Value: []byte("metadata_value_1")},
		{Name: "metadata_2", Value: []byte("metadata_value_2")},
		{Name: "metadata_3", Value: []byte("metadata_value_3")},
	}
	serializedMetadata, err := SerializeMetadata(sampleMetadata)
	assert.NoError(t, err)
	metadataMap, err := DeserializeMetadata(serializedMetadata)
	assert.NoError(t, err)
	// Every input entry must be present in the deserialized map, and nothing else.
	assert.Len(t, metadataMap, len(sampleMetadata))
	for _, entry := range sampleMetadata {
		assert.Equal(t, entry.Value, metadataMap[entry.Name])
	}
}
......@@ -53,7 +53,7 @@ func (impl *DefaultImpl) ValidateAndPrepareBatch(blockAndPvtdata *ledger.BlockAn
return nil, err
}
logger.Debug("validating rwset...")
if pvtUpdates, err = validateAndPreparePvtBatch(internalBlock, blockAndPvtdata.BlockPvtData); err != nil {
if pvtUpdates, err = validateAndPreparePvtBatch(internalBlock, impl.db, pubAndHashUpdates, blockAndPvtdata.BlockPvtData); err != nil {
return nil, err
}
logger.Debug("postprocessing ProtoBlock...")
......
......@@ -14,6 +14,7 @@ import (
"github.com/hyperledger/fabric/core/ledger/customtx"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/txmgr"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/validator"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/validator/valinternal"
......@@ -28,8 +29,10 @@ import (
// validateAndPreparePvtBatch pulls out the private write-set for the transactions that are marked as valid
// by the internal public data validator. Finally, it validates (if not already self-endorsed) the pvt rwset against the
// corresponding hash present in the public rwset
func validateAndPreparePvtBatch(block *valinternal.Block, pvtdata map[uint64]*ledger.TxPvtData) (*privacyenabledstate.PvtUpdateBatch, error) {
func validateAndPreparePvtBatch(block *valinternal.Block, db privacyenabledstate.DB,
pubAndHashUpdates *valinternal.PubAndHashUpdates, pvtdata map[uint64]*ledger.TxPvtData) (*privacyenabledstate.PvtUpdateBatch, error) {
pvtUpdates := privacyenabledstate.NewPvtUpdateBatch()
metadataUpdates := metadataUpdates{}
for _, tx := range block.Txs {
if tx.ValidationCode != peer.TxValidationCode_VALID {
continue
......@@ -52,6 +55,10 @@ func validateAndPreparePvtBatch(block *valinternal.Block, pvtdata map[uint64]*le
return nil, err
}
addPvtRWSetToPvtUpdateBatch(pvtRWSet, pvtUpdates, version.NewHeight(block.Num, uint64(tx.IndexInBlock)))
addEntriesToMetadataUpdates(metadataUpdates, pvtRWSet)
}
if err := incrementPvtdataVersionIfNeeded(metadataUpdates, pvtUpdates, pubAndHashUpdates, db); err != nil {
return nil, err
}
return pvtUpdates, nil
}
......@@ -220,3 +227,69 @@ func addPvtRWSetToPvtUpdateBatch(pvtRWSet *rwsetutil.TxPvtRwSet, pvtUpdateBatch
}
}
}
// incrementPvtdataVersionIfNeeded changes the versions of private data keys whose
// corresponding hashed-key version has been upgraded. A metadata-update-only transaction
// may have caused a version change of the existing value in the hashed space. This
// function iterates through all the metadata writes, retrieves the latest private value
// for each key, and — when that value is available and matches the committed hash —
// bumps the version in the private updates to the version of the hashed key. Without
// this, the private state would hold the latest value under a stale version, causing
// later simulations to wrongly treat the value as stale and fail.
func incrementPvtdataVersionIfNeeded(
	metadataUpdates metadataUpdates,
	pvtUpdateBatch *privacyenabledstate.PvtUpdateBatch,
	pubAndHashUpdates *valinternal.PubAndHashUpdates,
	db privacyenabledstate.DB) error {
	for collKey := range metadataUpdates {
		ns, coll, key := collKey.ns, collKey.coll, collKey.key
		keyHash := util.ComputeStringHash(key)
		hashedVal := pubAndHashUpdates.HashUpdates.Get(ns, coll, string(keyHash))
		if hashedVal == nil {
			// This key is finally not getting updated in the hashed space by this block -
			// either the metadata update was on a non-existing key or the key gets deleted
			// by a later transaction in the block. Ignore the metadata update for this key.
			continue
		}
		latestVal, err := retrieveLatestVal(ns, coll, key, pvtUpdateBatch, db)
		if err != nil {
			return err
		}
		if latestVal == nil || // latest value not found either in db or in the pvt updates (caused by commit with missing data)
			version.AreSame(latestVal.Version, hashedVal.Version) { // version already matches the hashed space - no metadata-only version increment took place
			continue
		}
		// TODO - computing hash could be avoided. In the hashed updates, we can augment
		// additional info about which original version has been renewed.
		latestValHash := util.ComputeHash(latestVal.Value)
		// Since we allow block commits with missing pvt data, the private value available may be
		// stale; upgrade the version only if the pvt value matches the corresponding hash in the
		// hashed space.
		if bytes.Equal(latestValHash, hashedVal.Value) {
			pvtUpdateBatch.Put(ns, coll, key, latestVal.Value, hashedVal.Version)
		}
	}
	return nil
}
// collKey identifies a private data key by its namespace, collection, and key name.
type collKey struct {
	ns, coll, key string
}

// metadataUpdates is the set of private data keys for which a metadata write
// appears in the block's private write-sets.
type metadataUpdates map[collKey]bool
// addEntriesToMetadataUpdates records, for every metadata write present in the given
// private write-set, the (namespace, collection, key) tuple into the supplied set.
func addEntriesToMetadataUpdates(metadataUpdates metadataUpdates, pvtRWSet *rwsetutil.TxPvtRwSet) {
	for _, nsRwSet := range pvtRWSet.NsPvtRwSet {
		for _, collRwSet := range nsRwSet.CollPvtRwSets {
			for _, metadataWrite := range collRwSet.KvRwSet.MetadataWrites {
				k := collKey{nsRwSet.NameSpace, collRwSet.CollectionName, metadataWrite.Key}
				metadataUpdates[k] = true
			}
		}
	}
}
// retrieveLatestVal returns the current value of a private data key, preferring the
// value staged in this block's private update batch over the one stored in the db.
func retrieveLatestVal(ns, coll, key string, pvtUpdateBatch *privacyenabledstate.PvtUpdateBatch,
	db privacyenabledstate.DB) (*statedb.VersionedValue, error) {
	if staged := pvtUpdateBatch.Get(ns, coll, key); staged != nil {
		return staged, nil
	}
	return db.GetPrivateData(ns, coll, key)
}
......@@ -8,53 +8,39 @@ package valimpl
import (
"fmt"
"os"
"testing"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/ledger/testutil"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/validator/valinternal"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
lgrutil "github.com/hyperledger/fabric/core/ledger/util"
lutils "github.com/hyperledger/fabric/core/ledger/util"
"github.com/hyperledger/fabric/protos/common"
"github.com/hyperledger/fabric/protos/peer"
putils "github.com/hyperledger/fabric/protos/utils"
"github.com/op/go-logging"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
func getPubAndPvtSimulationResults(t *testing.T, key string) *ledger.TxSimulationResults {
rwSetBuilder := rwsetutil.NewRWSetBuilder()
// public rws ns1 + ns2
rwSetBuilder.AddToReadSet("ns1", key, version.NewHeight(1, 1))
rwSetBuilder.AddToReadSet("ns2", key, version.NewHeight(1, 1))
rwSetBuilder.AddToWriteSet("ns2", key, []byte("ns2-key1-value"))
// pvt rwset ns1
rwSetBuilder.AddToHashedReadSet("ns1", "coll1", key, version.NewHeight(1, 1))
rwSetBuilder.AddToHashedReadSet("ns1", "coll2", key, version.NewHeight(1, 1))
rwSetBuilder.AddToPvtAndHashedWriteSet("ns1", "coll2", key, []byte("pvt-ns1-coll2-key1-value"))
// pvt rwset ns2
rwSetBuilder.AddToHashedReadSet("ns2", "coll1", key, version.NewHeight(1, 1))
rwSetBuilder.AddToHashedReadSet("ns2", "coll2", key, version.NewHeight(1, 1))
rwSetBuilder.AddToPvtAndHashedWriteSet("ns2", "coll2", key, []byte("pvt-ns2-coll2-key1-value"))
rwSetBuilder.AddToPvtAndHashedWriteSet("ns2", "coll3", key, nil)
rwSetBuilder.AddToHashedReadSet("ns3", "coll1", key, version.NewHeight(1, 1))
pubAndPvtSimulationResults, err := rwSetBuilder.GetTxSimulationResults()
if err != nil {
t.Fatalf("ConstructSimulationResultsWithPvtData failed while getting simulation results, err %s", err)
}
return pubAndPvtSimulationResults
// TestMain configures debug-level logging and a throwaway ledger filesystem path
// before running the package tests.
func TestMain(m *testing.M) {
	flogging.SetModuleLevel("valinternal", "debug")
	viper.Set("peer.fileSystemPath", "/tmp/fabric/ledgertests/kvledger/txmgmt/validator/valinternal")
	os.Exit(m.Run())
}
func TestValidateAndPreparePvtBatch(t *testing.T) {
testDBEnv := &privacyenabledstate.LevelDBCommonStorageTestEnv{}
testDBEnv.Init(t)
defer testDBEnv.Cleanup()
testDB := testDBEnv.GetDBHandle("emptydb")
pubSimulationResults := [][]byte{}
pvtDataMap := make(map[uint64]*ledger.TxPvtData)
......@@ -66,7 +52,7 @@ func TestValidateAndPreparePvtBatch(t *testing.T) {
// Tx 1
// Get simulation results for tx1
tx1SimulationResults := getPubAndPvtSimulationResults(t, "key1")
tx1SimulationResults := testutilSampleTxSimulationResults(t, "key1")
res, err := tx1SimulationResults.GetPubSimulationBytes()
assert.NoError(t, err)
......@@ -79,7 +65,7 @@ func TestValidateAndPreparePvtBatch(t *testing.T) {
// Tx 2
// Get simulation results for tx2
tx2SimulationResults := getPubAndPvtSimulationResults(t, "key2")
tx2SimulationResults := testutilSampleTxSimulationResults(t, "key2")
res, err = tx2SimulationResults.GetPubSimulationBytes()
assert.NoError(t, err)
......@@ -91,7 +77,7 @@ func TestValidateAndPreparePvtBatch(t *testing.T) {
// Tx 3
// Get simulation results for tx3
tx3SimulationResults := getPubAndPvtSimulationResults(t, "key3")
tx3SimulationResults := testutilSampleTxSimulationResults(t, "key3")
res, err = tx3SimulationResults.GetPubSimulationBytes()
assert.NoError(t, err)
......@@ -141,7 +127,7 @@ func TestValidateAndPreparePvtBatch(t *testing.T) {
assert.NoError(t, err)
addPvtRWSetToPvtUpdateBatch(tx1TxPvtRWSet, expectedPvtUpdates, version.NewHeight(uint64(10), uint64(0)))
actualPvtUpdates, err := validateAndPreparePvtBatch(mvccValidatedBlock, pvtDataMap)
actualPvtUpdates, err := validateAndPreparePvtBatch(mvccValidatedBlock, testDB, nil, pvtDataMap)
assert.NoError(t, err)
assert.Equal(t, expectedPvtUpdates, actualPvtUpdates)
......@@ -246,7 +232,7 @@ func TestPreprocessProtoBlockInvalidWriteset(t *testing.T) {
block := testutil.ConstructBlock(t, 1, testutil.ConstructRandomBytes(t, 32),
[][]byte{simulation1Bytes, simulation2Bytes}, false) // block with two txs
txfilter := lgrutil.TxValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
txfilter := lutils.TxValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
assert.True(t, txfilter.IsValid(0))
assert.True(t, txfilter.IsValid(1)) // both txs are valid initially at the time of block cutting
......@@ -258,6 +244,56 @@ func TestPreprocessProtoBlockInvalidWriteset(t *testing.T) {
assert.Equal(t, internalBlock.Txs[0].IndexInBlock, 1)
}
// TestIncrementPvtdataVersionIfNeeded exercises the three outcomes of
// incrementPvtdataVersionIfNeeded: version bump of an existing pvt update (key1),
// insertion of the db value under the new hashed version (key2), and no change
// when the pvt value is stale/missing (key3).
func TestIncrementPvtdataVersionIfNeeded(t *testing.T) {
	testDBEnv := &privacyenabledstate.LevelDBCommonStorageTestEnv{}
	testDBEnv.Init(t)
	defer testDBEnv.Cleanup()
	testDB := testDBEnv.GetDBHandle("testdb")
	updateBatch := privacyenabledstate.NewUpdateBatch()
	// populate db with some pvt data
	updateBatch.PvtUpdates.Put("ns", "coll1", "key1", []byte("value1"), version.NewHeight(1, 1))
	updateBatch.PvtUpdates.Put("ns", "coll2", "key2", []byte("value2"), version.NewHeight(1, 2))
	updateBatch.PvtUpdates.Put("ns", "coll3", "key3", []byte("value3"), version.NewHeight(1, 3))
	// NOTE(review): "col4" (single 'l') looks like a typo for "coll4", but this entry is
	// never read back by the test, so it is harmless — confirm before changing.
	updateBatch.PvtUpdates.Put("ns", "col4", "key4", []byte("value4"), version.NewHeight(1, 4))
	testDB.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(1, 4))
	// for the current block, mimic the resultant hashed updates
	hashUpdates := privacyenabledstate.NewHashedUpdateBatch()
	hashUpdates.PutValHashAndMetadata("ns", "coll1", lutils.ComputeStringHash("key1"),
		lutils.ComputeStringHash("value1_set_by_tx1"), []byte("metadata1_set_by_tx2"), version.NewHeight(2, 2)) // mimics the situation - value set by tx1 and metadata by tx2
	hashUpdates.PutValHashAndMetadata("ns", "coll2", lutils.ComputeStringHash("key2"),
		lutils.ComputeStringHash("value2"), []byte("metadata2_set_by_tx4"), version.NewHeight(2, 4)) // only metadata set by tx4
	hashUpdates.PutValHashAndMetadata("ns", "coll3", lutils.ComputeStringHash("key3"),
		lutils.ComputeStringHash("value3_set_by_tx6"), []byte("metadata3"), version.NewHeight(2, 6)) // only value set by tx6
	pubAndHashedUpdatesBatch := &valinternal.PubAndHashUpdates{HashUpdates: hashUpdates}
	// for the current block, mimic the resultant pvt updates (without metadata taken into account).
	// Assume that tx6's pvt data is missing.
	pvtUpdateBatch := privacyenabledstate.NewPvtUpdateBatch()
	pvtUpdateBatch.Put("ns", "coll1", "key1", []byte("value1_set_by_tx1"), version.NewHeight(2, 1))
	pvtUpdateBatch.Put("ns", "coll3", "key3", []byte("value3_set_by_tx5"), version.NewHeight(2, 5))
	// metadata updated for key1 and key2
	metadataUpdates := metadataUpdates{collKey{"ns", "coll1", "key1"}: true, collKey{"ns", "coll2", "key2"}: true}
	// invoke function and test results
	err := incrementPvtdataVersionIfNeeded(metadataUpdates, pvtUpdateBatch, pubAndHashedUpdatesBatch, testDB)
	assert.NoError(t, err)
	assert.Equal(t,
		&statedb.VersionedValue{Value: []byte("value1_set_by_tx1"), Version: version.NewHeight(2, 2)}, // key1 value should be same and version should be upgraded to (2,2)
		pvtUpdateBatch.Get("ns", "coll1", "key1"),
	)
	assert.Equal(t,
		&statedb.VersionedValue{Value: []byte("value2"), Version: version.NewHeight(2, 4)}, // key2 entry should get added with value in the db and version (2,4)
		pvtUpdateBatch.Get("ns", "coll2", "key2"),
	)
	assert.Equal(t,
		&statedb.VersionedValue{Value: []byte("value3_set_by_tx5"), Version: version.NewHeight(2, 5)}, // key3 should be unaffected because tx6 was missing from pvt data
		pvtUpdateBatch.Get("ns", "coll3", "key3"),
	)
}
// from go-logging memory_test.go
func memoryRecordN(b *logging.MemoryBackend, n int) *logging.Record {
node := b.Head()
......@@ -272,3 +308,31 @@ func memoryRecordN(b *logging.MemoryBackend, n int) *logging.Record {
}
return node.Record
}
// testutilSampleTxSimulationResults builds sample public and private simulation
// results for the given key: public reads on ns1/ns2 plus a public write on ns2,
// and hashed reads/writes across several collections of ns1, ns2, and ns3.
func testutilSampleTxSimulationResults(t *testing.T, key string) *ledger.TxSimulationResults {
	rwSetBuilder := rwsetutil.NewRWSetBuilder()
	// public rws ns1 + ns2
	rwSetBuilder.AddToReadSet("ns1", key, version.NewHeight(1, 1))
	rwSetBuilder.AddToReadSet("ns2", key, version.NewHeight(1, 1))
	rwSetBuilder.AddToWriteSet("ns2", key, []byte("ns2-key1-value"))
	// pvt rwset ns1
	rwSetBuilder.AddToHashedReadSet("ns1", "coll1", key, version.NewHeight(1, 1))
	rwSetBuilder.AddToHashedReadSet("ns1", "coll2", key, version.NewHeight(1, 1))
	rwSetBuilder.AddToPvtAndHashedWriteSet("ns1", "coll2", key, []byte("pvt-ns1-coll2-key1-value"))
	// pvt rwset ns2 (includes a delete: nil value for ns2/coll3)
	rwSetBuilder.AddToHashedReadSet("ns2", "coll1", key, version.NewHeight(1, 1))
	rwSetBuilder.AddToHashedReadSet("ns2", "coll2", key, version.NewHeight(1, 1))
	rwSetBuilder.AddToPvtAndHashedWriteSet("ns2", "coll2", key, []byte("pvt-ns2-coll2-key1-value"))
	rwSetBuilder.AddToPvtAndHashedWriteSet("ns2", "coll3", key, nil)
	rwSetBuilder.AddToHashedReadSet("ns3", "coll1", key, version.NewHeight(1, 1))
	pubAndPvtSimulationResults, err := rwSetBuilder.GetTxSimulationResults()
	if err != nil {
		t.Fatalf("ConstructSimulationResultsWithPvtData failed while getting simulation results, err %s", err)
	}
	return pubAndPvtSimulationResults
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment