Commit f37beaa9 authored by manish

Missingdata-recon: Handle coll eligibility change



This CR handles the event when a peer becomes eligible to receive
data for an existing collection. All the missing data entries for the
collection that were previously marked as 'ineligible' are converted
to 'eligible' in a background goroutine, so that the query results for
reporting missing data also include these entries for previous blocks.

FAB-11437 #done

Change-Id: I145a079b69e8bf02b4c97da23fbf08d7ce2ae268
Signed-off-by: manish <manish.sethi@gmail.com>
parent a2bf9dc7
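
Editor's illustration (not part of the commit): a self-contained Go sketch of the coalesced-notification pattern that the change uses for its background goroutine. A buffered channel of capacity 1 means at most one wake-up is ever pending while the processor runs; this mirrors the new collElgProcSync type and launchCollElgProc further down in the diff, with the actual event processing stubbed out.

// Toy illustration of the coalesced-signal pattern used by the new
// collElgProcSync/launchCollElgProc code; the event processing is a stub.
package main

import (
	"fmt"
	"time"
)

type elgProcSync struct {
	notification chan bool // capacity 1: repeated signals are coalesced
}

func (s *elgProcSync) notify() {
	select {
	case s.notification <- true:
		fmt.Println("signaled processing routine")
	default: // a signal is already pending; the running processor will pick up the work
		fmt.Println("previous signal still pending, skipping")
	}
}

func main() {
	s := &elgProcSync{notification: make(chan bool, 1)}
	go func() {
		for range s.notification {
			// placeholder for processCollElgEvents(): scan pending eligibility
			// events and convert the corresponding missing-data entries
			fmt.Println("processing eligibility events")
			time.Sleep(50 * time.Millisecond)
		}
	}()
	s.notify()
	s.notify() // coalesced: only one wake-up remains pending
	time.Sleep(200 * time.Millisecond)
}
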
......@@ -136,6 +136,11 @@ func (batch *UpdateBatch) Delete(key []byte) {
batch.KVs[string(key)] = nil
}
// Len returns the number of entries in the batch
func (batch *UpdateBatch) Len() int {
return len(batch.KVs)
}
// Iterator extends actual leveldb iterator
type Iterator struct {
iterator.Iterator
......
......@@ -38,6 +38,9 @@ const confMaxBatchSize = "ledger.state.couchDBConfig.maxBatchUpdateSize"
const confAutoWarmIndexes = "ledger.state.couchDBConfig.autoWarmIndexes"
const confWarmIndexesAfterNBlocks = "ledger.state.couchDBConfig.warmIndexesAfterNBlocks"
var confCollElgProcMaxDbBatchSize = &conf{"ledger.pvtdataStore.collElgProcMaxDbBatchSize", 5000}
var confCollElgProcDbBatchesInterval = &conf{"ledger.pvtdataStore.collElgProcDbBatchesInterval", 1000}
// GetRootPath returns the filesystem path.
// All ledger related contents are expected to be stored under this path
func GetRootPath() string {
......@@ -85,7 +88,7 @@ func GetMaxBlockfileSize() int {
return 64 * 1024 * 1024
}
//GetTotalLimit exposes the totalLimit variable
// GetTotalQueryLimit exposes the totalLimit variable
func GetTotalQueryLimit() int {
totalQueryLimit := viper.GetInt(confTotalQueryLimit)
// if queryLimit was unset, default to 10000
......@@ -95,7 +98,7 @@ func GetTotalQueryLimit() int {
return totalQueryLimit
}
//GetQueryLimit exposes the queryLimit variable
// GetInternalQueryLimit exposes the queryLimit variable
func GetInternalQueryLimit() int {
internalQueryLimit := viper.GetInt(confInternalQueryLimit)
// if queryLimit was unset, default to 1000
......@@ -125,6 +128,26 @@ func GetPvtdataStorePurgeInterval() uint64 {
return uint64(purgeInterval)
}
// GetPvtdataStoreCollElgProcMaxDbBatchSize returns the maximum db batch size for converting
// the ineligible missing data entries to eligible missing data entries
func GetPvtdataStoreCollElgProcMaxDbBatchSize() int {
collElgProcMaxDbBatchSize := viper.GetInt(confCollElgProcMaxDbBatchSize.Name)
if collElgProcMaxDbBatchSize <= 0 {
collElgProcMaxDbBatchSize = confCollElgProcMaxDbBatchSize.DefaultVal
}
return collElgProcMaxDbBatchSize
}
// GetPvtdataStoreCollElgProcDbBatchesInterval returns the minimum duration (in milliseconds) between writing
// two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries
func GetPvtdataStoreCollElgProcDbBatchesInterval() int {
collElgProcDbBatchesInterval := viper.GetInt(confCollElgProcDbBatchesInterval.Name)
if collElgProcDbBatchesInterval <= 0 {
collElgProcDbBatchesInterval = confCollElgProcDbBatchesInterval.DefaultVal
}
return collElgProcDbBatchesInterval
}
//IsHistoryDBEnabled exposes the historyDatabase variable
func IsHistoryDBEnabled() bool {
return viper.GetBool(confEnableHistoryDatabase)
......@@ -162,3 +185,8 @@ func GetWarmIndexesAfterNBlocks() int {
}
return warmAfterNBlocks
}
type conf struct {
Name string
DefaultVal int
}
......@@ -158,6 +158,22 @@ func TestPvtdataStorePurgeInterval(t *testing.T) {
assert.Equal(t, uint64(1000), updatedValue) //test config returns 1000
}
func TestPvtdataStoreCollElgProcMaxDbBatchSize(t *testing.T) {
defaultVal := confCollElgProcMaxDbBatchSize.DefaultVal
testVal := defaultVal + 1
assert.Equal(t, defaultVal, GetPvtdataStoreCollElgProcMaxDbBatchSize())
viper.Set("ledger.pvtdataStore.collElgProcMaxDbBatchSize", testVal)
assert.Equal(t, testVal, GetPvtdataStoreCollElgProcMaxDbBatchSize())
}
func TestCollElgProcDbBatchesInterval(t *testing.T) {
defaultVal := confCollElgProcDbBatchesInterval.DefaultVal
testVal := defaultVal + 1
assert.Equal(t, defaultVal, GetPvtdataStoreCollElgProcDbBatchesInterval())
viper.Set("ledger.pvtdataStore.collElgProcDbBatchesInterval", testVal)
assert.Equal(t, testVal, GetPvtdataStoreCollElgProcDbBatchesInterval())
}
func TestIsHistoryDBEnabledDefault(t *testing.T) {
setUpCoreYAMLConfig()
defaultValue := IsHistoryDBEnabled()
......
......@@ -8,11 +8,13 @@ package pvtdatastorage
import (
"bytes"
"math"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
"github.com/hyperledger/fabric/core/ledger/util"
"github.com/hyperledger/fabric/protos/ledger/rwset"
"github.com/pkg/errors"
"github.com/willf/bitset"
)
......@@ -23,6 +25,7 @@ var (
expiryKeyPrefix = []byte{3}
eligibleMissingDataKeyPrefix = []byte{4}
ineligibleMissingDataKeyPrefix = []byte{5}
collElgKeyPrefix = []byte{6}
nilByte = byte(0)
emptyValue = []byte{}
......@@ -145,9 +148,51 @@ func decodeMissingDataValue(bitmapBytes []byte) (*bitset.BitSet, error) {
return bitmap, nil
}
func encodeCollElgKey(blkNum uint64) []byte {
return append(collElgKeyPrefix, util.EncodeReverseOrderVarUint64(blkNum)...)
}
func decodeCollElgKey(b []byte) uint64 {
blkNum, _ := util.DecodeReverseOrderVarUint64(b[1:])
return blkNum
}
func encodeCollElgVal(m *CollElgInfo) ([]byte, error) {
return proto.Marshal(m)
}
func decodeCollElgVal(b []byte) (*CollElgInfo, error) {
m := &CollElgInfo{}
if err := proto.Unmarshal(b, m); err != nil {
return nil, errors.WithStack(err)
}
return m, nil
}
func createRangeScanKeysForEligibleMissingDataEntries(blkNum uint64) (startKey, endKey []byte) {
startKey = append(eligibleMissingDataKeyPrefix, util.EncodeReverseOrderVarUint64(blkNum)...)
endKey = append(eligibleMissingDataKeyPrefix, util.EncodeReverseOrderVarUint64(0)...)
return startKey, endKey
}
func createRangeScanKeysForIneligibleMissingData(maxBlkNum uint64, ns, coll string) (startKey, endKey []byte) {
startKey = encodeMissingDataKey(
&missingDataKey{
nsCollBlk: nsCollBlk{ns: ns, coll: coll, blkNum: maxBlkNum},
isEligible: false,
},
)
endKey = encodeMissingDataKey(
&missingDataKey{
nsCollBlk: nsCollBlk{ns: ns, coll: coll, blkNum: 0},
isEligible: false,
},
)
return
}
func createRangeScanKeysForCollElg() (startKey, endKey []byte) {
return encodeCollElgKey(math.MaxUint64),
encodeCollElgKey(0)
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: expiry_data.proto
// source: persistent_msgs.proto
package pvtdatastorage // import "github.com/hyperledger/fabric/core/ledger/pvtdatastorage"
......@@ -29,7 +29,7 @@ func (m *ExpiryData) Reset() { *m = ExpiryData{} }
func (m *ExpiryData) String() string { return proto.CompactTextString(m) }
func (*ExpiryData) ProtoMessage() {}
func (*ExpiryData) Descriptor() ([]byte, []int) {
return fileDescriptor_expiry_data_bdde9920f0b783de, []int{0}
return fileDescriptor_persistent_msgs_f79691cf3913ef09, []int{0}
}
func (m *ExpiryData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExpiryData.Unmarshal(m, b)
......@@ -72,7 +72,7 @@ func (m *Collections) Reset() { *m = Collections{} }
func (m *Collections) String() string { return proto.CompactTextString(m) }
func (*Collections) ProtoMessage() {}
func (*Collections) Descriptor() ([]byte, []int) {
return fileDescriptor_expiry_data_bdde9920f0b783de, []int{1}
return fileDescriptor_persistent_msgs_f79691cf3913ef09, []int{1}
}
func (m *Collections) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Collections.Unmarshal(m, b)
......@@ -117,7 +117,7 @@ func (m *TxNums) Reset() { *m = TxNums{} }
func (m *TxNums) String() string { return proto.CompactTextString(m) }
func (*TxNums) ProtoMessage() {}
func (*TxNums) Descriptor() ([]byte, []int) {
return fileDescriptor_expiry_data_bdde9920f0b783de, []int{2}
return fileDescriptor_persistent_msgs_f79691cf3913ef09, []int{2}
}
func (m *TxNums) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TxNums.Unmarshal(m, b)
......@@ -144,6 +144,82 @@ func (m *TxNums) GetList() []uint64 {
return nil
}
type CollElgInfo struct {
NsCollMap map[string]*CollNames `protobuf:"bytes,1,rep,name=nsCollMap" json:"nsCollMap,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CollElgInfo) Reset() { *m = CollElgInfo{} }
func (m *CollElgInfo) String() string { return proto.CompactTextString(m) }
func (*CollElgInfo) ProtoMessage() {}
func (*CollElgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_persistent_msgs_f79691cf3913ef09, []int{3}
}
func (m *CollElgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CollElgInfo.Unmarshal(m, b)
}
func (m *CollElgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CollElgInfo.Marshal(b, m, deterministic)
}
func (dst *CollElgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_CollElgInfo.Merge(dst, src)
}
func (m *CollElgInfo) XXX_Size() int {
return xxx_messageInfo_CollElgInfo.Size(m)
}
func (m *CollElgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_CollElgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_CollElgInfo proto.InternalMessageInfo
func (m *CollElgInfo) GetNsCollMap() map[string]*CollNames {
if m != nil {
return m.NsCollMap
}
return nil
}
type CollNames struct {
Entries []string `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CollNames) Reset() { *m = CollNames{} }
func (m *CollNames) String() string { return proto.CompactTextString(m) }
func (*CollNames) ProtoMessage() {}
func (*CollNames) Descriptor() ([]byte, []int) {
return fileDescriptor_persistent_msgs_f79691cf3913ef09, []int{4}
}
func (m *CollNames) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CollNames.Unmarshal(m, b)
}
func (m *CollNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CollNames.Marshal(b, m, deterministic)
}
func (dst *CollNames) XXX_Merge(src proto.Message) {
xxx_messageInfo_CollNames.Merge(dst, src)
}
func (m *CollNames) XXX_Size() int {
return xxx_messageInfo_CollNames.Size(m)
}
func (m *CollNames) XXX_DiscardUnknown() {
xxx_messageInfo_CollNames.DiscardUnknown(m)
}
var xxx_messageInfo_CollNames proto.InternalMessageInfo
func (m *CollNames) GetEntries() []string {
if m != nil {
return m.Entries
}
return nil
}
func init() {
proto.RegisterType((*ExpiryData)(nil), "pvtdatastorage.ExpiryData")
proto.RegisterMapType((map[string]*Collections)(nil), "pvtdatastorage.ExpiryData.MapEntry")
......@@ -151,30 +227,40 @@ func init() {
proto.RegisterMapType((map[string]*TxNums)(nil), "pvtdatastorage.Collections.MapEntry")
proto.RegisterMapType((map[string]bool)(nil), "pvtdatastorage.Collections.MissingDataMapEntry")
proto.RegisterType((*TxNums)(nil), "pvtdatastorage.TxNums")
proto.RegisterType((*CollElgInfo)(nil), "pvtdatastorage.CollElgInfo")
proto.RegisterMapType((map[string]*CollNames)(nil), "pvtdatastorage.CollElgInfo.NsCollMapEntry")
proto.RegisterType((*CollNames)(nil), "pvtdatastorage.CollNames")
}
func init() {
proto.RegisterFile("persistent_msgs.proto", fileDescriptor_persistent_msgs_f79691cf3913ef09)
}
func init() { proto.RegisterFile("expiry_data.proto", fileDescriptor_expiry_data_bdde9920f0b783de) }
var fileDescriptor_expiry_data_bdde9920f0b783de = []byte{
// 309 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x4a, 0xf3, 0x40,
0x14, 0xc5, 0x99, 0xa4, 0x5f, 0xe9, 0x77, 0x03, 0x45, 0x47, 0x91, 0x10, 0x5d, 0x84, 0xea, 0x22,
0x0b, 0x49, 0xb0, 0xa2, 0x94, 0xee, 0xfc, 0xd3, 0x65, 0xbb, 0x88, 0x82, 0xe0, 0x46, 0x26, 0xe9,
0x98, 0x0e, 0x26, 0x99, 0x61, 0x32, 0x29, 0xcd, 0x9b, 0xf8, 0x1a, 0xbe, 0xa1, 0x24, 0x51, 0x4c,
0x42, 0xc9, 0xee, 0xce, 0x9d, 0x33, 0xbf, 0x7b, 0xce, 0x65, 0xe0, 0x90, 0xee, 0x04, 0x93, 0xc5,
0xdb, 0x9a, 0x28, 0xe2, 0x0a, 0xc9, 0x15, 0xc7, 0x63, 0xb1, 0x55, 0xe5, 0x31, 0x53, 0x5c, 0x92,
0x88, 0x4e, 0x3e, 0x11, 0xc0, 0xa2, 0x52, 0x3d, 0x12, 0x45, 0xf0, 0x0d, 0xe8, 0x09, 0x11, 0x26,
0xb2, 0x75, 0xc7, 0x98, 0x9e, 0xbb, 0x6d, 0xb1, 0xfb, 0x27, 0x74, 0x97, 0x44, 0x2c, 0x52, 0x25,
0x0b, 0xbf, 0xd4, 0x5b, 0x4f, 0x30, 0xfa, 0x6d, 0xe0, 0x03, 0xd0, 0x3f, 0x68, 0x61, 0x22, 0x1b,
0x39, 0xff, 0xfd, 0xb2, 0xc4, 0x57, 0xf0, 0x6f, 0x4b, 0xe2, 0x9c, 0x9a, 0x9a, 0x8d, 0x1c, 0x63,
0x7a, 0xda, 0xc5, 0x3e, 0xf0, 0x38, 0xa6, 0xa1, 0x62, 0x3c, 0xcd, 0xfc, 0x5a, 0x39, 0xd7, 0x66,
0x68, 0xf2, 0xa5, 0x81, 0xd1, 0xb8, 0xc2, 0xb7, 0x4d, 0x6f, 0x17, 0x3d, 0x90, 0xb6, 0x39, 0xfc,
0x02, 0xe3, 0x84, 0x65, 0x19, 0x4b, 0xa3, 0xd2, 0xf9, 0x92, 0x08, 0x53, 0xab, 0x10, 0x5e, 0x2f,
0xa2, 0xf5, 0xa2, 0xa6, 0x75, 0x30, 0xd6, 0xaa, 0x37, 0xf5, 0x65, 0x3b, 0xf5, 0x49, 0x77, 0xda,
0xf3, 0x6e, 0x95, 0x27, 0xcd, 0xc0, 0xd6, 0x1d, 0x1c, 0xed, 0x19, 0xbb, 0x07, 0x7d, 0xdc, 0x44,
0x8f, 0x9a, 0x3b, 0x3b, 0x83, 0x61, 0xcd, 0xc5, 0x18, 0x06, 0x31, 0xcb, 0x54, 0xb5, 0xae, 0x81,
0x5f, 0xd5, 0xf7, 0xf3, 0xd7, 0x59, 0xc4, 0xd4, 0x26, 0x0f, 0xdc, 0x90, 0x27, 0xde, 0xa6, 0x10,
0x54, 0xc6, 0x74, 0x1d, 0x51, 0xe9, 0xbd, 0x93, 0x40, 0xb2, 0xd0, 0x0b, 0xb9, 0xa4, 0xde, 0x4f,
0xab, 0x6d, 0x37, 0x18, 0x56, 0xff, 0xe7, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x0e, 0xe9,
0x91, 0x54, 0x02, 0x00, 0x00,
var fileDescriptor_persistent_msgs_f79691cf3913ef09 = []byte{
// 389 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xdf, 0x6a, 0xdb, 0x30,
0x14, 0xc6, 0xb1, 0x93, 0x65, 0xf1, 0x09, 0x84, 0xa1, 0xfd, 0xc1, 0xf3, 0x76, 0x11, 0xb2, 0x0d,
0xc2, 0x18, 0x36, 0xcb, 0xd8, 0x08, 0xb9, 0x5b, 0xdb, 0x40, 0x7b, 0x11, 0x5f, 0xb8, 0x85, 0x40,
0x6f, 0x8a, 0xe2, 0x28, 0x8e, 0xa8, 0x6d, 0x09, 0x49, 0x09, 0xf1, 0x9b, 0xf4, 0x31, 0xda, 0x37,
0x2c, 0xb6, 0xf3, 0xc7, 0x0a, 0x26, 0x77, 0xf2, 0xd1, 0x77, 0x7e, 0xe7, 0x3b, 0x1f, 0x16, 0x7c,
0xe4, 0x44, 0x48, 0x2a, 0x15, 0x49, 0xd5, 0x43, 0x22, 0x23, 0xe9, 0x72, 0xc1, 0x14, 0x43, 0x5d,
0xbe, 0x51, 0x0b, 0xac, 0xb0, 0x54, 0x4c, 0xe0, 0x88, 0xf4, 0x9f, 0x0c, 0x80, 0xc9, 0x96, 0x53,
0x91, 0x5d, 0x61, 0x85, 0xd1, 0x5f, 0x68, 0x24, 0x98, 0xdb, 0x46, 0xaf, 0x31, 0xe8, 0x0c, 0xbf,
0xb9, 0xba, 0xd8, 0x3d, 0x0a, 0xdd, 0x29, 0xe6, 0x93, 0x54, 0x89, 0x2c, 0xc8, 0xf5, 0xce, 0x2d,
0xb4, 0xf7, 0x05, 0xf4, 0x0e, 0x1a, 0x8f, 0x24, 0xb3, 0x8d, 0x9e, 0x31, 0xb0, 0x82, 0xfc, 0x88,
0x7e, 0xc3, 0x9b, 0x0d, 0x8e, 0xd7, 0xc4, 0x36, 0x7b, 0xc6, 0xa0, 0x33, 0xfc, 0x72, 0x8a, 0xbd,
0x64, 0x71, 0x4c, 0x42, 0x45, 0x59, 0x2a, 0x83, 0x52, 0x39, 0x36, 0x47, 0x46, 0xff, 0xc5, 0x84,
0x4e, 0xe5, 0x0a, 0xfd, 0xab, 0x7a, 0xfb, 0x7e, 0x06, 0xa2, 0x9b, 0x43, 0x33, 0xe8, 0x26, 0x54,
0x4a, 0x9a, 0x46, 0xb9, 0xf3, 0x29, 0xe6, 0xb6, 0x59, 0x20, 0xbc, 0xb3, 0x08, 0xad, 0xa3, 0xa4,
0x9d, 0x60, 0x1c, 0xff, 0xec, 0xd6, 0xbf, 0xf4, 0xad, 0x3f, 0x9d, 0x4e, 0xbb, 0xdb, 0xfa, 0xeb,
0xa4, 0xba, 0xb0, 0xf3, 0x1f, 0xde, 0xd7, 0x8c, 0xad, 0x41, 0x7f, 0xa8, 0xa2, 0xdb, 0xd5, 0xcc,
0xbe, 0x42, 0xab, 0xe4, 0x22, 0x04, 0xcd, 0x98, 0x4a, 0x55, 0xc4, 0xd5, 0x0c, 0x8a, 0x73, 0xff,
0xd9, 0x28, 0x13, 0x9d, 0xc4, 0xd1, 0x4d, 0xba, 0x64, 0xe8, 0x1a, 0xac, 0x54, 0xe6, 0x85, 0xe9,
0x21, 0xd7, 0x9f, 0x75, 0xa1, 0xec, 0xf4, 0xae, 0xbf, 0x17, 0x97, 0x79, 0x1c, 0x9b, 0x9d, 0x19,
0x74, 0xf5, 0xcb, 0x1a, 0xd7, 0x9e, 0x1e, 0xc8, 0xe7, 0xba, 0x49, 0x3e, 0x4e, 0x88, 0xf6, 0x13,
0xfc, 0x00, 0xeb, 0x50, 0x47, 0x36, 0xbc, 0x25, 0xa9, 0x12, 0x94, 0xc8, 0xc2, 0xad, 0x15, 0xec,
0x3f, 0x2f, 0xc6, 0xf7, 0xa3, 0x88, 0xaa, 0xd5, 0x7a, 0xee, 0x86, 0x2c, 0xf1, 0x56, 0x19, 0x27,
0x22, 0x26, 0x8b, 0x88, 0x08, 0x6f, 0x89, 0xe7, 0x82, 0x86, 0x5e, 0xc8, 0x04, 0xf1, 0x76, 0x25,
0x7d, 0xee, 0xbc, 0x55, 0xbc, 0x8c, 0x3f, 0xaf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x3e, 0x75,
0xee, 0x32, 0x03, 0x00, 0x00,
}
......@@ -26,3 +26,11 @@ message Collections {
message TxNums {
repeated uint64 list = 1;
}
message CollElgInfo {
map<string, CollNames> nsCollMap = 1;
}
message CollNames {
repeated string entries = 1;
}
......@@ -37,3 +37,16 @@ func (e *ExpiryData) addMissingData(ns, coll string) {
collections := e.getOrCreateCollections(ns)
collections.MissingDataMap[coll] = true
}
func newCollElgInfo(nsCollMap map[string][]string) *CollElgInfo {
m := &CollElgInfo{NsCollMap: map[string]*CollNames{}}
for ns, colls := range nsCollMap {
collNames, ok := m.NsCollMap[ns]
if !ok {
collNames = &CollNames{}
m.NsCollMap[ns] = collNames
}
collNames.Entries = colls
}
return m
}
......@@ -60,6 +60,11 @@ type Store interface {
Commit() error
// Rollback rolls back the pvt data passed in the previous invoke to the `Prepare` function
Rollback() error
// ProcessCollsEligibilityEnabled notifies the store when the peer becomes eligible to receive data for an
// existing collection. Parameter 'committingBlk' refers to the block number that contains the corresponding
// collection upgrade transaction and the parameter 'nsCollMap' contains the collections for which the peer
// is now eligible to receive pvt data
ProcessCollsEligibilityEnabled(committingBlk uint64, nsCollMap map[string][]string) error
// IsEmpty returns true if the store does not have any block committed yet
IsEmpty() (bool, error)
// LastCommittedBlockHeight returns the height of the last committed block
......
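
Editor's illustration (not part of the diff): a minimal sketch of how a caller might use the new ProcessCollsEligibilityEnabled method. Only the method signature comes from this change; the function name, namespace, and collection names below are hypothetical examples.

// Hypothetical caller sketch; "marbles_cc" and "collectionMarbles" are example names.
package example

import (
	"github.com/hyperledger/fabric/core/ledger/pvtdatastorage"
	"github.com/pkg/errors"
)

// notifyCollEligibilityChange records that this peer became eligible, at block
// committingBlk, to receive pvt data for existing collections of a namespace.
func notifyCollEligibilityChange(s pvtdatastorage.Store, committingBlk uint64) error {
	nsCollMap := map[string][]string{
		"marbles_cc": {"collectionMarbles"},
	}
	// The store persists a collection-eligibility event and wakes a background
	// goroutine that converts the matching 'ineligible' missing-data entries to 'eligible'.
	if err := s.ProcessCollsEligibilityEnabled(committingBlk, nsCollMap); err != nil {
		return errors.WithMessage(err, "failed to record collection eligibility event")
	}
	return nil
}
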
......@@ -10,6 +10,7 @@ import (
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
......@@ -35,6 +36,7 @@ type store struct {
lastCommittedBlock uint64
batchPending bool
purgerLock sync.Mutex
collElgProcSync *collElgProcSync
}
type blkTranNumKey []byte
......@@ -88,10 +90,16 @@ func NewProvider() Provider {
// OpenStore returns a handle to a store
func (p *provider) OpenStore(ledgerid string) (Store, error) {
dbHandle := p.dbProvider.GetDBHandle(ledgerid)
s := &store{db: dbHandle, ledgerid: ledgerid}
s := &store{db: dbHandle, ledgerid: ledgerid,
collElgProcSync: &collElgProcSync{
notification: make(chan bool, 1),
procComplete: make(chan bool, 1),
},
}
if err := s.initState(); err != nil {
return nil, err
}
s.launchCollElgProc()
logger.Debugf("Pvtdata store opened. Initial state: isEmpty [%t], lastCommittedBlock [%d], batchPending [%t]",
s.isEmpty, s.lastCommittedBlock, s.batchPending)
return s, nil
......@@ -352,6 +360,22 @@ func (s *store) GetMissingPvtDataInfoForMostRecentBlocks(maxBlock int) (ledger.M
return missingPvtDataInfo, nil
}
func (s *store) ProcessCollsEligibilityEnabled(committingBlk uint64, nsCollMap map[string][]string) error {
key := encodeCollElgKey(committingBlk)
m := newCollElgInfo(nsCollMap)
val, err := encodeCollElgVal(m)
if err != nil {
return err
}
batch := leveldbhelper.NewUpdateBatch()
batch.Put(key, val)
if err = s.db.WriteBatch(batch, true); err != nil {
return err
}
s.collElgProcSync.notify()
return nil
}
func (s *store) performPurgeIfScheduled(latestCommittedBlk uint64) {
if latestCommittedBlk%ledgerconfig.GetPvtdataStorePurgeInterval() != 0 {
return
......@@ -411,6 +435,80 @@ func (s *store) retrieveExpiryEntries(minBlkNum, maxBlkNum uint64) ([]*expiryEnt
return expiryEntries, nil
}
func (s *store) launchCollElgProc() {
go func() {
s.processCollElgEvents() // process collection eligibility events when the store is opened, in case there are unprocessed events from a previous run
for {
logger.Debugf("Waiting for collection eligibility event")
s.collElgProcSync.waitForNotification()
s.processCollElgEvents()
s.collElgProcSync.done()
}
}()
}
func (s *store) processCollElgEvents() {
logger.Debugf("Starting to process collection eligibility events")
maxBatchSize := ledgerconfig.GetPvtdataStoreCollElgProcMaxDbBatchSize()
batchesInterval := ledgerconfig.GetPvtdataStoreCollElgProcDbBatchesInterval()
s.purgerLock.Lock()
defer s.purgerLock.Unlock()
collElgStartKey, collElgEndKey := createRangeScanKeysForCollElg()
eventItr := s.db.GetIterator(collElgStartKey, collElgEndKey)
defer eventItr.Release()
batch := leveldbhelper.NewUpdateBatch()
totalEntriesConverted := 0
for eventItr.Next() {
collElgKey, collElgVal := eventItr.Key(), eventItr.Value()
blkNum := decodeCollElgKey(collElgKey)
collElgInfo, err := decodeCollElgVal(collElgVal)
if err != nil {
logger.Errorf("This error is not expected %s", err)
continue
}
logger.Debugf("Processing collection eligibility event [blkNum=%d], collElgInfo=%s", blkNum, collElgInfo)
for ns, colls := range collElgInfo.NsCollMap {
var coll string
for _, coll = range colls.Entries {
logger.Infof("Converting missing data entries from inelligible to eligible for [ns=%s, coll=%s]", ns, coll)
startKey, endKey := createRangeScanKeysForIneligibleMissingData(blkNum, ns, coll)
collItr := s.db.GetIterator(startKey, endKey)
collEntriesConverted := 0
for collItr.Next() { // each entry
originalKey, originalVal := collItr.Key(), collItr.Value()
modifiedKey := decodeMissingDataKey(originalKey)
modifiedKey.isEligible = true
batch.Delete(originalKey)
copyVal := make([]byte, len(originalVal))
copy(copyVal, originalVal)
batch.Put(encodeMissingDataKey(modifiedKey), copyVal)
collEntriesConverted++
if batch.Len() > maxBatchSize {
s.db.WriteBatch(batch, true)
batch = leveldbhelper.NewUpdateBatch()
sleepTime := time.Duration(batchesInterval)
logger.Infof("Going to sleep for %d milliseconds between batches. Entries for [ns=%s, coll=%s] converted so far = %d",
sleepTime, ns, coll, collEntriesConverted)
s.purgerLock.Unlock()
time.Sleep(sleepTime * time.Millisecond)
s.purgerLock.Lock()
}
} // entry loop
collItr.Release()
logger.Infof("Converted all [%d] entries for [ns=%s, coll=%s]", collEntriesConverted, ns, coll)
totalEntriesConverted += collEntriesConverted
} // coll loop
} // ns loop
batch.Delete(collElgKey) // delete the collection eligibility event key as well
} // event loop
s.db.WriteBatch(batch, true)
logger.Debugf("Converted [%d] inelligible mising data entries to elligible", totalEntriesConverted)
}
// LastCommittedBlockHeight implements the function in the interface `Store`
func (s *store) LastCommittedBlockHeight() (uint64, error) {
if s.isEmpty {
......@@ -458,3 +556,31 @@ func (s *store) getLastCommittedBlockNum() (bool, uint64, error) {
}
return false, decodeLastCommittedBlockVal(v), nil
}
type collElgProcSync struct {
notification, procComplete chan bool
}
func (sync *collElgProcSync) notify() {
select {
case sync.notification <- true:
logger.Debugf("Signaled to collection elgibility processing routine")
default: //noop
logger.Debugf("Previous signal still pending. Skipping new signal")
}
}
func (sync *collElgProcSync) waitForNotification() {
<-sync.notification
}
func (sync *collElgProcSync) done() {
select {
case sync.procComplete <- true:
default:
}
}
func (sync *collElgProcSync) waitForDone() {
<-sync.procComplete
}
......@@ -13,12 +13,13 @@ import (
"testing"
"time"
"github.com/hyperledger/fabric/core/ledger/pvtdatapolicy"
"github.com/hyperledger/fabric/core/ledger/ledgerconfig"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
"github.com/hyperledger/fabric/core/ledger/pvtdatapolicy"
btltestutil "github.com/hyperledger/fabric/core/ledger/pvtdatapolicy/testutil"
"github.com/spf13/viper"