Unverified Commit b6dc844a authored by yacovm, committed by Artem Barger
Browse files

[FAB-13363] Block verification for onboarding



This change set connects the block verification infrastructure
for onboarding to the production code.

Now, whenever an orderer onboards a channel - it also verifies the blocks
of the application channels, by:

1) Creating a bundle from the genesis block, which is derived from
   the system channel (which is verified using backward hash chain validation).
2) Verifying blocks using the bundle.
3) Replacing the bundle with a new bundle whenever a config block is pulled.

It also adds a check in the integration test, that ensures that no errors
are reported in the log of the onboarded OSN.

Change-Id: I3c5714f9d4491cdfd78e4e47407925136906d413
Signed-off-by: yacovm <yacovm@il.ibm.com>
Signed-off-by: Artem Barger <bartem@il.ibm.com>
parent 100e1ad7
......@@ -330,6 +330,9 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() {
"testchannel2": 2,
}, []*nwo.Orderer{o4}, peer, network)
By("Ensuring orderer4 doesn't have any errors in the logs")
Expect(orderer4Runner.Err()).ToNot(gbytes.Say("ERRO"))
By("Ensuring that all orderers don't log errors to the log")
assertNoErrorsAreLogged(ordererRunners)
......
......@@ -138,7 +138,7 @@ func (p *BlockPuller) tryFetchBlock(seq uint64) *common.Block {
return nil
}
if err := p.VerifyBlockSequence(p.blockBuff); err != nil {
if err := p.VerifyBlockSequence(p.blockBuff, p.Channel); err != nil {
p.Close()
p.Logger.Errorf("Failed verifying received blocks: %v", err)
return nil
......@@ -360,7 +360,7 @@ func (p *BlockPuller) fetchLastBlockSeq(minRequestedSequence uint64, endpoint st
block, err := extractBlockFromResponse(resp)
if err != nil {
p.Logger.Errorf("Received %v from %s: %v", resp, endpoint, err)
p.Logger.Warningf("Received %v from %s: %v", resp, endpoint, err)
return 0, err
}
stream.CloseSend()
......
......@@ -107,7 +107,7 @@ func (d *countingDialer) Dial(address string) (*grpc.ClientConn, error) {
return grpc.DialContext(ctx, address, grpc.WithBlock(), grpc.WithInsecure(), balancer)
}
func noopBlockVerifierf(_ []*common.Block) error {
func noopBlockVerifierf(_ []*common.Block, _ string) error {
return nil
}
......@@ -739,7 +739,7 @@ func TestBlockPullerFailures(t *testing.T) {
}
malformBlockSignatureAndRecreateOSNBuffer := func(osn *deliverServer, bp *cluster.BlockPuller) {
bp.VerifyBlockSequence = func([]*common.Block) error {
bp.VerifyBlockSequence = func(_ []*common.Block, _ string) error {
close(osn.blocks())
osn.setBlocks(make(chan *orderer.DeliverResponse, 100))
osn.enqueueResponse(1)
......
......@@ -124,34 +124,33 @@ func (r *Replicator) ReplicateChains() []string {
pullHints := r.channelsToPull(channels)
totalChannelCount := len(pullHints.channelsToPull) + len(pullHints.channelsNotToPull)
r.Logger.Info("Found myself in", len(pullHints.channelsToPull), "channels out of", totalChannelCount, ":", pullHints)
// Append the genesis blocks of the application channels we have into the ledger
for _, channels := range [][]ChannelGenesisBlock{pullHints.channelsToPull, pullHints.channelsNotToPull} {
for _, channel := range channels {
ledger, err := r.LedgerFactory.GetOrCreate(channel.ChannelName)
if err != nil {
r.Logger.Panicf("Failed to create a ledger for channel %s: %v", channel.ChannelName, err)
}
gb, err := ChannelCreationBlockToGenesisBlock(channel.GenesisBlock)
if err != nil {
r.Logger.Panicf("Failed converting channel creation block for channel %s to genesis block: %v",
channel.ChannelName, err)
}
r.appendBlock(gb, ledger, channel.ChannelName)
}
}
for _, channel := range pullHints.channelsToPull {
err := r.PullChannel(channel.ChannelName)
if err == nil {
replicatedChains = append(replicatedChains, channel.ChannelName)
} else {
r.Logger.Warningf("Failed pulling channel %s: %v", channel.ChannelName, err)
// Append the channel we failed pulling to the channels not to pull, in order to commit the genesis block
// so that we mark it for replication in the future.
pullHints.channelsNotToPull = append(pullHints.channelsNotToPull, channel)
}
}
// Next, just commit the genesis blocks of the channels we shouldn't pull.
for _, channel := range pullHints.channelsNotToPull {
ledger, err := r.LedgerFactory.GetOrCreate(channel.ChannelName)
if err != nil {
r.Logger.Panicf("Failed to create a ledger for channel %s: %v", channel.ChannelName, err)
}
// Write a placeholder genesis block to the ledger, just to make an inactive.Chain start
// when onboarding is finished
gb, err := ChannelCreationBlockToGenesisBlock(channel.GenesisBlock)
if err != nil {
r.Logger.Panicf("Failed converting channel creation block for channel %s to genesis block: %v",
channel.ChannelName, err)
}
r.appendBlockIfNeeded(gb, ledger, channel.ChannelName)
}
// Last, pull the system chain
// Last, pull the system chain.
if err := r.PullChannel(r.SystemChannel); err != nil && err != ErrSkipped {
r.Logger.Panicf("Failed pulling system channel: %v", err)
}
......@@ -179,6 +178,11 @@ func (r *Replicator) PullChannel(channel string) error {
defer puller.Close()
puller.Channel = channel
ledger, err := r.LedgerFactory.GetOrCreate(channel)
if err != nil {
r.Logger.Panicf("Failed to create a ledger for channel %s: %v", channel, err)
}
endpoint, latestHeight, _ := latestHeightAndEndpoint(puller)
if endpoint == "" {
return errors.Errorf("failed obtaining the latest block for channel %s", channel)
......@@ -191,23 +195,20 @@ func (r *Replicator) PullChannel(channel string) error {
return errors.Errorf("latest height found among system channel(%s) orderers is %d, but the boot block's "+
"sequence is %d", r.SystemChannel, latestHeight, r.BootBlock.Header.Number)
}
return r.pullChannelBlocks(channel, puller, latestHeight)
return r.pullChannelBlocks(channel, puller, latestHeight, ledger)
}
func (r *Replicator) pullChannelBlocks(channel string, puller ChainPuller, latestHeight uint64) error {
ledger, err := r.LedgerFactory.GetOrCreate(channel)
if err != nil {
r.Logger.Panicf("Failed to create a ledger for channel %s: %v", channel, err)
}
// Pull the genesis block and remember its hash.
genesisBlock := puller.PullBlock(0)
if genesisBlock == nil {
func (r *Replicator) pullChannelBlocks(channel string, puller *BlockPuller, latestHeight uint64, ledger LedgerWriter) error {
nextBlockToPull := ledger.Height()
// Pull the next block and remember its hash.
nextBlock := puller.PullBlock(nextBlockToPull)
if nextBlock == nil {
return ErrRetryCountExhausted
}
r.appendBlockIfNeeded(genesisBlock, ledger, channel)
actualPrevHash := genesisBlock.Header.Hash()
r.appendBlock(nextBlock, ledger, channel)
actualPrevHash := nextBlock.Header.Hash()
for seq := uint64(1); seq < latestHeight; seq++ {
for seq := uint64(nextBlockToPull + 1); seq < latestHeight; seq++ {
block := puller.PullBlock(seq)
if block == nil {
return ErrRetryCountExhausted
......@@ -220,20 +221,19 @@ func (r *Replicator) pullChannelBlocks(channel string, puller ChainPuller, lates
actualPrevHash = block.Header.Hash()
if channel == r.SystemChannel && block.Header.Number == r.BootBlock.Header.Number {
r.compareBootBlockWithSystemChannelLastConfigBlock(block)
r.appendBlockIfNeeded(block, ledger, channel)
r.appendBlock(block, ledger, channel)
// No need to pull further blocks from the system channel
return nil
}
r.appendBlockIfNeeded(block, ledger, channel)
r.appendBlock(block, ledger, channel)
}
return nil
}
func (r *Replicator) appendBlockIfNeeded(block *common.Block, ledger LedgerWriter, channel string) {
currHeight := ledger.Height()
if currHeight >= block.Header.Number+1 {
r.Logger.Infof("Already at height %d for channel %s, skipping commit of block %d",
currHeight, channel, block.Header.Number)
func (r *Replicator) appendBlock(block *common.Block, ledger LedgerWriter, channel string) {
height := ledger.Height()
if height > block.Header.Number {
r.Logger.Infof("Skipping commit of block %d for channel %s because height is at %d", block.Header.Number, channel, height)
return
}
if err := ledger.Append(block); err != nil {
......@@ -262,6 +262,18 @@ type channelPullHints struct {
func (r *Replicator) channelsToPull(channels GenesisBlocks) channelPullHints {
r.Logger.Info("Evaluating channels to pull:", channels.Names())
// Backup the verifier of the puller
verifier := r.Puller.VerifyBlockSequence
// Restore it at the end of the function
defer func() {
r.Puller.VerifyBlockSequence = verifier
}()
// Set it to be a no-op verifier, because we can't verify latest blocks of channels.
r.Puller.VerifyBlockSequence = func(blocks []*common.Block, channel string) error {
return nil
}
var channelsNotToPull []ChannelGenesisBlock
var channelsToPull []ChannelGenesisBlock
for _, channel := range channels {
......@@ -318,8 +330,16 @@ type PullerConfig struct {
MaxTotalBufferBytes int
}
//go:generate mockery -dir . -name VerifierRetriever -case underscore -output mocks/
// VerifierRetriever retrieves BlockVerifiers for channels.
type VerifierRetriever interface {
// RetrieveVerifier retrieves a BlockVerifier for the given channel.
// It may return nil when no verifier can be acquired for the channel;
// callers are expected to check for nil before using the result.
RetrieveVerifier(channel string) BlockVerifier
}
// BlockPullerFromConfigBlock returns a BlockPuller that verifies received blocks with verifiers obtained from the given VerifierRetriever.
func BlockPullerFromConfigBlock(conf PullerConfig, block *common.Block) (*BlockPuller, error) {
func BlockPullerFromConfigBlock(conf PullerConfig, block *common.Block, verifierRetriever VerifierRetriever) (*BlockPuller, error) {
if block == nil {
return nil, errors.New("nil block")
}
......@@ -350,8 +370,12 @@ func BlockPullerFromConfigBlock(conf PullerConfig, block *common.Block) (*BlockP
Logger: flogging.MustGetLogger("orderer.common.cluster.replication"),
Dialer: dialer,
TLSCert: tlsCertAsDER.Bytes,
VerifyBlockSequence: func(blocks []*common.Block) error {
return VerifyBlocks(blocks, &NoopBlockVerifier{})
VerifyBlockSequence: func(blocks []*common.Block, channel string) error {
verifier := verifierRetriever.RetrieveVerifier(channel)
if verifier == nil {
return errors.Errorf("couldn't acquire verifier for channel %s", channel)
}
return VerifyBlocks(blocks, verifier)
},
MaxTotalBufferBytes: conf.MaxTotalBufferBytes,
Endpoints: endpointconfig.Endpoints,
......
......@@ -9,6 +9,7 @@ package cluster_test
import (
"io/ioutil"
"path/filepath"
"strings"
"testing"
"time"
......@@ -468,7 +469,7 @@ func TestReplicateChainsGreenPath(t *testing.T) {
channelLister.On("Channels").Return([]cluster.ChannelGenesisBlock{
{ChannelName: "E", GenesisBlock: fakeGB},
{ChannelName: "D", GenesisBlock: fakeGB}, {ChannelName: "C", GenesisBlock: fakeGB},
{ChannelName: "A"}, {ChannelName: "B", GenesisBlock: fakeGB},
{ChannelName: "A", GenesisBlock: fakeGB}, {ChannelName: "B", GenesisBlock: fakeGB},
})
channelLister.On("Close")
......@@ -651,9 +652,9 @@ func TestReplicateChainsGreenPath(t *testing.T) {
// From this point onwards, we pull the blocks for the chain.
osn.enqueueResponse(30)
osn.addExpectProbeAssert()
osn.addExpectPullAssert(0)
osn.addExpectPullAssert(10)
// Enqueue 31 blocks in its belly
for _, block := range createBlockChain(0, 30) {
for _, block := range createBlockChain(10, 30) {
osn.blockResponses <- &orderer.DeliverResponse{
Type: &orderer.DeliverResponse_Block{Block: block},
}
......@@ -895,14 +896,21 @@ func TestBlockPullerFromConfigBlockFailures(t *testing.T) {
},
} {
t.Run(testCase.name, func(t *testing.T) {
bp, err := cluster.BlockPullerFromConfigBlock(testCase.pullerConfig, testCase.block)
verifierRetriever := &mocks.VerifierRetriever{}
verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(&cluster.NoopBlockVerifier{})
bp, err := cluster.BlockPullerFromConfigBlock(testCase.pullerConfig, testCase.block, verifierRetriever)
assert.EqualError(t, err, testCase.expectedErr)
assert.Nil(t, bp)
})
}
}
func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) {
func testBlockPullerFromConfig(t *testing.T, blockVerifiers []cluster.BlockVerifier, expectedLogMsg string, iterations int) {
verifierRetriever := &mocks.VerifierRetriever{}
for _, blockVerifier := range blockVerifiers {
verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(blockVerifier).Once()
}
caCert, err := ioutil.ReadFile(filepath.Join("testdata", "ca.crt"))
assert.NoError(t, err)
......@@ -942,16 +950,23 @@ func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) {
injectOrdererEndpoint(t, validBlock, osn.srv.Address())
validBlock.Header.DataHash = validBlock.Data.Hash()
blockMsg := &orderer.DeliverResponse_Block{
Block: validBlock,
}
for attempt := 0; attempt < iterations; attempt++ {
blockMsg := &orderer.DeliverResponse_Block{
Block: validBlock,
}
osn.blockResponses <- &orderer.DeliverResponse{
Type: blockMsg,
}
osn.blockResponses <- &orderer.DeliverResponse{
Type: blockMsg,
}
osn.blockResponses <- &orderer.DeliverResponse{
Type: blockMsg,
osn.blockResponses <- &orderer.DeliverResponse{
Type: blockMsg,
}
osn.blockResponses <- nil
osn.addExpectProbeAssert()
osn.addExpectPullAssert(0)
}
bp, err := cluster.BlockPullerFromConfigBlock(cluster.PullerConfig{
......@@ -961,15 +976,52 @@ func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) {
Channel: "mychannel",
Signer: &crypto.LocalSigner{},
Timeout: time.Hour,
}, validBlock)
}, validBlock, verifierRetriever)
bp.RetryTimeout = time.Millisecond * 10
assert.NoError(t, err)
defer bp.Close()
osn.addExpectProbeAssert()
osn.addExpectPullAssert(0)
var seenExpectedLogMsg bool
bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
if strings.Contains(entry.Message, expectedLogMsg) {
seenExpectedLogMsg = true
}
return nil
}))
block := bp.PullBlock(0)
assert.Equal(t, uint64(0), block.Header.Number)
assert.True(t, seenExpectedLogMsg)
}
// TestBlockPullerFromConfigBlockGreenPath drives testBlockPullerFromConfig
// over a table of verifier setups: a straightforward success case, and a case
// where the first verifier retrieval yields nil so the pull only succeeds on
// the second attempt.
func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) {
	cases := []struct {
		description        string
		blockVerifiers     []cluster.BlockVerifier
		expectedLogMessage string
		iterations         int
	}{
		{
			description:        "Success",
			blockVerifiers:     []cluster.BlockVerifier{&cluster.NoopBlockVerifier{}},
			expectedLogMessage: "Got block 0 of size",
			iterations:         1,
		},
		{
			description: "Failure",
			iterations:  2,
			// A nil verifier is returned on the first attempt; the second
			// attempt behaves like the success case.
			blockVerifiers: []cluster.BlockVerifier{nil, &cluster.NoopBlockVerifier{}},
			expectedLogMessage: "Failed verifying received blocks: " +
				"couldn't acquire verifier for channel mychannel",
		},
	}

	for _, tc := range cases {
		t.Run(tc.description, func(t *testing.T) {
			testBlockPullerFromConfig(t, tc.blockVerifiers, tc.expectedLogMessage, tc.iterations)
		})
	}
}
func TestNoopBlockVerifier(t *testing.T) {
......
......@@ -48,7 +48,7 @@ func (s *Service) Step(ctx context.Context, request *orderer.StepRequest) (*orde
defer s.StepLogger.Debugf("Closing connection from %s", addr)
response, err := s.Dispatcher.DispatchStep(ctx, request)
if err != nil {
s.Logger.Warningf("Handling of Step() from %s failed: %+v", addr, err)
s.Logger.Warningf("Handling of Step() from %s failed: %v", addr, err)
}
return response, err
}
......
......@@ -205,7 +205,7 @@ type BlockVerifier interface {
// BlockSequenceVerifier verifies that the given consecutive sequence
// of blocks is valid.
type BlockSequenceVerifier func(blocks []*common.Block) error
type BlockSequenceVerifier func(blocks []*common.Block, channel string) error
// Dialer creates a gRPC connection to a remote address
type Dialer interface {
......@@ -268,6 +268,13 @@ func ConfigFromBlock(block *common.Block) (*common.ConfigEnvelope, error) {
if err != nil {
return nil, errors.WithStack(err)
}
if block.Header.Number == 0 {
configEnvelope, err := configtx.UnmarshalConfigEnvelope(payload.Data)
if err != nil {
return nil, errors.Wrap(err, "invalid config envelope")
}
return configEnvelope, nil
}
if payload.Header == nil {
return nil, errors.New("nil header in payload")
}
......
......@@ -566,6 +566,21 @@ func TestConfigFromBlockBadInput(t *testing.T) {
expectedError: "empty block",
block: &common.Block{Data: &common.BlockData{}},
},
{
name: "invalid payload",
expectedError: "error unmarshaling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)",
block: &common.Block{Data: &common.BlockData{Data: [][]byte{{1, 2, 3}}}},
},
{
name: "bad genesis block",
expectedError: "invalid config envelope: proto: common.ConfigEnvelope: illegal tag 0 (wire type 1)",
block: &common.Block{
Header: &common.BlockHeader{}, Data: &common.BlockData{Data: [][]byte{utils.MarshalOrPanic(&common.Envelope{
Payload: utils.MarshalOrPanic(&common.Payload{
Data: []byte{1, 2, 3},
}),
})}}},
},
{
name: "invalid envelope in block",
expectedError: "error unmarshaling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)",
......@@ -578,35 +593,34 @@ func TestConfigFromBlockBadInput(t *testing.T) {
Payload: []byte{1, 2, 3},
})}}},
},
{
name: "nil header in payload",
expectedError: "nil header in payload",
block: &common.Block{Data: &common.BlockData{Data: [][]byte{utils.MarshalOrPanic(&common.Envelope{})}}},
},
{
name: "invalid channel header",
expectedError: "error unmarshaling ChannelHeader: proto: common.ChannelHeader: illegal tag 0 (wire type 1)",
block: &common.Block{Data: &common.BlockData{Data: [][]byte{utils.MarshalOrPanic(&common.Envelope{
Payload: utils.MarshalOrPanic(&common.Payload{
Header: &common.Header{
ChannelHeader: []byte{1, 2, 3},
},
}),
})}}},
block: &common.Block{
Header: &common.BlockHeader{Number: 1},
Data: &common.BlockData{Data: [][]byte{utils.MarshalOrPanic(&common.Envelope{
Payload: utils.MarshalOrPanic(&common.Payload{
Header: &common.Header{
ChannelHeader: []byte{1, 2, 3},
},
}),
})}}},
},
{
name: "invalid config block",
expectedError: "invalid config envelope: proto: common.ConfigEnvelope: illegal tag 0 (wire type 1)",
block: &common.Block{Data: &common.BlockData{Data: [][]byte{utils.MarshalOrPanic(&common.Envelope{
Payload: utils.MarshalOrPanic(&common.Payload{
Data: []byte{1, 2, 3},
Header: &common.Header{
ChannelHeader: utils.MarshalOrPanic(&common.ChannelHeader{
Type: int32(common.HeaderType_CONFIG),
}),
},
}),
})}}},
block: &common.Block{
Header: &common.BlockHeader{},
Data: &common.BlockData{Data: [][]byte{utils.MarshalOrPanic(&common.Envelope{
Payload: utils.MarshalOrPanic(&common.Payload{
Data: []byte{1, 2, 3},
Header: &common.Header{
ChannelHeader: utils.MarshalOrPanic(&common.ChannelHeader{
Type: int32(common.HeaderType_CONFIG),
}),
},
}),
})}}},
},
} {
t.Run(testCase.name, func(t *testing.T) {
......
......@@ -33,6 +33,7 @@ import (
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/tools/configtxgen/encoder"
genesisconfig "github.com/hyperledger/fabric/common/tools/configtxgen/localconfig"
"github.com/hyperledger/fabric/common/tools/protolator"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/core/operations"
......@@ -102,13 +103,9 @@ func Start(cmd string, conf *localconfig.TopLevel) {
clusterClientConfig := initializeClusterClientConfig(conf)
clusterDialer.SetConfig(clusterClientConfig)
r := &replicationInitiator{
logger: flogging.MustGetLogger("orderer.common.cluster"),
secOpts: clusterClientConfig.SecOpts,
conf: conf,
lf: &ledgerFactory{lf},
signer: signer,
}
logger := flogging.MustGetLogger("orderer.common.cluster")
r := createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer)
// Only clusters that are equipped with a recent config block can replicate.
if clusterType && conf.General.GenesisMethod == "file" {
r.replicateIfNeeded(bootstrapBlock)
......@@ -181,6 +178,54 @@ func Start(cmd string, conf *localconfig.TopLevel) {
grpcServer.Start()
}
// createReplicator assembles the replicationInitiator used for onboarding.
// It wires a VerificationRegistry into the block puller so that pulled blocks
// of application channels are signature-verified, while the system channel is
// exempted (it is trusted via the bootstrap block and backward hash chain
// verification). The registry is notified of every block commit so that
// verifiers can be refreshed when config blocks are pulled.
func createReplicator(
	lf blockledger.Factory,
	bootstrapBlock *cb.Block,
	conf *localconfig.TopLevel,
	secOpts *comm.SecureOptions,
	signer crypto.LocalSigner,
) *replicationInitiator {
	logger := flogging.MustGetLogger("orderer.common.cluster")

	vl := &verifierLoader{
		verifierFactory: &cluster.BlockVerifierAssembler{Logger: logger},
		onFailure: func(block *cb.Block) {
			// Dump the offending block for diagnosis. A marshaling failure
			// here only loses debug output, so log it instead of ignoring
			// the error silently.
			if err := protolator.DeepMarshalJSON(os.Stdout, block); err != nil {
				logger.Warningf("Failed marshaling block to JSON: %v", err)
			}
		},
		ledgerFactory: lf,
		logger:        logger,
	}

	systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock)
	if err != nil {
		logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
	}

	// The system channel is not signature-verified because we trust the
	// bootstrap block and use backward hash chain verification.
	verifiersByChannel := vl.loadVerifiers()
	verifiersByChannel[systemChannelName] = &cluster.NoopBlockVerifier{}

	vr := &cluster.VerificationRegistry{
		Logger:             logger,
		VerifiersByChannel: verifiersByChannel,
		VerifierFactory:    &cluster.BlockVerifierAssembler{Logger: logger},
	}

	// Notify the registry on every block commit, so verifiers track config
	// blocks pulled during replication.
	ledgerFactory := &ledgerFactory{
		Factory:       lf,
		onBlockCommit: vr.BlockCommitted,
	}

	return &replicationInitiator{
		verifierRetriever: vr,
		logger:            logger,
		secOpts:           secOpts,
		conf:              conf,
		lf:                ledgerFactory,
		signer:            signer,
	}
}
func initializeLogging() {
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
......
......@@ -18,12 +18,15 @@ import (
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/crypto/tlsgen"
deliver_mocks "github.com/hyperledger/fabric/common/deliver/mock"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/flogging/floggingtest"
ledger_mocks "github.com/hyperledger/fabric/common/ledger/blockledger/mocks"
"github.com/hyperledger/fabric/common/ledger/blockledger/ram"
"github.com/hyperledger/fabric/common/localmsp"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/metrics/prometheus"
"github.com/hyperledger/fabric/common/mocks/crypto"
"github.com/hyperledger/fabric/common/tools/configtxgen/configtxgentest"
"github.com/hyperledger/fabric/common/tools/configtxgen/encoder"
genesisconfig "github.com/hyperledger/fabric/common/tools/configtxgen/localconfig"
......@@ -33,9 +36,12 @@ import (
"github.com/hyperledger/fabric/orderer/common/localconfig"
"github.com/hyperledger/fabric/orderer/common/multichannel"
"github.com/hyperledger/fabric/orderer/common/server/mocks"
server_mocks "github.com/hyperledger/fabric/orderer/common/server/mocks"
"github.com/hyperledger/fabric/orderer/consensus"
"github.com/hyperledger/fabric/protos/common"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
......@@ -642,3 +648,30 @@ func panicMsg(f func()) string {
return message.(string)
}
func TestCreateReplicator(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")