Commit 70b88048 authored by Jay Guo

FAB-14766 improve log text for serviceability



- block %d -> block [%d]
- remove [channel: %s] from the error body, because the caller normally
  decorates it before logging, and we sometimes end up with
  ERRO: [channel: mychannel] there's an error: [channel: mychannel] foo
  (a short sketch of this double-decoration pattern follows the commit metadata below)

Change-Id: I6cba4a34bb326d17a5439dd10909edd5f799e0e4
Signed-off-by: Jay Guo <guojiannan1101@gmail.com>
parent d87e72c0
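For illustration only: a minimal, self-contained sketch of the double-decoration problem the commit message describes. It uses plain fmt rather than Fabric's flogging package, and decorateAndLog is a made-up helper, not code from this change:

```go
package main

import "fmt"

// decorateAndLog is a hypothetical caller: it tags every log line with its own
// channel prefix, the way the ordering code decorates errors before logging them.
func decorateAndLog(channel string, err error) {
	fmt.Printf("ERRO: [channel: %s] there's an error: %s\n", channel, err)
}

func main() {
	channel := "mychannel"

	// If the error body already embeds the channel, the prefix shows up twice:
	//   ERRO: [channel: mychannel] there's an error: [channel: mychannel] foo
	decorateAndLog(channel, fmt.Errorf("[channel: %s] foo", channel))

	// With the channel removed from the error body, only the caller's
	// decoration remains:
	//   ERRO: [channel: mychannel] there's an error: foo
	decorateAndLog(channel, fmt.Errorf("foo"))
}
```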
......@@ -788,7 +788,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() {
Eventually(ordererRunners[2].Err(), time.Minute, time.Second).Should(gbytes.Say("certificate extracted from TLS connection isn't authorized"))
By("Ensuring it detects its eviction")
evictionDetection := gbytes.Say("Detected our own eviction from the chain in block 1 channel=testchannel")
evictionDetection := gbytes.Say(`Detected our own eviction from the channel in block \[1\] channel=testchannel`)
Eventually(ordererRunner.Err(), time.Minute, time.Second).Should(evictionDetection)
By("Ensuring all blocks are pulled up to the block that evicts the OSN")
......
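A side note on the eviction-detection expectation above (not part of the diff): gbytes.Say treats its argument as a regular expression, so once the log message contains literal brackets they must be escaped, which is why the plain quoted string became a raw string with \[1\]. A small standalone sketch using the standard regexp package:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	logLine := "Detected our own eviction from the channel in block [1] channel=testchannel"

	// Unescaped brackets form a character class: this pattern requires the
	// character '1' right after "block ", so it does NOT match "block [1]".
	naive := regexp.MustCompile(`block [1] channel`)

	// Escaped brackets are matched literally, mirroring the
	// gbytes.Say(`... block \[1\] ...`) expectation in the test above.
	escaped := regexp.MustCompile(`block \[1\] channel`)

	fmt.Println(naive.MatchString(logLine))   // false
	fmt.Println(escaped.MatchString(logLine)) // true
}
```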
......@@ -93,7 +93,7 @@ func (p *BlockPuller) PullBlock(seq uint64) *common.Block {
}
retriesLeft--
if retriesLeft == 0 && p.MaxPullBlockRetries > 0 {
p.Logger.Errorf("Failed pulling block %d: retry count exhausted(%d)", seq, p.MaxPullBlockRetries)
p.Logger.Errorf("Failed pulling block [%d]: retry count exhausted(%d)", seq, p.MaxPullBlockRetries)
return nil
}
time.Sleep(p.RetryTimeout)
......@@ -189,7 +189,7 @@ func (p *BlockPuller) pullBlocks(seq uint64, reConnected bool) error {
totalSize += size
p.blockBuff = append(p.blockBuff, block)
nextExpectedSequence++
p.Logger.Infof("Got block %d of size %dKB from %s", seq, size/1024, p.endpoint)
p.Logger.Infof("Got block [%d] of size %d KB from %s", seq, size/1024, p.endpoint)
}
return nil
}
......@@ -198,7 +198,7 @@ func (p *BlockPuller) obtainStream(reConnected bool, env *common.Envelope, seq u
var stream *ImpatientStream
var err error
if reConnected {
p.Logger.Infof("Sending request for block %d to %s", seq, p.endpoint)
p.Logger.Infof("Sending request for block [%d] to %s", seq, p.endpoint)
stream, err = p.requestBlocks(p.endpoint, NewImpatientStream(p.conn, p.FetchTimeout), env)
if err != nil {
return nil, err
......
......@@ -594,7 +594,7 @@ func TestBlockPullerFailover(t *testing.T) {
pulledBlock1.Add(1)
var once sync.Once
bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
if strings.Contains(entry.Message, "Got block 1 of size") {
if strings.Contains(entry.Message, "Got block [1] of size") {
once.Do(pulledBlock1.Done)
}
return nil
......@@ -646,11 +646,11 @@ func TestBlockPullerNoneResponsiveOrderer(t *testing.T) {
waitForConnection.Add(1)
var once sync.Once
bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
if !strings.Contains(entry.Message, "Sending request for block 1") {
if !strings.Contains(entry.Message, "Sending request for block [1]") {
return nil
}
defer once.Do(waitForConnection.Done)
s := entry.Message[len("Sending request for block 1 to 127.0.0.1:"):]
s := entry.Message[len("Sending request for block [1] to 127.0.0.1:"):]
port, err := strconv.ParseInt(s, 10, 32)
assert.NoError(t, err)
// If osn2 is the current orderer we're connected to,
......@@ -812,7 +812,7 @@ func TestBlockPullerFailures(t *testing.T) {
},
{
name: "failure at pull",
logTrigger: "Sending request for block 1",
logTrigger: "Sending request for block [1]",
beforeFunc: func(osn *deliverServer, bp *cluster.BlockPuller) {
// The first seek request asks for the latest block and succeeds
osn.addExpectProbeAssert()
......@@ -827,7 +827,7 @@ func TestBlockPullerFailures(t *testing.T) {
},
{
name: "failure at verifying pulled block",
logTrigger: "Sending request for block 1",
logTrigger: "Sending request for block [1]",
beforeFunc: func(osn *deliverServer, bp *cluster.BlockPuller) {
// The first seek request asks for the latest block and succeeds
osn.addExpectProbeAssert()
......@@ -1082,7 +1082,7 @@ func TestBlockPullerMaxRetriesExhausted(t *testing.T) {
var exhaustedRetryAttemptsLogged bool
bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
if entry.Message == "Failed pulling block 3: retry count exhausted(2)" {
if entry.Message == "Failed pulling block [3]: retry count exhausted(2)" {
exhaustedRetryAttemptsLogged = true
}
return nil
......
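Also not part of the diff: the tests above detect specific log lines by hooking the logger with zap.Hooks, which is why every expected message string has to be updated in lockstep with the new [%d] format. A minimal, self-contained sketch of that pattern, using a bare zap example logger rather than Fabric's flogging wrapper:

```go
package main

import (
	"fmt"
	"strings"
	"sync"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var pulled sync.WaitGroup
	pulled.Add(1)
	var once sync.Once

	// The hook runs on every written entry; the tests above use the same
	// mechanism (bp.Logger.WithOptions(zap.Hooks(...))) to notice that a
	// message such as "Got block [1] of size" was logged.
	logger := zap.NewExample().WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
		if strings.Contains(entry.Message, "Got block [1] of size") {
			once.Do(pulled.Done)
		}
		return nil
	})).Sugar()

	logger.Infof("Got block [%d] of size %d KB from %s", 1, 4, "127.0.0.1:7050")
	pulled.Wait()
	fmt.Println("observed the expected log line")
}
```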
......@@ -237,13 +237,13 @@ func (r *Replicator) pullChannelBlocks(channel string, puller *BlockPuller, late
func (r *Replicator) appendBlock(block *common.Block, ledger LedgerWriter, channel string) {
height := ledger.Height()
if height > block.Header.Number {
r.Logger.Infof("Skipping commit of block %d for channel %s because height is at %d", block.Header.Number, channel, height)
r.Logger.Infof("Skipping commit of block [%d] for channel %s because height is at %d", block.Header.Number, channel, height)
return
}
if err := ledger.Append(block); err != nil {
r.Logger.Panicf("Failed to write block %d: %v", block.Header.Number, err)
r.Logger.Panicf("Failed to write block [%d]: %v", block.Header.Number, err)
}
r.Logger.Infof("Committed block %d for channel %s", block.Header.Number, channel)
r.Logger.Infof("Committed block [%d] for channel %s", block.Header.Number, channel)
}
func (r *Replicator) compareBootBlockWithSystemChannelLastConfigBlock(block *common.Block) {
......@@ -538,14 +538,14 @@ func (ci *ChainInspector) Channels() []ChannelGenesisBlock {
for seq := uint64(0); seq < lastConfigBlockNum; seq++ {
block = ci.Puller.PullBlock(seq)
if block == nil {
ci.Logger.Panicf("Failed pulling block %d from the system channel", seq)
ci.Logger.Panicf("Failed pulling block [%d] from the system channel", seq)
}
ci.validateHashPointer(block, prevHash)
channel, err := IsNewChannelBlock(block)
if err != nil {
// If we failed to classify a block, something is wrong in the system chain
// we're trying to pull, so abort.
ci.Logger.Panic("Failed classifying block", seq, ":", err)
ci.Logger.Panicf("Failed classifying block [%d]: %s", seq, err)
continue
}
// Set the previous hash for the next iteration
......@@ -580,7 +580,7 @@ func (ci *ChainInspector) validateHashPointer(block *common.Block, prevHash []by
if bytes.Equal(block.Header.PreviousHash, prevHash) {
return
}
ci.Logger.Panicf("Claimed previous hash of block %d is %x but actual previous hash is %x",
ci.Logger.Panicf("Claimed previous hash of block [%d] is %x but actual previous hash is %x",
block.Header.Number, block.Header.PreviousHash, prevHash)
}
......
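One hunk above is more than a format tweak: the old Logger.Panic("Failed classifying block", seq, ":", err) joined its arguments with spaces (hence the old "Failed classifying block 2 : ..." expectation further down in the tests), while the new Logger.Panicf call formats them explicitly. A tiny sketch of the difference in message assembly, using fmt as a stand-in for the Fabric logger:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	seq := uint64(2)
	err := errors.New("block data does not carry an envelope at index 0")

	// Space-joined arguments, roughly what the old print-style Panic produced
	// (note the stray space before the colon):
	old := strings.TrimSuffix(fmt.Sprintln("Failed classifying block", seq, ":", err), "\n")
	fmt.Println(old) // Failed classifying block 2 : block data does not carry an envelope at index 0

	// Explicit format string, as the new Panicf call produces:
	fmt.Printf("Failed classifying block [%d]: %s\n", seq, err)
}
```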
......@@ -156,7 +156,7 @@ func TestReplicateChainsFailures(t *testing.T) {
name: "failure in appending a block to the ledger",
latestBlockSeqInOrderer: 21,
appendBlockError: errors.New("IO error"),
expectedPanic: "Failed to write block 0: IO error",
expectedPanic: "Failed to write block [0]: IO error",
},
{
name: "failure pulling the system chain",
......@@ -1057,7 +1057,7 @@ func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) {
{
description: "Success",
blockVerifiers: []cluster.BlockVerifier{&cluster.NoopBlockVerifier{}},
expectedLogMessage: "Got block 0 of size",
expectedLogMessage: "Got block [0] of size",
iterations: 1,
},
{
......@@ -1452,7 +1452,7 @@ func TestChannels(t *testing.T) {
},
assertion: func(t *testing.T, ci *cluster.ChainInspector) {
panicValue := "System channel pulled doesn't match the boot last config block:" +
" block 2's hash (bc4ef5cc8a61ac0747cc82df58bac9ad3278622c1cfc7a119b9b1068e422c9f1)" +
" block [2]'s hash (bc4ef5cc8a61ac0747cc82df58bac9ad3278622c1cfc7a119b9b1068e422c9f1)" +
" mismatches 3's prev block hash ()"
assert.PanicsWithValue(t, panicValue, func() {
ci.Channels()
......@@ -1466,7 +1466,7 @@ func TestChannels(t *testing.T) {
systemChain[len(systemChain)-2].Header.PreviousHash = nil
},
assertion: func(t *testing.T, ci *cluster.ChainInspector) {
panicValue := "Claimed previous hash of block 2 is but actual previous " +
panicValue := "Claimed previous hash of block [2] is but actual previous " +
"hash is 920faeb0bd8a02b3f2553247359fb3b684819c75c6e5487bc7eed632841ddc5f"
assert.PanicsWithValue(t, panicValue, func() {
ci.Channels()
......@@ -1480,7 +1480,7 @@ func TestChannels(t *testing.T) {
systemChain[len(systemChain)-2].Data.Data = [][]byte{{1, 2, 3}}
},
assertion: func(t *testing.T, ci *cluster.ChainInspector) {
panicValue := "Failed classifying block 2 : block data does not carry" +
panicValue := "Failed classifying block [2]: block data does not carry" +
" an envelope at index 0: error unmarshaling Envelope: " +
"proto: common.Envelope: illegal tag 0 (wire type 1)"
assert.PanicsWithValue(t, panicValue, func() {
......@@ -1497,7 +1497,7 @@ func TestChannels(t *testing.T) {
systemChain[len(systemChain)/2] = nil
},
assertion: func(t *testing.T, ci *cluster.ChainInspector) {
panicValue := "Failed pulling block 2 from the system channel"
panicValue := "Failed pulling block [2] from the system channel"
assert.PanicsWithValue(t, panicValue, func() {
ci.Channels()
})
......
......@@ -330,7 +330,7 @@ func VerifyBlockHash(indexInBuffer int, blockBuff []*common.Block) error {
if !bytes.Equal(block.Header.PreviousHash, prevBlock.Header.Hash()) {
claimedPrevHash := hex.EncodeToString(block.Header.PreviousHash)
actualPrevHash := hex.EncodeToString(prevBlock.Header.Hash())
return errors.Errorf("block %d's hash (%s) mismatches %d's prev block hash (%s)",
return errors.Errorf("block [%d]'s hash (%s) mismatches %d's prev block hash (%s)",
prevSeq, actualPrevHash, currSeq, claimedPrevHash)
}
}
......@@ -467,7 +467,7 @@ func (vr *VerificationRegistry) BlockCommitted(block *common.Block, channel stri
conf, err := ConfigFromBlock(block)
// The block doesn't contain a config block, but is a valid block
if err == errNotAConfig {
vr.Logger.Debugf("Committed block %d for channel %s that is not a config block",
vr.Logger.Debugf("Committed block [%d] for channel %s that is not a config block",
block.Header.Number, channel)
return
}
......@@ -488,7 +488,7 @@ func (vr *VerificationRegistry) BlockCommitted(block *common.Block, channel stri
vr.VerifiersByChannel[channel] = verifier
vr.Logger.Debugf("Committed config block %d for channel %s", block.Header.Number, channel)
vr.Logger.Debugf("Committed config block [%d] for channel %s", block.Header.Number, channel)
}
// BlockToString returns a string representation of this block.
......@@ -589,7 +589,7 @@ func LastConfigBlock(block *common.Block, blockRetriever BlockRetriever) (*commo
}
lastConfigBlock := blockRetriever.Block(lastConfigBlockNum)
if lastConfigBlock == nil {
return nil, errors.Errorf("unable to retrieve last config block %d", lastConfigBlockNum)
return nil, errors.Errorf("unable to retrieve last config block [%d]", lastConfigBlockNum)
}
return lastConfigBlock, nil
}
......
......@@ -234,7 +234,7 @@ func TestVerifyBlockHash(t *testing.T) {
},
{
name: "prev hash mismatch",
errorContains: "block 12's hash " +
errorContains: "block [12]'s hash " +
"(866351705f1c2f13e10d52ead9d0ca3b80689ede8cc8bf70a6d60c67578323f4) " +
"mismatches 13's prev block hash (07)",
mutateBlockSequence: func(blockSequence []*common.Block) []*common.Block {
......@@ -307,7 +307,7 @@ func TestVerifyBlocks(t *testing.T) {
blockSequence[len(blockSequence)/2].Header.PreviousHash = []byte{7}
return blockSequence
},
expectedError: "block 74's hash " +
expectedError: "block [74]'s hash " +
"(5cb4bd1b6a73f81afafd96387bb7ff4473c2425929d0862586f5fbfa12d762dd) " +
"mismatches 75's prev block hash (07)",
},
......@@ -785,7 +785,7 @@ func TestLastConfigBlock(t *testing.T) {
})},
},
},
expectedError: "unable to retrieve last config block 666",
expectedError: "unable to retrieve last config block [666]",
blockRetriever: blockRetriever,
},
{
......@@ -897,8 +897,8 @@ func TestVerificationRegistry(t *testing.T) {
channelRetrieved: "foo",
channelCommitted: "foo",
loggedMessages: map[string]struct{}{
"No verifier for channel foo exists": {},
"Committed block 5 for channel foo that is not a config block": {},
"No verifier for channel foo exists": {},
"Committed block [5] for channel foo that is not a config block": {},
},
expectedVerifier: nil,
},
......@@ -923,8 +923,8 @@ func TestVerificationRegistry(t *testing.T) {
channelRetrieved: "foo",
channelCommitted: "bar",
loggedMessages: map[string]struct{}{
"No verifier for channel foo exists": {},
"Committed config block 0 for channel bar": {},
"No verifier for channel foo exists": {},
"Committed config block [0] for channel bar": {},
},
expectedVerifier: nil,
verifiersByChannel: make(map[string]cluster.BlockVerifier),
......@@ -936,7 +936,7 @@ func TestVerificationRegistry(t *testing.T) {
channelRetrieved: "bar",
channelCommitted: "bar",
loggedMessages: map[string]struct{}{
"Committed config block 0 for channel bar": {},
"Committed config block [0] for channel bar": {},
},
expectedVerifier: verifier,
verifiersByChannel: make(map[string]cluster.BlockVerifier),
......
......@@ -175,7 +175,7 @@ func (bw *BlockWriter) commitBlock(encodedMetadataValue []byte) {
if err != nil {
logger.Panicf("[channel: %s] Could not append block: %s", bw.support.ChainID(), err)
}
logger.Debugf("[channel: %s] Wrote block %d", bw.support.ChainID(), bw.lastBlock.GetHeader().Number)
logger.Debugf("[channel: %s] Wrote block [%d]", bw.support.ChainID(), bw.lastBlock.GetHeader().Number)
}
func (bw *BlockWriter) addBlockSignature(block *cb.Block) {
......
......@@ -307,24 +307,24 @@ func (vl *verifierLoader) loadVerifier(chain string) cluster.BlockVerifier {
lastBlockIndex := height - 1
lastBlock := blockRetriever.Block(lastBlockIndex)
if lastBlock == nil {
vl.logger.Panicf("Failed retrieving block %d for channel %s", lastBlockIndex, chain)
vl.logger.Panicf("Failed retrieving block [%d] for channel %s", lastBlockIndex, chain)
}
lastConfigBlock, err := cluster.LastConfigBlock(lastBlock, blockRetriever)
if err != nil {
vl.logger.Panicf("Failed retrieving config block %d for channel %s", lastBlockIndex, chain)
vl.logger.Panicf("Failed retrieving config block [%d] for channel %s", lastBlockIndex, chain)
}
conf, err := cluster.ConfigFromBlock(lastConfigBlock)
if err != nil {
vl.onFailure(lastConfigBlock)
vl.logger.Panicf("Failed extracting configuration for channel %s from block %d: %v",
vl.logger.Panicf("Failed extracting configuration for channel %s from block [%d]: %v",
chain, lastConfigBlock.Header.Number, err)
}
verifier, err := vl.verifierFactory.VerifierFromConfig(conf, chain)
if err != nil {
vl.onFailure(lastConfigBlock)
vl.logger.Panicf("Failed creating verifier for channel %s from block %d: %v", chain, lastBlockIndex, err)
vl.logger.Panicf("Failed creating verifier for channel %s from block [%d]: %v", chain, lastBlockIndex, err)
}
vl.logger.Infof("Loaded verifier for channel %s from config block at index %d", chain, lastBlockIndex)
return verifier
......
......@@ -952,13 +952,13 @@ func TestVerifierLoader(t *testing.T) {
{
description: "block retrieval fails",
ledgerHeight: 100,
expectedPanic: "Failed retrieving block 99 for channel mychannel",
expectedPanic: "Failed retrieving block [99] for channel mychannel",
},
{
description: "block retrieval succeeds but the block is bad",
ledgerHeight: 100,
lastBlock: &common.Block{},
expectedPanic: "Failed retrieving config block 99 for channel mychannel",
expectedPanic: "Failed retrieving config block [99] for channel mychannel",
},
{
description: "config block retrieved is bad",
......@@ -971,7 +971,7 @@ func TestVerifierLoader(t *testing.T) {
},
},
lastConfigBlock: &common.Block{Header: &common.BlockHeader{Number: 21}},
expectedPanic: "Failed extracting configuration for channel mychannel from block 21: empty block",
expectedPanic: "Failed extracting configuration for channel mychannel from block [21]: empty block",
onFailureInvoked: true,
},
{
......@@ -986,7 +986,7 @@ func TestVerifierLoader(t *testing.T) {
},
lastConfigBlock: configBlock,
verifierFromConfigErr: errors.New("failed initializing MSP"),
expectedPanic: "Failed creating verifier for channel mychannel from block 99: failed initializing MSP",
expectedPanic: "Failed creating verifier for channel mychannel from block [99]: failed initializing MSP",
onFailureInvoked: true,
},
{
......
......@@ -388,8 +388,7 @@ func (c *Chain) detectMigration() bool {
}
if startOfChain {
c.logger.Infof("[channel: %s], Restarting after consensus-type migration. Type: %s, just starting the channel.",
c.support.ChainID(), c.support.SharedConfig().ConsensusType())
c.logger.Infof("Restarting after consensus-type migration. Type: %s, just starting the channel.", c.support.SharedConfig().ConsensusType())
}
}
return startOfChain
......@@ -631,10 +630,10 @@ func (c *Chain) serveRequest() {
case b := <-ch:
data := utils.MarshalOrPanic(b)
if err := c.Node.Propose(ctx, data); err != nil {
c.logger.Errorf("Failed to propose block %d to raft and discard %d blocks in queue: %s", b.Header.Number, len(ch), err)
c.logger.Errorf("Failed to propose block [%d] to raft and discard %d blocks in queue: %s", b.Header.Number, len(ch), err)
return
}
c.logger.Debugf("Proposed block %d to raft consensus", b.Header.Number)
c.logger.Debugf("Proposed block [%d] to raft consensus", b.Header.Number)
case <-ctx.Done():
c.logger.Debugf("Quit proposing blocks, discarded %d blocks in the queue", len(ch))
......@@ -762,7 +761,7 @@ func (c *Chain) serveRequest() {
continue
}
c.logger.Infof("Start accepting requests as Raft leader at block %d", c.lastBlock.Header.Number)
c.logger.Infof("Start accepting requests as Raft leader at block [%d]", c.lastBlock.Header.Number)
bc = &blockCreator{
hash: c.lastBlock.Header.Hash(),
number: c.lastBlock.Header.Number,
......@@ -825,9 +824,9 @@ func (c *Chain) serveRequest() {
func (c *Chain) writeBlock(block *common.Block, index uint64) {
if block.Header.Number > c.lastBlock.Header.Number+1 {
c.logger.Panicf("Got block %d, expect block %d", block.Header.Number, c.lastBlock.Header.Number+1)
c.logger.Panicf("Got block [%d], expect block [%d]", block.Header.Number, c.lastBlock.Header.Number+1)
} else if block.Header.Number < c.lastBlock.Header.Number+1 {
c.logger.Infof("Got block %d, expect block %d, this node was forced to catch up", block.Header.Number, c.lastBlock.Header.Number+1)
c.logger.Infof("Got block [%d], expect block [%d], this node was forced to catch up", block.Header.Number, c.lastBlock.Header.Number+1)
return
}
......@@ -836,7 +835,7 @@ func (c *Chain) writeBlock(block *common.Block, index uint64) {
}
c.lastBlock = block
c.logger.Infof("Writing block %d (Raft index: %d) to ledger", block.Header.Number, index)
c.logger.Infof("Writing block [%d] (Raft index: %d) to ledger", block.Header.Number, index)
if utils.IsConfigBlock(block) {
c.writeConfigBlock(block, index)
......@@ -899,7 +898,7 @@ func (c *Chain) ordered(msg *orderer.SubmitRequest) (batches [][]*common.Envelop
func (c *Chain) propose(ch chan<- *common.Block, bc *blockCreator, batches ...[]*common.Envelope) {
for _, batch := range batches {
b := bc.createNextBlock(batch)
c.logger.Infof("Created block %d, there are %d blocks in flight", b.Header.Number, c.blockInflight)
c.logger.Infof("Created block [%d], there are %d blocks in flight", b.Header.Number, c.blockInflight)
select {
case ch <- b:
......@@ -925,7 +924,7 @@ func (c *Chain) catchUp(snap *raftpb.Snapshot) error {
}
if c.lastBlock.Header.Number >= b.Header.Number {
c.logger.Warnf("Snapshot is at block %d, local block number is %d, no sync needed", b.Header.Number, c.lastBlock.Header.Number)
c.logger.Warnf("Snapshot is at block [%d], local block number is %d, no sync needed", b.Header.Number, c.lastBlock.Header.Number)
return nil
}
......@@ -937,12 +936,12 @@ func (c *Chain) catchUp(snap *raftpb.Snapshot) error {
next := c.lastBlock.Header.Number + 1
c.logger.Infof("Catching up with snapshot taken at block %d, starting from block %d", b.Header.Number, next)
c.logger.Infof("Catching up with snapshot taken at block [%d], starting from block [%d]", b.Header.Number, next)
for next <= b.Header.Number {
block := puller.PullBlock(next)
if block == nil {
return errors.Errorf("failed to fetch block %d from cluster", next)
return errors.Errorf("failed to fetch block [%d] from cluster", next)
}
if utils.IsConfigBlock(block) {
c.support.WriteConfigBlock(block, nil)
......@@ -950,7 +949,7 @@ func (c *Chain) catchUp(snap *raftpb.Snapshot) error {
configMembership := c.detectConfChange(block)
if configMembership != nil && configMembership.Changed() {
c.logger.Infof("Config block %d changes consenter set, communication should be reconfigured", block.Header.Number)
c.logger.Infof("Config block [%d] changes consenter set, communication should be reconfigured", block.Header.Number)
c.raftMetadataLock.Lock()
c.opts.BlockMetadata = configMembership.NewBlockMetadata
......@@ -969,7 +968,7 @@ func (c *Chain) catchUp(snap *raftpb.Snapshot) error {
next++
}
c.logger.Infof("Finished syncing with cluster up to block %d (incl.)", b.Header.Number)
c.logger.Infof("Finished syncing with cluster up to and including block [%d]", b.Header.Number)
return nil
}
......@@ -996,7 +995,7 @@ func (c *Chain) detectConfChange(block *common.Block) *MembershipChanges {
}
if changes.Rotated() {
c.logger.Infof("Config block %d rotates TLS certificate of node %d", block.Header.Number, changes.RotatedNode)
c.logger.Infof("Config block [%d] rotates TLS certificate of node %d", block.Header.Number, changes.RotatedNode)
}
return changes
......@@ -1100,7 +1099,7 @@ func (c *Chain) apply(ents []raftpb.Entry) {
select {
case c.gcC <- &gc{index: c.appliedIndex, state: c.confState, data: ents[position].Data}:
c.logger.Infof("Accumulated %d bytes since last snapshot, exceeding size limit (%d bytes), "+
"taking snapshot at block %d (index: %d), last snapshotted block number is %d, current nodes: %+v",
"taking snapshot at block [%d] (index: %d), last snapshotted block number is %d, current nodes: %+v",
c.accDataSize, c.sizeLimit, b.Header.Number, c.appliedIndex, c.lastSnapBlockNum, c.confState.Nodes)
c.accDataSize = 0
c.lastSnapBlockNum = b.Header.Number
......@@ -1318,8 +1317,7 @@ func (c *Chain) getInFlightConfChange() *raftpb.ConfChange {
// the block metadata as etcdraft.BlockMetadata (see below). Right after migration the block metadata will carry
// Kafka metadata. The etcdraft.BlockMetadata should be extracted from the ConsensusType.Metadata, instead.
if c.detectMigration() {
c.logger.Infof("[channel: %s], Restarting after consensus-type migration. Type: %s, just starting the chain.",
c.support.ChainID(), c.support.SharedConfig().ConsensusType())
c.logger.Infof("Restarting after consensus-type migration. Type: %s, just starting the chain.", c.support.SharedConfig().ConsensusType())
return nil
}
......
......@@ -1161,7 +1161,7 @@ var _ = Describe("Chain", func() {
m := &raftprotos.BlockMetadata{}
proto.Unmarshal(metadata, m)
By("Cutting block 3")
By("Cutting block [3]")
// order another envelope. this should not trigger snapshot
err = chain.Order(largeEnv, uint64(0))
Expect(err).NotTo(HaveOccurred())
......@@ -1177,7 +1177,7 @@ var _ = Describe("Chain", func() {
c.opts.SnapshotIntervalSize = 1024
By("Restarting node at block 2")
By("Restarting node at block [2]")
c.init()
c.Start()
defer c.Halt()
......
......@@ -73,7 +73,7 @@ func lastConfigBlockFromSupport(support consensus.ConsenterSupport) (*common.Blo
lastBlockSeq := support.Height() - 1
lastBlock := support.Block(lastBlockSeq)
if lastBlock == nil {
return nil, errors.Errorf("unable to retrieve block %d", lastBlockSeq)
return nil, errors.Errorf("unable to retrieve block [%d]", lastBlockSeq)
}
lastConfigBlock, err := cluster.LastConfigBlock(lastBlock, support)
if err != nil {
......@@ -563,7 +563,7 @@ func (es *evictionSuspector) confirmSuspicion(cumulativeSuspicion time.Duration)
return
}
es.logger.Infof("Last config block was found to be block %d", lastConfigBlock.Header.Number)
es.logger.Infof("Last config block was found to be block [%d]", lastConfigBlock.Header.Number)
height := es.height()
......@@ -584,7 +584,7 @@ func (es *evictionSuspector) confirmSuspicion(cumulativeSuspicion time.Duration)
return
}
es.logger.Warningf("Detected our own eviction from the chain in block %d", lastConfigBlock.Header.Number)
es.logger.Warningf("Detected our own eviction from the channel in block [%d]", lastConfigBlock.Header.Number)
es.logger.Infof("Waiting for chain to halt")
es.halt()
......@@ -594,11 +594,11 @@ func (es *evictionSuspector) confirmSuspicion(cumulativeSuspicion time.Duration)
nextBlock := height
es.logger.Infof("Will now pull blocks %d to %d", nextBlock, lastConfigBlock.Header.Number)
for seq := nextBlock; seq <= lastConfigBlock.Header.Number; seq++ {
es.logger.Infof("Pulling block %d", seq)
es.logger.Infof("Pulling block [%d]", seq)
block := puller.PullBlock(seq)
err := es.writeBlock(block)
if err != nil {
es.logger.Panicf("Failed writing block %d to the ledger: %v", block.Header.Number, err)
es.logger.Panicf("Failed writing block [%d] to the ledger: %v", block.Header.Number, err)
}
}
......
......@@ -124,7 +124,7 @@ func TestEndpointconfigFromFromSupport(t *testing.T) {
}{
{
name: "Block returns nil",
expectedError: "unable to retrieve block 99",
expectedError: "unable to retrieve block [99]",
height: 100,
},
{
......@@ -142,7 +142,7 @@ func TestEndpointconfigFromFromSupport(t *testing.T) {
})},
},
},
expectedError: "unable to retrieve last config block 42",
expectedError: "unable to retrieve last config block [42]",
height: 100,
},
{
......@@ -249,7 +249,7 @@ func TestNewBlockPuller(t *testing.T) {
HeightVal: 100,
},
certificate: ca.CertBytes(),
expectedError: "unable to retrieve block 99",
expectedError: "unable to retrieve block [99]",
dialer: dialer,
},
{
......@@ -425,7 +425,7 @@ func TestEvictionSuspector(t *testing.T) {
},
{
description: "we are not in the channel",
expectedLog: "Detected our own eviction from the chain in block 9",
expectedLog: "Detected our own eviction from the channel in block [9]",
evictionSuspicionThreshold: 10*time.Minute - time.Second,
amIInChannelReturns: cluster.ErrNotInChannel,
blockPuller: puller,
......
......@@ -51,7 +51,7 @@ func newChain(
lastResubmittedConfigOffset int64,
) (*chainImpl, error) {
lastCutBlockNumber := getLastCutBlockNumber(support.Height())
logger.Infof("[channel: %s] Starting chain with last persisted offset %d and last recorded block %d",
logger.Infof("[channel: %s] Starting chain with last persisted offset %d and last recorded block [%d]",
support.ChainID(), lastOffsetPersisted, lastCutBlockNumber)
doneReprocessingMsgInFlight := make(chan struct{})
......@@ -228,15 +228,15 @@ func (chain *chainImpl) Order(env *cb.Envelope, configSeq uint64) error {
func (chain *chainImpl) order(env *cb.Envelope, configSeq uint64, originalOffset int64) error {
// During consensus-type migration: stop all normal txs on the system-channel and standard-channels.
if chain.migrationStatusStepper.IsPending() || chain.migrationStatusStepper.IsCommitted() {
return fmt.Errorf("[channel: %s] cannot enqueue, consensus-type migration pending", chain.ChainID())
return errors.Errorf("cannot enqueue, consensus-type migration pending")
}
marshaledEnv, err := utils.Marshal(env)
if err != nil {
return fmt.Errorf("[channel: %s] cannot enqueue, unable to marshal envelope because = %s", chain.ChainID(), err)
return errors.Errorf("cannot enqueue, unable to marshal envelope: %s", err)
}
if !chain.enqueue(newNormalMessage(marshaledEnv, configSeq, originalOffset)) {
return fmt.Errorf("[channel: %s] cannot enqueue", chain.ChainID())
return errors.Errorf("cannot enqueue")
}
return nil
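Finally, the kafka hunk above also swaps fmt.Errorf for errors.Errorf. Assuming errors here is github.com/pkg/errors, as in the cluster code earlier in this diff, the replacement additionally records the call-site stack, which the %+v verb can print. A minimal sketch of that difference (not code from this change):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	plain := fmt.Errorf("cannot enqueue, consensus-type migration pending")
	wrapped := errors.Errorf("cannot enqueue, consensus-type migration pending")

	// Both render identically with %v / %s ...
	fmt.Printf("%v\n", plain)
	fmt.Printf("%v\n", wrapped)

	// ... but the pkg/errors variant also carries the stack of the call site,
	// which %+v reveals; fmt.Errorf has no stack to show.
	fmt.Printf("%+v\n", wrapped)
}
```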