Clean up DEBUG category logs #2776

Merged · 1 commit · Oct 5, 2021
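Across the files below, chatty per-request, per-peer and per-block diagnostics move from Debug to Trace, while conditions an operator may actually want to notice (slow stream subscribers, unexpected but recoverable handling errors, missing pending work) move to Warn; the ethash cache-generation log also only escalates to Info after 5s instead of 3s. A minimal, self-contained sketch of that convention follows; the tiny logAt logger and the handleIncoming helper are illustrative stand-ins, not erigon's log package or code from this PR.

	package main

	import (
		"errors"
		"fmt"
	)

	// logAt is a stand-in for a leveled logger; erigon's own log package is not reproduced here.
	func logAt(level, msg string, ctx ...interface{}) {
		fmt.Println(level, msg, ctx)
	}

	var errDecode = errors.New("rlp: decode error")

	// handleIncoming is a hypothetical helper showing the level convention the diff
	// converges on: Trace for chatty per-message events, Debug for malformed peer
	// input, Warn (rather than Error) for unexpected but recoverable failures.
	func handleIncoming(err error) {
		switch {
		case err == nil:
			logAt("TRACE", "Handled incoming message")
		case errors.Is(err, errDecode):
			logAt("DEBUG", "Handling incoming message", "error", err)
		default:
			logAt("WARN", "Handling incoming message", "error", err)
		}
	}

	func main() {
		handleIncoming(nil)
		handleIncoming(errDecode)
		handleIncoming(errors.New("peer disconnected mid-request"))
	}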
2 changes: 1 addition & 1 deletion cmd/rpcdaemon/commands/eth_block.go
@@ -51,7 +51,7 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat
}
txs = append(txs, txn)
}
- defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
+ defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())

stateBlockNumber, hash, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters)
if err != nil {
4 changes: 2 additions & 2 deletions cmd/rpcdaemon/commands/eth_uncles.go
@@ -41,7 +41,7 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp

uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) {
log.Debug("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
return nil, nil
}
uncle := types.NewBlockWithHeader(uncles[index])
@@ -73,7 +73,7 @@ func (api *APIImpl) GetUncleByBlockHashAndIndex(ctx context.Context, hash common

uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) {
log.Debug("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
return nil, nil
}
uncle := types.NewBlockWithHeader(uncles[index])
10 changes: 5 additions & 5 deletions cmd/sentry/download/downloader.go
@@ -210,7 +210,7 @@ func RecvUploadHeadersMessage(ctx context.Context,
if rlp.IsDecodeError(err) {
log.Debug("[RecvUploadHeadersMessage] Handling incoming message", "error", err)
} else {
log.Error("[RecvUploadHeadersMessage] Handling incoming message", "error", err)
log.Warn("[RecvUploadHeadersMessage] Handling incoming message", "error", err)
}
}
if wg != nil {
@@ -312,7 +312,7 @@ func RecvMessage(
if rlp.IsDecodeError(err) {
log.Debug("[RecvMessage] Handling incoming message", "error", err)
} else {
log.Error("[RecvMessage] Handling incoming message", "error", err)
log.Warn("[RecvMessage] Handling incoming message", "error", err)
}
}

@@ -519,7 +519,7 @@ func (cs *ControlServerImpl) blockHeaders66(ctx context.Context, in *proto_sentr
if req != nil {
if peer := cs.SendHeaderRequest(ctx, req); peer != nil {
cs.Hd.SentRequest(req, currentTime, 5 /* timeout */)
log.Debug("Sent request", "height", req.Number)
log.Trace("Sent request", "height", req.Number)
}
}
cs.Penalize(ctx, penalties)
@@ -594,7 +594,7 @@ func (cs *ControlServerImpl) blockHeaders65(ctx context.Context, in *proto_sentr
if req != nil {
if peer := cs.SendHeaderRequest(ctx, req); peer != nil {
cs.Hd.SentRequest(req, currentTime, 5 /* timeout */)
log.Debug("Sent request", "height", req.Number)
log.Trace("Sent request", "height", req.Number)
}
}
cs.Penalize(ctx, penalties)
@@ -673,7 +673,7 @@ func (cs *ControlServerImpl) newBlock65(ctx context.Context, inreq *proto_sentry
if _, err1 := sentry.PeerMinBlock(ctx, &outreq, &grpc.EmptyCallOption{}); err1 != nil {
log.Error("Could not send min block for peer", "err", err1)
}
log.Debug(fmt.Sprintf("NewBlockMsg{blockNumber: %d} from [%s]", request.Block.NumberU64(), gointerfaces.ConvertH512ToBytes(inreq.PeerId)))
log.Trace(fmt.Sprintf("NewBlockMsg{blockNumber: %d} from [%s]", request.Block.NumberU64(), gointerfaces.ConvertH512ToBytes(inreq.PeerId)))
return nil
}

16 changes: 8 additions & 8 deletions cmd/sentry/download/sentry.go
@@ -267,13 +267,13 @@ func runPeer(
default:
}
if peerPrinted {
log.Debug("Peer disconnected", "id", peerID, "name", peerInfo.peer.Fullname())
log.Trace("Peer disconnected", "id", peerID, "name", peerInfo.peer.Fullname())
}
}()
for {
if !peerPrinted {
if time.Now().After(printTime) {
log.Debug("Peer stable", "id", peerID, "name", peerInfo.peer.Fullname())
log.Trace("Peer stable", "id", peerID, "name", peerInfo.peer.Fullname())
peerPrinted = true
}
}
@@ -469,10 +469,10 @@ func NewSentryServer(ctx context.Context, dialCandidates enode.Iterator, readNod
Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
peerID := peer.ID().String()
if _, ok := ss.GoodPeers.Load(peerID); ok {
log.Debug(fmt.Sprintf("[%s] Peer already has connection", peerID))
log.Trace(fmt.Sprintf("[%s] Peer already has connection", peerID))
return nil
}
log.Debug(fmt.Sprintf("[%s] Start with peer", peerID))
log.Trace(fmt.Sprintf("[%s] Start with peer", peerID))

peerInfo := &PeerInfo{
peer: peer,
@@ -488,7 +488,7 @@ func NewSentryServer(ctx context.Context, dialCandidates enode.Iterator, readNod
if err != nil {
return fmt.Errorf("handshake to peer %s: %w", peerID, err)
}
log.Debug(fmt.Sprintf("[%s] Received status message OK", peerID), "name", peer.Name())
log.Trace(fmt.Sprintf("[%s] Received status message OK", peerID), "name", peer.Name())

if err := runPeer(
ctx,
@@ -499,7 +499,7 @@ func NewSentryServer(ctx context.Context, dialCandidates enode.Iterator, readNod
ss.send,
ss.hasSubscribers,
); err != nil {
log.Debug(fmt.Sprintf("[%s] Error while running peer: %v", peerID, err))
log.Trace(fmt.Sprintf("[%s] Error while running peer: %v", peerID, err))
}
return nil
},
@@ -889,7 +889,7 @@ func (ss *SentryServerImpl) send(msgID proto_sentry.MessageId, peerID string, b
ch := ss.messageStreams[msgID][i]
ch <- req
if len(ch) > MessagesQueueSize/2 {
log.Debug("[sentry] consuming is slow", "msgID", msgID.String())
log.Warn("[sentry] consuming is slow", "msgID", msgID.String())
// evict old messages from channel
for j := 0; j < MessagesQueueSize/4; j++ {
select {
@@ -937,7 +937,7 @@ func (ss *SentryServerImpl) addMessagesStream(ids []proto_sentry.MessageId, ch c

const MessagesQueueSize = 1024 // one such queue per client of .Messages stream
func (ss *SentryServerImpl) Messages(req *proto_sentry.MessagesRequest, server proto_sentry.Sentry_MessagesServer) error {
log.Debug("[Messages] new subscriber", "to", req.Ids)
log.Trace("[Messages] new subscriber", "to", req.Ids)
ch := make(chan *proto_sentry.InboundMessage, MessagesQueueSize)
defer close(ch)
clean := ss.addMessagesStream(req.Ids, ch)
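The send path above now warns when a subscriber's channel is more than half full and then evicts up to a quarter of the queued messages; the diff is cut off inside that eviction loop. A self-contained sketch of the pattern, where the deliver helper, plain string messages, the stdlib log package and the body of the select are illustrative assumptions rather than the repository's code:

	package main

	import "log"

	const messagesQueueSize = 1024

	// deliver mirrors the slow-consumer handling visible in SentryServerImpl.send:
	// when the subscriber's buffered channel is more than half full, warn and then
	// drop up to a quarter of the oldest queued messages so the producer keeps going.
	func deliver(ch chan string, msg string) {
		ch <- msg
		if len(ch) > messagesQueueSize/2 {
			log.Println("WARN: [sentry] consuming is slow")
			for j := 0; j < messagesQueueSize/4; j++ {
				select {
				case <-ch: // discard an old, unread message
				default:
					return // nothing left to evict
				}
			}
		}
	}

	func main() {
		ch := make(chan string, messagesQueueSize)
		for i := 0; i < 2*messagesQueueSize; i++ {
			deliver(ch, "msg")
		}
		log.Println("messages still queued:", len(ch))
	}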
4 changes: 2 additions & 2 deletions cmd/snapshots/tracker/commands/root.go
@@ -104,7 +104,7 @@ var rootCmd = &cobra.Command{
return true, nil
}
if time.Since(a.UpdatedAt) > 24*time.Hour {
log.Debug("Skipped", "k", common.Bytes2Hex(k), "last updated", a.UpdatedAt)
log.Trace("Skipped", "k", common.Bytes2Hex(k), "last updated", a.UpdatedAt)
return true, nil
}
if a.Left == 0 {
@@ -268,7 +268,7 @@ func (t *Tracker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return nil
}
if time.Since(a.UpdatedAt) > 5*DisconnectInterval {
log.Debug("Skipped requset", "peer", common.Bytes2Hex(a.PeerID), "last updated", a.UpdatedAt, "now", time.Now())
log.Trace("Skipped requset", "peer", common.Bytes2Hex(a.PeerID), "last updated", a.UpdatedAt, "now", time.Now())
return nil
}
if a.Left == 0 {
16 changes: 8 additions & 8 deletions consensus/aura/aura.go
@@ -231,7 +231,7 @@ func (e *EpochManager) zoomToAfter(chain consensus.ChainHeaderReader, er consens
panic(fmt.Errorf("proof produced by this engine is invalid: %w", err))
}
epochSet := list.validators
log.Debug("[aura] Updating finality checker with new validator set extracted from epoch", "num", lastTransition.BlockNumber)
log.Trace("[aura] Updating finality checker with new validator set extracted from epoch", "num", lastTransition.BlockNumber)
e.finalityChecker = NewRollingFinality(epochSet)
if proof.SignalNumber >= DEBUG_LOG_FROM {
fmt.Printf("new rolling finality: %d\n", proof.SignalNumber)
@@ -564,7 +564,7 @@ func (c *AuRa) verifyFamily(chain consensus.ChainHeaderReader, e consensus.Epoch
//nolint
if step == parentStep ||
(header.Number.Uint64() >= c.cfg.ValidateStepTransition && step <= parentStep) {
log.Debug("[engine] Multiple blocks proposed for step", "num", parentStep)
log.Trace("[aura] Multiple blocks proposed for step", "num", parentStep)
_ = setNumber
/*
self.validators.report_malicious(
@@ -1096,7 +1096,7 @@ func (c *AuRa) GenerateSeal(chain consensus.ChainHeaderReader, current, parent *
// first check to avoid generating signature most of the time
// (but there's still a race to the `compare_exchange`)
if !c.step.canPropose.Load() {
log.Trace("[engine] Aborting seal generation. Can't propose.")
log.Trace("[aura] Aborting seal generation. Can't propose.")
return nil
}
parentStep, err := headerStep(parent)
@@ -1108,24 +1108,24 @@ func (c *AuRa) GenerateSeal(chain consensus.ChainHeaderReader, current, parent *
// filter messages from old and future steps and different parents
expectedDiff := calculateScore(parentStep, step, 0)
if current.Difficulty.Cmp(expectedDiff.ToBig()) != 0 {
log.Debug(fmt.Sprintf("[engine] Aborting seal generation. The step or empty_steps have changed in the meantime. %d != %d", current.Difficulty, expectedDiff))
log.Trace(fmt.Sprintf("[aura] Aborting seal generation. The step or empty_steps have changed in the meantime. %d != %d", current.Difficulty, expectedDiff))
return nil
}

if parentStep > step {
log.Warn(fmt.Sprintf("[engine] Aborting seal generation for invalid step: %d > %d", parentStep, step))
log.Warn(fmt.Sprintf("[aura] Aborting seal generation for invalid step: %d > %d", parentStep, step))
return nil
}

validators, setNumber, err := c.epochSet(chain, nil, current, nil)
if err != nil {
log.Warn("[engine] Unable to generate seal", "err", err)
log.Warn("[aura] Unable to generate seal", "err", err)
return nil
}

stepProposerAddr, err := stepProposer(validators, current.ParentHash, step, call)
if err != nil {
log.Warn("[engine] Unable to get stepProposer", "err", err)
log.Warn("[aura] Unable to get stepProposer", "err", err)
return nil
}
if stepProposerAddr != current.Coinbase {
@@ -1143,7 +1143,7 @@ func (c *AuRa) GenerateSeal(chain consensus.ChainHeaderReader, current, parent *
/*
signature, err := c.sign(current.bareHash())
if err != nil {
log.Warn("[engine] generate_seal: FAIL: Accounts secret key unavailable.", "err", err)
log.Warn("[aura] generate_seal: FAIL: Accounts secret key unavailable.", "err", err)
return nil
}
*/
4 changes: 2 additions & 2 deletions consensus/ethash/algorithm.go
@@ -168,8 +168,8 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
defer func() {
elapsed := time.Since(start)

- logFn := logger.Debug
- if elapsed > 3*time.Second {
+ logFn := logger.Trace
+ if elapsed > 5*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
4 changes: 2 additions & 2 deletions consensus/ethash/sealer.go
@@ -283,7 +283,7 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []
// any other error, like no pending work or stale mining result).
func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
if s.currentBlock == nil {
s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
s.ethash.config.Log.Warn("Pending work without block", "sealhash", sealhash)
return false
}
// Make sure the work submitted is present
@@ -318,7 +318,7 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash,
if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
select {
case s.results <- solution:
s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
s.ethash.config.Log.Trace("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return true
default:
s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
2 changes: 1 addition & 1 deletion consensus/result.go
@@ -19,7 +19,7 @@ type Cancel struct {
}

func (c *Cancel) CancelFunc() {
log.Debug("cancel mining task", "callers", debug.Callers(10))
log.Trace("Cancel mining task", "callers", debug.Callers(10))
c.cancel()
}

2 changes: 1 addition & 1 deletion core/tx_journal.go
@@ -83,7 +83,7 @@ func (journal *txJournal) load(add func([]types.Transaction) []error) error {
loadBatch := func(txs types.Transactions) {
for _, err := range add(txs) {
if err != nil {
log.Debug("Failed to add journaled transaction", "err", err)
log.Trace("Failed to add journaled transaction", "err", err)
dropped++
}
}
2 changes: 1 addition & 1 deletion core/tx_pool.go
@@ -366,7 +366,7 @@ func (pool *TxPool) loop() {
pool.mu.RUnlock()

if pending != prevPending || queued != prevQueued || stales != prevStales {
log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
log.Trace("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
prevPending, prevQueued, prevStales = pending, queued, stales
}

18 changes: 9 additions & 9 deletions eth/fetcher/block_fetcher.go
@@ -381,14 +381,14 @@ func (f *BlockFetcher) loop() {

count := f.announces[notification.origin] + 1
if count > hashLimit {
log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
log.Trace("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
//blockAnnounceDOSMeter.Mark(1)
break
}
// If we have a valid block number, check that it's potentially useful
if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
log.Trace("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
//blockAnnounceDropMeter.Mark(1)
break
}
@@ -689,14 +689,14 @@ func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.B
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
log.Trace("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
//blockBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
return
}
// Discard any past or too distant blocks
if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
log.Trace("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
//blockBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
return
@@ -715,7 +715,7 @@ func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.B
if f.queueChangeHook != nil {
f.queueChangeHook(hash, true)
}
log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
log.Trace("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
}
}

@@ -726,7 +726,7 @@ func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
hash := block.Hash()

// Run the import on a new thread
log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
log.Trace("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
defer func() {
f.forgetHash(hash)
f.forgetBlock(hash)
@@ -735,7 +735,7 @@ func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
// If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash())
if parent == nil {
log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
log.Trace("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
return
}
// Quickly validate the header and propagate the block if it passes
@@ -750,13 +750,13 @@ func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {

default:
// Something went very wrong, drop the peer
log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
log.Trace("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
f.dropPeer(peer)
return
}
// Run the actual import and log any issues
if _, err := f.insertChain(types.Blocks{block}); err != nil {
log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
log.Trace("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
return
}
// If import succeeded, broadcast the block