From d5c55313917b9cfc8b39ba8a3d9552800f739212 Mon Sep 17 00:00:00 2001 From: kien6034 Date: Sat, 11 May 2024 11:14:00 +0700 Subject: [PATCH] refactore: remove errors directive comparison --- cannon/mipsevm/memory.go | 3 ++- cannon/mipsevm/page.go | 3 ++- op-batcher/batcher/channel_builder.go | 6 +++--- op-batcher/batcher/channel_manager_test.go | 3 ++- op-batcher/batcher/driver.go | 4 ++-- op-chain-ops/cmd/receipt-reference-builder/pull.go | 2 +- op-e2e/actions/l2_batcher.go | 7 ++++--- op-e2e/actions/l2_verifier.go | 2 +- op-node/cmd/batch_decoder/reassemble/reassemble.go | 3 ++- op-node/p2p/store/ip_ban_book.go | 3 ++- op-node/p2p/store/mdbook.go | 3 ++- op-node/p2p/store/peer_ban_book.go | 3 ++- op-node/p2p/store/records_book.go | 2 +- op-node/p2p/store/scorebook.go | 3 ++- op-node/rollup/derive/batch_queue.go | 6 +++--- op-node/rollup/derive/channel_bank.go | 5 +++-- op-node/rollup/derive/channel_in_reader.go | 5 +++-- op-node/rollup/derive/engine_queue.go | 4 ++-- op-node/rollup/derive/l1_retrieval.go | 5 +++-- op-node/rollup/derive/pipeline.go | 4 ++-- op-node/rollup/derive/plasma_data_source_test.go | 3 ++- op-node/rollup/driver/sequencer_test.go | 2 +- op-node/rollup/driver/state.go | 2 +- op-preimage/hints.go | 3 ++- op-preimage/hints_test.go | 2 +- op-preimage/oracle.go | 3 ++- op-program/host/host.go | 4 ++-- op-service/solabi/util.go | 2 +- op-service/sources/batching/batching_test.go | 4 ++-- op-service/sources/batching/multicall.go | 3 ++- op-service/sources/receipts_basic.go | 3 ++- proxyd/cache.go | 3 ++- proxyd/consensus_tracker.go | 7 ++++--- proxyd/server.go | 2 +- 34 files changed, 69 insertions(+), 50 deletions(-) diff --git a/cannon/mipsevm/memory.go b/cannon/mipsevm/memory.go index 600930bd4dce..f6fdffbfd567 100644 --- a/cannon/mipsevm/memory.go +++ b/cannon/mipsevm/memory.go @@ -3,6 +3,7 @@ package mipsevm import ( "encoding/binary" "encoding/json" + "errors" "fmt" "io" "math/bits" @@ -278,7 +279,7 @@ func (m *Memory) SetMemoryRange(addr uint32, r io.Reader) error { p.InvalidateFull() n, err := r.Read(p.Data[pageAddr:]) if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } return err diff --git a/cannon/mipsevm/page.go b/cannon/mipsevm/page.go index 5a1321582f1e..00585f3078bf 100644 --- a/cannon/mipsevm/page.go +++ b/cannon/mipsevm/page.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "encoding/hex" "encoding/json" + "errors" "fmt" "io" "sync" @@ -47,7 +48,7 @@ func (p *Page) UnmarshalJSON(dat []byte) error { defer r.Close() if n, err := r.Read(p[:]); n != PageSize { return fmt.Errorf("epxeted %d bytes, but got %d", PageSize, n) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { return nil } else { return err diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index e364570d48b0..93cfa511325c 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -300,7 +300,7 @@ func (c *ChannelBuilder) outputReadyFrames() error { // When creating a frame from the ready compression data, the frame overhead // will be added to the total output size, so we can add it in the condition. 
for c.co.ReadyBytes()+derive.FrameV0OverHeadSize >= int(c.cfg.MaxFrameSize) { - if err := c.outputFrame(); err == io.EOF { + if err := c.outputFrame(); errors.Is(err, io.EOF) { return nil } else if err != nil { return err @@ -315,7 +315,7 @@ func (c *ChannelBuilder) closeAndOutputAllFrames() error { } for { - if err := c.outputFrame(); err == io.EOF { + if err := c.outputFrame(); errors.Is(err, io.EOF) { return nil } else if err != nil { return err @@ -329,7 +329,7 @@ func (c *ChannelBuilder) closeAndOutputAllFrames() error { func (c *ChannelBuilder) outputFrame() error { var buf bytes.Buffer fn, err := c.co.OutputFrame(&buf, c.cfg.MaxFrameSize) - if err != io.EOF && err != nil { + if !errors.Is(err, io.EOF) && err != nil { return fmt.Errorf("writing frame[%d]: %w", fn, err) } diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index e029e69e658c..2364a05b01e8 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -1,6 +1,7 @@ package batcher import ( + "errors" "io" "math/big" "math/rand" @@ -409,7 +410,7 @@ func ChannelManagerCloseAllTxsFailed(t *testing.T, batchType uint) { drainTxData := func() (txdatas []txData) { for { txdata, err := m.TxData(eth.BlockID{}) - if err == io.EOF { + if errors.Is(err, io.EOF) { return } require.NoError(err, "Expected channel manager to produce valid tx data") diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index ef0a2fbe15ac..481bd14d647e 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -376,7 +376,7 @@ func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txID], receiptsCh c } err := l.publishTxToL1(l.killCtx, queue, receiptsCh) if err != nil { - if err != io.EOF { + if !errors.Is(err, io.EOF) { l.Log.Error("error publishing tx to l1", "err", err) } return @@ -436,7 +436,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t // Collect next transaction data txdata, err := l.state.TxData(l1tip.ID()) - if err == io.EOF { + if errors.Is(err, io.EOF) { l.Log.Trace("no transaction data available") return err } else if err != nil { diff --git a/op-chain-ops/cmd/receipt-reference-builder/pull.go b/op-chain-ops/cmd/receipt-reference-builder/pull.go index d6fa95eef33c..6ea92146a8c1 100644 --- a/op-chain-ops/cmd/receipt-reference-builder/pull.go +++ b/op-chain-ops/cmd/receipt-reference-builder/pull.go @@ -335,7 +335,7 @@ func batchBlockByNumber(ctx context.Context, c *ethclient.Client, blockNumbers [ int(MaxBatchSize), ) for { - if err := batchReq.Fetch(ctx); err == io.EOF { + if err := batchReq.Fetch(ctx); errors.Is(err, io.EOF) { break } else if err != nil { log.Warn("Failed to Fetch Blocks", "Err", err, "Start", blockNumbers[0], "End", blockNumbers[len(blockNumbers)-1]) diff --git a/op-e2e/actions/l2_batcher.go b/op-e2e/actions/l2_batcher.go index 5eb96ee110ac..32b7273375a3 100644 --- a/op-e2e/actions/l2_batcher.go +++ b/op-e2e/actions/l2_batcher.go @@ -5,6 +5,7 @@ import ( "context" "crypto/ecdsa" "crypto/rand" + "errors" "io" "math/big" @@ -239,7 +240,7 @@ func (s *L2Batcher) ActL2BatchSubmit(t Testing, txOpts ...func(tx *types.Dynamic data := new(bytes.Buffer) data.WriteByte(derive.DerivationVersion0) // subtract one, to account for the version byte - if _, err := s.l2ChannelOut.OutputFrame(data, s.l2BatcherCfg.MaxL1TxSize-1); err == io.EOF { + if _, err := s.l2ChannelOut.OutputFrame(data, s.l2BatcherCfg.MaxL1TxSize-1); errors.Is(err, io.EOF) { s.l2ChannelOut = nil 
s.l2Submitting = false } else if err != nil { @@ -342,7 +343,7 @@ func (s *L2Batcher) ActL2BatchSubmitMultiBlob(t Testing, numBlobs int) { // subtract one, to account for the version byte l = s.l2BatcherCfg.MaxL1TxSize - 1 } - if _, err := s.l2ChannelOut.OutputFrame(data, l); err == io.EOF { + if _, err := s.l2ChannelOut.OutputFrame(data, l); errors.Is(err, io.EOF) { s.l2Submitting = false if i < numBlobs-1 { t.Fatalf("failed to fill up %d blobs, only filled %d", numBlobs, i+1) @@ -410,7 +411,7 @@ func (s *L2Batcher) ActL2BatchSubmitGarbage(t Testing, kind GarbageKind) { data.WriteByte(derive.DerivationVersion0) // subtract one, to account for the version byte - if _, err := s.l2ChannelOut.OutputFrame(data, s.l2BatcherCfg.MaxL1TxSize-1); err == io.EOF { + if _, err := s.l2ChannelOut.OutputFrame(data, s.l2BatcherCfg.MaxL1TxSize-1); errors.Is(err, io.EOF) { s.l2ChannelOut = nil s.l2Submitting = false } else if err != nil { diff --git a/op-e2e/actions/l2_verifier.go b/op-e2e/actions/l2_verifier.go index 305f161e8d06..65b05457f9b5 100644 --- a/op-e2e/actions/l2_verifier.go +++ b/op-e2e/actions/l2_verifier.go @@ -226,7 +226,7 @@ func (s *L2Verifier) ActL2PipelineStep(t Testing) { s.l2PipelineIdle = false err := s.derivation.Step(t.Ctx()) - if err == io.EOF || (err != nil && errors.Is(err, derive.EngineELSyncing)) { + if errors.Is(err, io.EOF) || (err != nil && errors.Is(err, derive.EngineELSyncing)) { s.l2PipelineIdle = true return } else if err != nil && errors.Is(err, derive.NotEnoughData) { diff --git a/op-node/cmd/batch_decoder/reassemble/reassemble.go b/op-node/cmd/batch_decoder/reassemble/reassemble.go index c4a76493944c..4c663a1a886f 100644 --- a/op-node/cmd/batch_decoder/reassemble/reassemble.go +++ b/op-node/cmd/batch_decoder/reassemble/reassemble.go @@ -2,6 +2,7 @@ package reassemble import ( "encoding/json" + "errors" "fmt" "io" "log" @@ -113,7 +114,7 @@ func processFrames(cfg Config, rollupCfg *rollup.Config, id derive.ChannelID, fr if ch.IsReady() { br, err := derive.BatchReader(ch.Reader(), spec.MaxRLPBytesPerChannel(ch.HighestBlock().Time)) if err == nil { - for batchData, err := br(); err != io.EOF; batchData, err = br() { + for batchData, err := br(); !errors.Is(err, io.EOF); batchData, err = br() { if err != nil { fmt.Printf("Error reading batchData for channel %v. 
Err: %v\n", id.String(), err) invalidBatches = true diff --git a/op-node/p2p/store/ip_ban_book.go b/op-node/p2p/store/ip_ban_book.go index 475bf21a791c..09607677a20b 100644 --- a/op-node/p2p/store/ip_ban_book.go +++ b/op-node/p2p/store/ip_ban_book.go @@ -3,6 +3,7 @@ package store import ( "context" "encoding/json" + "errors" "net" "time" @@ -71,7 +72,7 @@ func (d *ipBanBook) startGC() { func (d *ipBanBook) GetIPBanExpiration(ip net.IP) (time.Time, error) { rec, err := d.book.getRecord(ip.To16().String()) - if err == UnknownRecordErr { + if errors.Is(err, UnknownRecordErr) { return time.Time{}, UnknownBanErr } if err != nil { diff --git a/op-node/p2p/store/mdbook.go b/op-node/p2p/store/mdbook.go index 6c1362674956..5aaa6088712e 100644 --- a/op-node/p2p/store/mdbook.go +++ b/op-node/p2p/store/mdbook.go @@ -3,6 +3,7 @@ package store import ( "context" "encoding/json" + "errors" "sync/atomic" "time" @@ -69,7 +70,7 @@ func (m *metadataBook) startGC() { func (m *metadataBook) GetPeerMetadata(id peer.ID) (PeerMetadata, error) { record, err := m.book.getRecord(id) // If the record is not found, return an empty PeerMetadata - if err == UnknownRecordErr { + if errors.Is(err, UnknownRecordErr) { return PeerMetadata{}, nil } if err != nil { diff --git a/op-node/p2p/store/peer_ban_book.go b/op-node/p2p/store/peer_ban_book.go index 1e61f8c6eae9..188a26f8c984 100644 --- a/op-node/p2p/store/peer_ban_book.go +++ b/op-node/p2p/store/peer_ban_book.go @@ -3,6 +3,7 @@ package store import ( "context" "encoding/json" + "errors" "time" "github.com/ethereum-optimism/optimism/op-service/clock" @@ -67,7 +68,7 @@ func (d *peerBanBook) startGC() { func (d *peerBanBook) GetPeerBanExpiration(id peer.ID) (time.Time, error) { rec, err := d.book.getRecord(id) - if err == UnknownRecordErr { + if errors.Is(err, UnknownRecordErr) { return time.Time{}, UnknownBanErr } if err != nil { diff --git a/op-node/p2p/store/records_book.go b/op-node/p2p/store/records_book.go index 9afac37abfbb..2d942b3aa1a7 100644 --- a/op-node/p2p/store/records_book.go +++ b/op-node/p2p/store/records_book.go @@ -128,7 +128,7 @@ func (d *recordsBook[K, V]) SetRecord(key K, diff recordDiff[V]) (V, error) { d.Lock() defer d.Unlock() rec, err := d.getRecord(key) - if err == UnknownRecordErr { // instantiate new record if it does not exist yet + if errors.Is(err, UnknownRecordErr) { // instantiate new record if it does not exist yet rec = d.newRecord() } else if err != nil { return d.newRecord(), err diff --git a/op-node/p2p/store/scorebook.go b/op-node/p2p/store/scorebook.go index 68043ef8a9e1..f93eb76aee49 100644 --- a/op-node/p2p/store/scorebook.go +++ b/op-node/p2p/store/scorebook.go @@ -2,6 +2,7 @@ package store import ( "context" + "errors" "sync/atomic" "time" @@ -71,7 +72,7 @@ func (d *scoreBook) startGC() { func (d *scoreBook) GetPeerScores(id peer.ID) (PeerScores, error) { record, err := d.book.getRecord(id) - if err == UnknownRecordErr { + if errors.Is(err, UnknownRecordErr) { return PeerScores{}, nil // return zeroed scores by default } if err != nil { diff --git a/op-node/rollup/derive/batch_queue.go b/op-node/rollup/derive/batch_queue.go index 4ac92bf04b8b..4135fecfac6e 100644 --- a/op-node/rollup/derive/batch_queue.go +++ b/op-node/rollup/derive/batch_queue.go @@ -148,7 +148,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si // Load more data into the batch queue outOfData := false - if batch, err := bq.prev.NextBatch(ctx); err == io.EOF { + if batch, err := bq.prev.NextBatch(ctx); errors.Is(err, io.EOF) { 
outOfData = true } else if err != nil { return nil, false, err @@ -168,9 +168,9 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si // Finally attempt to derive more batches batch, err := bq.deriveNextBatch(ctx, outOfData, parent) - if err == io.EOF && outOfData { + if errors.Is(err, io.EOF) && outOfData { return nil, false, io.EOF - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { return nil, false, NotEnoughData } else if err != nil { return nil, false, err diff --git a/op-node/rollup/derive/channel_bank.go b/op-node/rollup/derive/channel_bank.go index 2a821bb868b1..10bf268b509b 100644 --- a/op-node/rollup/derive/channel_bank.go +++ b/op-node/rollup/derive/channel_bank.go @@ -2,6 +2,7 @@ package derive import ( "context" + "errors" "io" "slices" @@ -180,7 +181,7 @@ func (cb *ChannelBank) tryReadChannelAtIndex(i int) (data []byte, err error) { func (cb *ChannelBank) NextData(ctx context.Context) ([]byte, error) { // Do the read from the channel bank first data, err := cb.Read() - if err == io.EOF { + if errors.Is(err, io.EOF) { // continue - We will attempt to load data into the channel bank } else if err != nil { return nil, err @@ -189,7 +190,7 @@ func (cb *ChannelBank) NextData(ctx context.Context) ([]byte, error) { } // Then load data into the channel bank - if frame, err := cb.prev.NextFrame(ctx); err == io.EOF { + if frame, err := cb.prev.NextFrame(ctx); errors.Is(err, io.EOF) { return nil, io.EOF } else if err != nil { return nil, err diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index 31a5746bb737..c90853659409 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -3,6 +3,7 @@ package derive import ( "bytes" "context" + "errors" "fmt" "io" @@ -65,7 +66,7 @@ func (cr *ChannelInReader) NextChannel() { // It will return a temporary error if it needs to be called again to advance some internal state. func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { if cr.nextBatchFn == nil { - if data, err := cr.prev.NextData(ctx); err == io.EOF { + if data, err := cr.prev.NextData(ctx); errors.Is(err, io.EOF) { return nil, io.EOF } else if err != nil { return nil, err @@ -79,7 +80,7 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { // TODO: can batch be non nil while err == io.EOF // This depends on the behavior of rlp.Stream batchData, err := cr.nextBatchFn() - if err == io.EOF { + if errors.Is(err, io.EOF) { cr.NextChannel() return nil, NotEnoughData } else if err != nil { diff --git a/op-node/rollup/derive/engine_queue.go b/op-node/rollup/derive/engine_queue.go index 392fef4b8ade..f9d95d139ff4 100644 --- a/op-node/rollup/derive/engine_queue.go +++ b/op-node/rollup/derive/engine_queue.go @@ -304,7 +304,7 @@ func (eq *EngineQueue) Step(ctx context.Context) error { // Trying unsafe payload should be done before safe attributes // It allows the unsafe head can move forward while the long-range consolidation is in progress. if eq.unsafePayloads.Len() > 0 { - if err := eq.tryNextUnsafePayload(ctx); err != io.EOF { + if err := eq.tryNextUnsafePayload(ctx); !errors.Is(err, io.EOF) { return err } // EOF error means we can't process the next unsafe payload. Then we should process next safe attributes. 
@@ -331,7 +331,7 @@ func (eq *EngineQueue) Step(ctx context.Context) error { if err := eq.tryFinalizePastL2Blocks(ctx); err != nil { return err } - if next, err := eq.prev.NextAttributes(ctx, eq.ec.PendingSafeL2Head()); err == io.EOF { + if next, err := eq.prev.NextAttributes(ctx, eq.ec.PendingSafeL2Head()); errors.Is(err, io.EOF) { return io.EOF } else if err != nil { return err diff --git a/op-node/rollup/derive/l1_retrieval.go b/op-node/rollup/derive/l1_retrieval.go index 87b68b96522f..f0dfb2908ce1 100644 --- a/op-node/rollup/derive/l1_retrieval.go +++ b/op-node/rollup/derive/l1_retrieval.go @@ -2,6 +2,7 @@ package derive import ( "context" + "errors" "fmt" "io" @@ -49,7 +50,7 @@ func (l1r *L1Retrieval) Origin() eth.L1BlockRef { func (l1r *L1Retrieval) NextData(ctx context.Context) ([]byte, error) { if l1r.datas == nil { next, err := l1r.prev.NextL1Block(ctx) - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, io.EOF } else if err != nil { return nil, err @@ -61,7 +62,7 @@ func (l1r *L1Retrieval) NextData(ctx context.Context) ([]byte, error) { l1r.log.Debug("fetching next piece of data") data, err := l1r.datas.Next(ctx) - if err == io.EOF { + if errors.Is(err, io.EOF) { l1r.datas = nil return nil, io.EOF } else if err != nil { diff --git a/op-node/rollup/derive/pipeline.go b/op-node/rollup/derive/pipeline.go index 405222bba565..606464df4071 100644 --- a/op-node/rollup/derive/pipeline.go +++ b/op-node/rollup/derive/pipeline.go @@ -162,7 +162,7 @@ func (dp *DerivationPipeline) Step(ctx context.Context) error { // if any stages need to be reset, do that first. if dp.resetting < len(dp.stages) { - if err := dp.stages[dp.resetting].Reset(ctx, dp.eng.Origin(), dp.eng.SystemConfig()); err == io.EOF { + if err := dp.stages[dp.resetting].Reset(ctx, dp.eng.Origin(), dp.eng.SystemConfig()); errors.Is(err, io.EOF) { dp.log.Debug("reset of stage completed", "stage", dp.resetting, "origin", dp.eng.Origin()) dp.resetting += 1 return nil @@ -174,7 +174,7 @@ func (dp *DerivationPipeline) Step(ctx context.Context) error { } // Now step the engine queue. It will pull earlier data as needed. - if err := dp.eng.Step(ctx); err == io.EOF { + if err := dp.eng.Step(ctx); errors.Is(err, io.EOF) { // If every stage has returned io.EOF, try to advance the L1 Origin return dp.traversal.AdvanceL1Block(ctx) } else if errors.Is(err, EngineELSyncing) { diff --git a/op-node/rollup/derive/plasma_data_source_test.go b/op-node/rollup/derive/plasma_data_source_test.go index 1e3f4d4375a8..68e0494ae89f 100644 --- a/op-node/rollup/derive/plasma_data_source_test.go +++ b/op-node/rollup/derive/plasma_data_source_test.go @@ -2,6 +2,7 @@ package derive import ( "context" + "errors" "io" "math/big" "math/rand" @@ -260,7 +261,7 @@ func TestPlasmaDataSource(t *testing.T) { break } - for data, err := src.Next(ctx); err != io.EOF; data, err = src.Next(ctx) { + for data, err := src.Next(ctx); !errors.Is(err, io.EOF); data, err = src.Next(ctx) { logger.Info("yielding data") // check that each commitment is resolved require.NoError(t, err) diff --git a/op-node/rollup/driver/sequencer_test.go b/op-node/rollup/driver/sequencer_test.go index 6edf0960f08a..6b2a399b49df 100644 --- a/op-node/rollup/driver/sequencer_test.go +++ b/op-node/rollup/driver/sequencer_test.go @@ -324,7 +324,7 @@ func TestSequencerChaosMonkey(t *testing.T) { // reset errors originErr = nil attrsErr = nil - if engControl.err != mockResetErr { // the mockResetErr requires the sequencer to Reset() to recover. 
+ if !errors.Is(engControl.err, mockResetErr) { // the mockResetErr requires the sequencer to Reset() to recover. engControl.err = nil } engControl.errTyp = derive.BlockInsertOK diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index c206204e03a0..abfe5adba723 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -372,7 +372,7 @@ func (s *Driver) eventLoop() { s.log.Debug("Derivation process step", "onto_origin", s.derivation.Origin(), "attempts", stepAttempts) err := s.derivation.Step(s.driverCtx) stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress. - if err == io.EOF { + if errors.Is(err, io.EOF) { s.log.Debug("Derivation process went idle", "progress", s.derivation.Origin(), "err", err) stepAttempts = 0 s.metrics.SetDerivationIdle(true) diff --git a/op-preimage/hints.go b/op-preimage/hints.go index dd53fc3c85d5..e3ee56227a69 100644 --- a/op-preimage/hints.go +++ b/op-preimage/hints.go @@ -2,6 +2,7 @@ package preimage import ( "encoding/binary" + "errors" "fmt" "io" ) @@ -48,7 +49,7 @@ type HintHandler func(hint string) error func (hr *HintReader) NextHint(router HintHandler) error { var length uint32 if err := binary.Read(hr.rw, binary.BigEndian, &length); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return io.EOF } return fmt.Errorf("failed to read hint length prefix: %w", err) diff --git a/op-preimage/hints_test.go b/op-preimage/hints_test.go index b25acc8f0ca1..e0a5f9bf75f9 100644 --- a/op-preimage/hints_test.go +++ b/op-preimage/hints_test.go @@ -43,7 +43,7 @@ func TestHints(t *testing.T) { got <- hint return nil }) - if err == io.EOF { + if errors.Is(err, io.EOF) { break } require.NoError(t, err) diff --git a/op-preimage/oracle.go b/op-preimage/oracle.go index e1fdac2ca7b2..df0230eb3036 100644 --- a/op-preimage/oracle.go +++ b/op-preimage/oracle.go @@ -3,6 +3,7 @@ package preimage import ( "encoding/binary" "encoding/hex" + "errors" "fmt" "io" ) @@ -51,7 +52,7 @@ type PreimageGetter func(key [32]byte) ([]byte, error) func (o *OracleServer) NextPreimageRequest(getPreimage PreimageGetter) error { var key [32]byte if _, err := io.ReadFull(o.rw, key[:]); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return io.EOF } return fmt.Errorf("failed to read requested pre-image key: %w", err) diff --git a/op-program/host/host.go b/op-program/host/host.go index 2933d0be59b0..7ec69b03875f 100644 --- a/op-program/host/host.go +++ b/op-program/host/host.go @@ -218,7 +218,7 @@ func routeHints(logger log.Logger, hHostRW io.ReadWriter, hinter preimage.HintHa defer close(chErr) for { if err := hintReader.NextHint(hinter); err != nil { - if err == io.EOF || errors.Is(err, fs.ErrClosed) { + if errors.Is(err, io.EOF) || errors.Is(err, fs.ErrClosed) { logger.Debug("closing pre-image hint handler") return } @@ -238,7 +238,7 @@ func launchOracleServer(logger log.Logger, pHostRW io.ReadWriteCloser, getter pr defer close(chErr) for { if err := server.NextPreimageRequest(getter); err != nil { - if err == io.EOF || errors.Is(err, fs.ErrClosed) { + if errors.Is(err, io.EOF) || errors.Is(err, fs.ErrClosed) { logger.Debug("closing pre-image server") return } diff --git a/op-service/solabi/util.go b/op-service/solabi/util.go index ce26aaf76c58..6beb2c9ae870 100644 --- a/op-service/solabi/util.go +++ b/op-service/solabi/util.go @@ -85,7 +85,7 @@ func ReadUint256(r io.Reader) (*big.Int, error) { func EmptyReader(r io.Reader) bool { var t [1]byte n, err := r.Read(t[:]) - return n == 
0 && err == io.EOF + return n == 0 && errors.Is(err, io.EOF) } func WriteSignature(w io.Writer, sig []byte) error { diff --git a/op-service/sources/batching/batching_test.go b/op-service/sources/batching/batching_test.go index c2880dd0c92b..04cd4d5ab6f3 100644 --- a/op-service/sources/batching/batching_test.go +++ b/op-service/sources/batching/batching_test.go @@ -133,7 +133,7 @@ func (tc *batchTestCase) Run(t *testing.T) { } err := iter.Fetch(ctx) - if err == io.EOF { + if errors.Is(err, io.EOF) { require.Equal(t, i, len(tc.batchCalls)-1, "EOF only on last call") } else { require.False(t, iter.Complete()) @@ -147,7 +147,7 @@ func (tc *batchTestCase) Run(t *testing.T) { for i, ec := range tc.singleCalls { ctx := context.Background() err := iter.Fetch(ctx) - if err == io.EOF { + if errors.Is(err, io.EOF) { require.Equal(t, i, len(tc.singleCalls)-1, "EOF only on last call") } else { require.False(t, iter.Complete()) diff --git a/op-service/sources/batching/multicall.go b/op-service/sources/batching/multicall.go index 2a02ce774f73..29422d1b48a8 100644 --- a/op-service/sources/batching/multicall.go +++ b/op-service/sources/batching/multicall.go @@ -2,6 +2,7 @@ package batching import ( "context" + "errors" "fmt" "io" @@ -58,7 +59,7 @@ func (m *MultiCaller) Call(ctx context.Context, block rpcblock.Block, calls ...C m.rpc.CallContext, m.batchSize) for { - if err := fetcher.Fetch(ctx); err == io.EOF { + if err := fetcher.Fetch(ctx); errors.Is(err, io.EOF) { break } else if err != nil { return nil, fmt.Errorf("failed to fetch batch: %w", err) diff --git a/op-service/sources/receipts_basic.go b/op-service/sources/receipts_basic.go index ec63c0565bf2..f2b4337f8842 100644 --- a/op-service/sources/receipts_basic.go +++ b/op-service/sources/receipts_basic.go @@ -2,6 +2,7 @@ package sources import ( "context" + "errors" "io" "sync" @@ -39,7 +40,7 @@ func (f *BasicRPCReceiptsFetcher) FetchReceipts(ctx context.Context, blockInfo e // Fetch all receipts for { - if err := call.Fetch(ctx); err == io.EOF { + if err := call.Fetch(ctx); errors.Is(err, io.EOF) { break } else if err != nil { return nil, err diff --git a/proxyd/cache.go b/proxyd/cache.go index 5add4f23627e..8932512f5202 100644 --- a/proxyd/cache.go +++ b/proxyd/cache.go @@ -3,6 +3,7 @@ package proxyd import ( "context" "encoding/json" + "errors" "strings" "time" @@ -66,7 +67,7 @@ func (c *redisCache) Get(ctx context.Context, key string) (string, error) { val, err := c.rdb.Get(ctx, c.namespaced(key)).Result() redisCacheDurationSumm.WithLabelValues("GET").Observe(float64(time.Since(start).Milliseconds())) - if err == redis.Nil { + if errors.Is(err, redis.Nil) { return "", nil } else if err != nil { RecordRedisError("CacheGet") diff --git a/proxyd/consensus_tracker.go b/proxyd/consensus_tracker.go index 77e0fdba9912..729aea798336 100644 --- a/proxyd/consensus_tracker.go +++ b/proxyd/consensus_tracker.go @@ -3,6 +3,7 @@ package proxyd import ( "context" "encoding/json" + "errors" "fmt" "os" "sync" @@ -192,7 +193,7 @@ func (ct *RedisConsensusTracker) stateHeartbeat() { key := ct.key("mutex") val, err := ct.client.Get(ct.ctx, key).Result() - if err != nil && err != redis.Nil { + if err != nil && !errors.Is(err, redis.Nil) { log.Error("failed to read the lock", "err", err) RecordGroupConsensusError(ct.backendGroup, "read_lock", err) if ct.leader { @@ -226,7 +227,7 @@ func (ct *RedisConsensusTracker) stateHeartbeat() { } else { // retrieve current leader leaderName, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("leader:%s", val))).Result() - if err != 
nil && err != redis.Nil { + if err != nil && !errors.Is(err, redis.Nil) { log.Error("failed to read the remote leader", "err", err) RecordGroupConsensusError(ct.backendGroup, "read_leader", err) return @@ -235,7 +236,7 @@ func (ct *RedisConsensusTracker) stateHeartbeat() { log.Debug("following", "val", val, "leader", leaderName) // retrieve payload val, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("state:%s", val))).Result() - if err != nil && err != redis.Nil { + if err != nil && !errors.Is(err, redis.Nil) { log.Error("failed to read the remote state", "err", err) RecordGroupConsensusError(ct.backendGroup, "read_state", err) return diff --git a/proxyd/server.go b/proxyd/server.go index 527c2e6c1ff8..859bae8203be 100644 --- a/proxyd/server.go +++ b/proxyd/server.go @@ -369,7 +369,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { } batchRes, batchContainsCached, servedBy, err := s.handleBatchRPC(ctx, reqs, isLimited, true) - if err == context.DeadlineExceeded { + if errors.Is(err, context.DeadlineExceeded) { writeRPCError(ctx, w, nil, ErrGatewayTimeout) return }
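
Note: every hunk above makes the same mechanical change, replacing direct sentinel comparisons such as err == io.EOF, err == redis.Nil and err == UnknownRecordErr with errors.Is, which also matches a sentinel that has been wrapped with %w further down the call chain. A minimal sketch of the difference is below; the readFrame helper is hypothetical and only stands in for a stage that wraps io.EOF, it is not part of this patch.

package main

import (
	"errors"
	"fmt"
	"io"
)

// readFrame stands in for any stage that adds context to a sentinel error.
func readFrame() error {
	return fmt.Errorf("reading frame: %w", io.EOF)
}

func main() {
	err := readFrame()

	// A direct comparison only matches the exact sentinel value, so the
	// wrapped error is not recognized and would be treated as a hard failure.
	fmt.Println(err == io.EOF) // false

	// errors.Is walks the Unwrap chain and still finds the sentinel.
	fmt.Println(errors.Is(err, io.EOF)) // true
}

Where the patch flips a negative check (for example err != redis.Nil becoming !errors.Is(err, redis.Nil)), behaviour is unchanged for unwrapped errors; errors.Is only broadens the match to wrapped ones.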