Merge branch 'ethereum:master' into portal

GrapeBaBa authored Nov 20, 2024
2 parents 8bb4364 + aa63692 commit da91ea2
Showing 18 changed files with 404 additions and 130 deletions.
2 changes: 1 addition & 1 deletion core/state/statedb_hooked_test.go
@@ -35,7 +35,7 @@ func TestBurn(t *testing.T) {
 	// the following occur:
 	// 1. contract B creates contract A
 	// 2. contract A is destructed
-	// 3. constract B sends ether to A
+	// 3. contract B sends ether to A
 
 	var burned = new(uint256.Int)
 	s, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
50 changes: 50 additions & 0 deletions core/txpool/blobpool/blobpool.go
@@ -1714,3 +1714,53 @@ func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
 	}
 	return txpool.TxStatusUnknown
 }
+
+// Clear implements txpool.SubPool, removing all tracked transactions
+// from the blob pool and persistent store.
+func (p *BlobPool) Clear() {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	// Manually iterating and deleting every entry is super sub-optimal.
+	// However, Clear is not currently used in production so performance
+	// is not critical at the moment.
+	for hash := range p.lookup.txIndex {
+		id, _ := p.lookup.storeidOfTx(hash)
+		if err := p.store.Delete(id); err != nil {
+			log.Warn("failed to delete blob tx from backing store", "err", err)
+		}
+	}
+	for hash := range p.lookup.blobIndex {
+		id, _ := p.lookup.storeidOfBlob(hash)
+		if err := p.store.Delete(id); err != nil {
+			log.Warn("failed to delete blob from backing store", "err", err)
+		}
+	}
+
+	// Unreserve each tracked account. Ideally, we could just clear the
+	// reservation map in the parent txpool context. However, if we clear in
+	// the parent context, to avoid exposing the subpool lock, we have to lock
+	// the reservations and then lock each subpool.
+	//
+	// This creates the potential for a deadlock situation:
+	//
+	// * TxPool.Clear locks the reservations
+	// * a new transaction is received which locks the subpool mutex
+	// * TxPool.Clear attempts to lock the subpool mutex
+	//
+	// The transaction addition may attempt to reserve the sender address,
+	// which can't happen until Clear releases the reservation lock. Clear
+	// cannot acquire the subpool lock until the transaction addition completes.
+	for acct := range p.index {
+		p.reserve(acct, false)
+	}
+	p.lookup = newLookup()
+	p.index = make(map[common.Address][]*blobTxMeta)
+	p.spent = make(map[common.Address]*uint256.Int)
+
+	var (
+		basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
+		blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
+	)
+	p.evict = newPriceHeap(basefee, blobfee, p.index)
+}
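The comment block in Clear describes a classic AB-BA lock inversion. As a minimal, self-contained sketch of the ordering hazard — the mutex and function names below mirror the roles in the comment but are illustrative only, not geth APIs:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	reservations sync.Mutex // role of the parent pool's reservation lock
	subpoolMu    sync.Mutex // role of one subpool's internal mutex
)

// clearAll plays TxPool.Clear under the rejected "clear in parent context"
// design: it takes the reservation lock first, then a subpool's lock.
func clearAll() {
	reservations.Lock()
	defer reservations.Unlock()
	time.Sleep(10 * time.Millisecond) // widen the race window
	subpoolMu.Lock()                  // blocks: addTx holds it
	defer subpoolMu.Unlock()
}

// addTx plays a transaction arriving at a subpool: it takes the subpool lock
// first, then tries to reserve the sender address.
func addTx() {
	subpoolMu.Lock()
	defer subpoolMu.Unlock()
	time.Sleep(10 * time.Millisecond)
	reservations.Lock() // blocks: clearAll holds it
	defer reservations.Unlock()
}

func main() {
	go clearAll()
	go addTx()
	time.Sleep(time.Second)
	fmt.Println("both goroutines are now blocked: AB-BA deadlock")
}
```

Having each subpool unreserve its own accounts while holding only its own lock sidesteps this: the reservation lock is never held across an attempt to take a subpool lock.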
41 changes: 41 additions & 0 deletions core/txpool/legacypool/legacypool.go
@@ -1961,3 +1961,44 @@ func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
 func numSlots(tx *types.Transaction) int {
 	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
 }
+
+// Clear implements txpool.SubPool, removing all tracked txs from the pool
+// and rotating the journal.
+func (pool *LegacyPool) Clear() {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	// Unreserve each tracked account. Ideally, we could just clear the
+	// reservation map in the parent txpool context. However, if we clear in
+	// the parent context, to avoid exposing the subpool lock, we have to lock
+	// the reservations and then lock each subpool.
+	//
+	// This creates the potential for a deadlock situation:
+	//
+	// * TxPool.Clear locks the reservations
+	// * a new transaction is received which locks the subpool mutex
+	// * TxPool.Clear attempts to lock the subpool mutex
+	//
+	// The transaction addition may attempt to reserve the sender address,
+	// which can't happen until Clear releases the reservation lock. Clear
+	// cannot acquire the subpool lock until the transaction addition completes.
+	for _, tx := range pool.all.remotes {
+		senderAddr, _ := types.Sender(pool.signer, tx)
+		pool.reserve(senderAddr, false)
+	}
+	for localSender := range pool.locals.accounts {
+		pool.reserve(localSender, false)
+	}
+
+	pool.all = newLookup()
+	pool.priced = newPricedList(pool.all)
+	pool.pending = make(map[common.Address]*list)
+	pool.queue = make(map[common.Address]*list)
+
+	if !pool.config.NoLocals && pool.config.Journal != "" {
+		pool.journal = newTxJournal(pool.config.Journal)
+		if err := pool.journal.rotate(pool.local()); err != nil {
+			log.Warn("Failed to rotate transaction journal", "err", err)
+		}
+	}
+}
3 changes: 3 additions & 0 deletions core/txpool/subpool.go
@@ -168,4 +168,7 @@ type SubPool interface {
 	// Status returns the known status (unknown/pending/queued) of a transaction
 	// identified by its hash.
 	Status(hash common.Hash) TxStatus
+
+	// Clear removes all tracked transactions from the pool.
+	Clear()
 }
7 changes: 7 additions & 0 deletions core/txpool/txpool.go
@@ -497,3 +497,10 @@ func (p *TxPool) Sync() error {
 		return errors.New("pool already terminated")
 	}
 }
+
+// Clear removes all tracked txs from the subpools.
+func (p *TxPool) Clear() {
+	for _, subpool := range p.subpools {
+		subpool.Clear()
+	}
+}
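For callers, Clear simply fans out to every registered subpool. As a hedged usage sketch — the helper below is hypothetical and not part of geth, though TxPool.Stats is the existing method that sums pending/queued counts across subpools:

```go
package pooltool

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/txpool"
)

// drainPool is an illustrative helper (not geth API): it clears every subpool
// through the new Clear method and sanity-checks that nothing is left tracked.
func drainPool(pool *txpool.TxPool) error {
	pool.Clear()
	if pending, queued := pool.Stats(); pending != 0 || queued != 0 {
		return fmt.Errorf("pool not empty after Clear: %d pending, %d queued", pending, queued)
	}
	return nil
}
```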
2 changes: 1 addition & 1 deletion core/verkle_witness_test.go
@@ -338,7 +338,7 @@ func TestProcessVerkleInvalidContractCreation(t *testing.T) {
 			}
 		}
 	} else if bytes.Equal(stemStateDiff.Stem[:], tx1ContractStem) {
-		// For this contract creation, check that only the accound header and storage slot 41
+		// For this contract creation, check that only the account header and storage slot 41
 		// are found in the witness.
 		for _, suffixDiff := range stemStateDiff.SuffixDiffs {
 			if suffixDiff.Suffix != 105 && suffixDiff.Suffix != 0 && suffixDiff.Suffix != 1 {
9 changes: 1 addition & 8 deletions eth/catalyst/simulated_beacon.go
@@ -21,7 +21,6 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
-	"math/big"
 	"sync"
 	"time"

@@ -34,7 +33,6 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
-	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
 )

@@ -287,12 +285,7 @@ func (c *SimulatedBeacon) Commit() common.Hash {
 
 // Rollback un-sends previously added transactions.
 func (c *SimulatedBeacon) Rollback() {
-	// Flush all transactions from the transaction pools
-	maxUint256 := new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1)
-	c.eth.TxPool().SetGasTip(maxUint256)
-	// Set the gas tip back to accept new transactions
-	// TODO (Marius van der Wijden): set gas tip to parameter passed by config
-	c.eth.TxPool().SetGasTip(big.NewInt(params.GWei))
+	c.eth.TxPool().Clear()
 }
 
 // Fork sets the head to the provided hash.
2 changes: 1 addition & 1 deletion go.mod
@@ -47,7 +47,6 @@ require (
 	github.com/jackpal/go-nat-pmp v1.0.2
 	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267
 	github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52
-	github.com/kilic/bls12-381 v0.1.0
 	github.com/kylelemons/godebug v1.1.0
 	github.com/mattn/go-colorable v0.1.13
 	github.com/mattn/go-isatty v0.0.20

@@ -123,6 +122,7 @@ require (
 	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
 	github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/kilic/bls12-381 v0.1.0 // indirect
 	github.com/klauspost/compress v1.16.0 // indirect
 	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
12 changes: 12 additions & 0 deletions oss-fuzz.sh
@@ -160,6 +160,10 @@ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
   FuzzG1Add fuzz_g1_add\
   $repo/tests/fuzzers/bls12381/bls12381_test.go
 
+compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
+  FuzzCrossG1Mul fuzz_cross_g1_mul\
+  $repo/tests/fuzzers/bls12381/bls12381_test.go
+
 compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
   FuzzG1Mul fuzz_g1_mul\
   $repo/tests/fuzzers/bls12381/bls12381_test.go

@@ -172,6 +176,10 @@ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
   FuzzG2Add fuzz_g2_add \
   $repo/tests/fuzzers/bls12381/bls12381_test.go
 
+compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
+  FuzzCrossG2Mul fuzz_cross_g2_mul\
+  $repo/tests/fuzzers/bls12381/bls12381_test.go
+
 compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
   FuzzG2Mul fuzz_g2_mul\
   $repo/tests/fuzzers/bls12381/bls12381_test.go

@@ -204,6 +212,10 @@ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
   FuzzCrossG2Add fuzz_cross_g2_add \
   $repo/tests/fuzzers/bls12381/bls12381_test.go
 
+compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
+  FuzzCrossG2MultiExp fuzz_cross_g2_multiexp \
+  $repo/tests/fuzzers/bls12381/bls12381_test.go
+
 compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
   FuzzCrossPairing fuzz_cross_pairing\
   $repo/tests/fuzzers/bls12381/bls12381_test.go
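Each compile_fuzzer entry wires a native Go fuzz target from bls12381_test.go into an OSS-Fuzz binary. The target names above (FuzzCrossG1Mul, FuzzCrossG2Mul, FuzzCrossG2MultiExp) are the real harnesses; the body below is only an illustrative sketch of the shape such a target takes, not geth's actual implementation:

```go
package fuzzdemo

import "testing"

// FuzzCrossG1MulSketch is an illustrative stand-in showing the contract that
// compile_fuzzer relies on: a func(*testing.F) that registers a byte-slice
// fuzz body, which the OSS-Fuzz driver then feeds with mutated inputs.
func FuzzCrossG1MulSketch(f *testing.F) {
	f.Add([]byte{0x01}) // seed corpus entry
	f.Fuzz(func(t *testing.T, data []byte) {
		// In the real harness, data would be decoded into a G1 point and a
		// scalar, the multiplication run through two independent BLS12-381
		// implementations, and the two results compared for divergence.
		_ = data
	})
}
```

The "cross" targets are differential fuzzers: any input on which the two libraries disagree is a finding, which is why each new precompile operation gets its own cross harness here.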
38 changes: 38 additions & 0 deletions rpc/client_test.go
@@ -38,6 +38,8 @@ import (
 )
 
 func TestClientRequest(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	client := DialInProc(server)
@@ -53,6 +55,8 @@ func TestClientRequest(t *testing.T) {
 }
 
 func TestClientResponseType(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	client := DialInProc(server)
@@ -71,6 +75,8 @@ func TestClientResponseType(t *testing.T) {
 
 // This test checks calling a method that returns 'null'.
 func TestClientNullResponse(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 
@@ -91,6 +97,8 @@ func TestClientNullResponse(t *testing.T) {
 
 // This test checks that server-returned errors with code and data come out of Client.Call.
 func TestClientErrorData(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	client := DialInProc(server)
@@ -121,6 +129,8 @@ func TestClientErrorData(t *testing.T) {
 }
 
 func TestClientBatchRequest(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	client := DialInProc(server)
@@ -172,6 +182,8 @@ func TestClientBatchRequest(t *testing.T) {
 // This checks that, for HTTP connections, the length of batch responses is validated to
 // match the request exactly.
 func TestClientBatchRequest_len(t *testing.T) {
+	t.Parallel()
+
 	b, err := json.Marshal([]jsonrpcMessage{
 		{Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)},
 		{Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)},
@@ -188,6 +200,8 @@ func TestClientBatchRequest_len(t *testing.T) {
 	t.Cleanup(s.Close)
 
 	t.Run("too-few", func(t *testing.T) {
+		t.Parallel()
+
 		client, err := Dial(s.URL)
 		if err != nil {
 			t.Fatal("failed to dial test server:", err)
@@ -218,6 +232,8 @@ func TestClientBatchRequest_len(t *testing.T) {
 	})
 
 	t.Run("too-many", func(t *testing.T) {
+		t.Parallel()
+
 		client, err := Dial(s.URL)
 		if err != nil {
 			t.Fatal("failed to dial test server:", err)
@@ -249,6 +265,8 @@ func TestClientBatchRequest_len(t *testing.T) {
 // This checks that the client can handle the case where the server doesn't
 // respond to all requests in a batch.
 func TestClientBatchRequestLimit(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	server.SetBatchLimits(2, 100000)
@@ -285,6 +303,8 @@ func TestClientBatchRequestLimit(t *testing.T) {
 }
 
 func TestClientNotify(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	client := DialInProc(server)
@@ -392,6 +412,8 @@ func testClientCancel(transport string, t *testing.T) {
 }
 
 func TestClientSubscribeInvalidArg(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	client := DialInProc(server)
@@ -422,6 +444,8 @@ func TestClientSubscribeInvalidArg(t *testing.T) {
 }
 
 func TestClientSubscribe(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 	client := DialInProc(server)
@@ -454,6 +478,8 @@ func TestClientSubscribe(t *testing.T) {
 
 // In this test, the connection drops while Subscribe is waiting for a response.
 func TestClientSubscribeClose(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	service := &notificationTestService{
 		gotHangSubscriptionReq: make(chan struct{}),
@@ -498,6 +524,8 @@ func TestClientSubscribeClose(t *testing.T) {
 // This test reproduces https://github.com/ethereum/go-ethereum/issues/17837 where the
 // client hangs during shutdown when Unsubscribe races with Client.Close.
 func TestClientCloseUnsubscribeRace(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 
@@ -540,6 +568,8 @@ func (b *unsubscribeBlocker) readBatch() ([]*jsonrpcMessage, bool, error) {
 // not respond.
 // It reproduces the issue https://github.com/ethereum/go-ethereum/issues/30156
 func TestUnsubscribeTimeout(t *testing.T) {
+	t.Parallel()
+
 	srv := NewServer()
 	srv.RegisterName("nftest", new(notificationTestService))
 
@@ -674,6 +704,8 @@ func TestClientSubscriptionChannelClose(t *testing.T) {
 // This test checks that Client doesn't lock up when a single subscriber
 // doesn't read subscription events.
 func TestClientNotificationStorm(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 
@@ -726,6 +758,8 @@ func TestClientNotificationStorm(t *testing.T) {
 }
 
 func TestClientSetHeader(t *testing.T) {
+	t.Parallel()
+
 	var gotHeader bool
 	srv := newTestServer()
 	httpsrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -762,6 +796,8 @@ func TestClientSetHeader(t *testing.T) {
 }
 
 func TestClientHTTP(t *testing.T) {
+	t.Parallel()
+
 	server := newTestServer()
 	defer server.Stop()
 
@@ -804,6 +840,8 @@ func TestClientHTTP(t *testing.T) {
 }
 
 func TestClientReconnect(t *testing.T) {
+	t.Parallel()
+
 	startServer := func(addr string) (*Server, net.Listener) {
 		srv := newTestServer()
 		l, err := net.Listen("tcp", addr)
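Every hunk in this file applies the same two-line pattern: t.Parallel() as the first statement of the test (or subtest) body, followed by a blank line. A minimal, hedged sketch — standalone example code, not part of geth — of how the standard library schedules such tests:

```go
package example

import (
	"testing"
	"time"
)

// TestParallelSketch shows the convention the diff above applies: calling
// t.Parallel() signals the testing package to pause this test and resume it
// alongside all other parallel tests in the package. Any serial setup must
// happen before the t.Parallel() call.
func TestParallelSketch(t *testing.T) {
	t.Parallel() // runs concurrently with other top-level parallel tests

	for _, name := range []string{"too-few", "too-many"} {
		t.Run(name, func(t *testing.T) {
			t.Parallel() // subtests also run concurrently with each other
			time.Sleep(10 * time.Millisecond)
		})
	}
	// t.Run returns immediately for parallel subtests; the enclosing test
	// waits for all of them after this function body returns, so deferred
	// cleanup in the parent must not assume subtests have finished yet.
}
```

That last point is why the tests above prefer t.Cleanup(s.Close) over defer when subtests run in parallel: cleanup registered via t.Cleanup fires only after all parallel subtests complete.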