Merge branch 'ethereum:master' into portal
GrapeBaBa authored Nov 27, 2024
2 parents 06751b3 + e0deac7 commit 72186c2
Showing 4 changed files with 56 additions and 23 deletions.
cmd/evm/runner.go (14 additions, 9 deletions)

@@ -84,19 +84,20 @@ type execStats struct {
 
 func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, execStats, error) {
 	if bench {
+		testing.Init()
 		// Do one warm-up run
 		output, gasUsed, err := execFunc()
 		result := testing.Benchmark(func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				haveOutput, haveGasUsed, haveErr := execFunc()
 				if !bytes.Equal(haveOutput, output) {
-					b.Fatalf("output differs, have\n%x\nwant%x\n", haveOutput, output)
+					panic(fmt.Sprintf("output differs\nhave %x\nwant %x\n", haveOutput, output))
 				}
 				if haveGasUsed != gasUsed {
-					b.Fatalf("gas differs, have %v want%v", haveGasUsed, gasUsed)
+					panic(fmt.Sprintf("gas differs, have %v want %v", haveGasUsed, gasUsed))
 				}
 				if haveErr != err {
-					b.Fatalf("err differs, have %v want%v", haveErr, err)
+					panic(fmt.Sprintf("err differs, have %v want %v", haveErr, err))
 				}
 			}
 		})
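The two changes in this hunk work together. `testing.Init()` registers the testing package's flags, something `go test` does automatically but a standalone binary like `evm` must do itself before calling `testing.Benchmark`. And `panic` replaces `b.Fatalf` because, outside a test runner, `b.Fatalf` only stops the benchmark goroutine and nothing reports the failure; panicking fails the whole process loudly. A minimal standalone sketch of the same harness, using only the standard library (the inner loop is a stand-in for `execFunc`):

```go
package main

import (
	"fmt"
	"testing"
)

func main() {
	// "go test" calls testing.Init for you; a standalone binary such as
	// cmd/evm must call it before using testing.Benchmark.
	testing.Init()

	result := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			// Stand-in for execFunc(); panic here (not b.Fatalf) if an
			// iteration ever produced an unexpected result.
			sum := 0
			for j := 0; j < 1000; j++ {
				sum += j
			}
			_ = sum
		}
	})
	fmt.Println(result) // e.g. "  733126	      1652 ns/op"
}
```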
@@ -137,7 +138,7 @@ func runCmd(ctx *cli.Context) error {
 	var (
 		tracer      *tracing.Hooks
 		debugLogger *logger.StructLogger
-		statedb     *state.StateDB
+		prestate    *state.StateDB
 		chainConfig *params.ChainConfig
 		sender      = common.BytesToAddress([]byte("sender"))
 		receiver    = common.BytesToAddress([]byte("receiver"))

@@ -174,7 +175,7 @@ func runCmd(ctx *cli.Context) error {
 	defer triedb.Close()
 	genesis := genesisConfig.MustCommit(db, triedb)
 	sdb := state.NewDatabase(triedb, nil)
-	statedb, _ = state.New(genesis.Root(), sdb)
+	prestate, _ = state.New(genesis.Root(), sdb)
 	chainConfig = genesisConfig.Config
 
 	if ctx.String(SenderFlag.Name) != "" {

@@ -231,7 +232,7 @@ func runCmd(ctx *cli.Context) error {
 	}
 	runtimeConfig := runtime.Config{
 		Origin:   sender,
-		State:    statedb,
+		State:    prestate,
 		GasLimit: initialGas,
 		GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
 		Value:    flags.GlobalBig(ctx, ValueFlag.Name),

@@ -274,14 +275,18 @@ func runCmd(ctx *cli.Context) error {
 	if ctx.Bool(CreateFlag.Name) {
 		input = append(code, input...)
 		execFunc = func() ([]byte, uint64, error) {
+			// don't mutate the state!
+			runtimeConfig.State = prestate.Copy()
 			output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
 			return output, gasLeft, err
 		}
 	} else {
 		if len(code) > 0 {
-			statedb.SetCode(receiver, code)
+			prestate.SetCode(receiver, code)
 		}
 		execFunc = func() ([]byte, uint64, error) {
+			// don't mutate the state!
+			runtimeConfig.State = prestate.Copy()
 			output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
 			return output, initialGas - gasLeft, err
 		}

@@ -291,7 +296,7 @@ func runCmd(ctx *cli.Context) error {
 	output, stats, err := timedExec(bench, execFunc)
 
 	if ctx.Bool(DumpFlag.Name) {
-		root, err := statedb.Commit(genesisConfig.Number, true)
+		root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
 		if err != nil {
 			fmt.Printf("Failed to commit changes %v\n", err)
 			return err

@@ -310,7 +315,7 @@ func runCmd(ctx *cli.Context) error {
 			logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
 		}
 		fmt.Fprintln(os.Stderr, "#### LOGS ####")
-		logger.WriteLogs(os.Stderr, statedb.Logs())
+		logger.WriteLogs(os.Stderr, runtimeConfig.State.Logs())
 	}
 
 	if bench || ctx.Bool(StatDumpFlag.Name) {
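The rest of this file's diff is one fix: with benchmarking enabled, `execFunc` runs many times, and the old code let every iteration mutate the same `statedb`, so writes from one run (storage slots, nonces, balances) leaked into the next and distorted the gas numbers. Renaming the variable to `prestate` and copying it per run makes every execution start from the same pristine state. A hedged fragment illustrating the invariant, reusing the diff's own identifiers (`prestate`, `runtimeConfig`, `receiver`, `input`, `initialGas` as set up above):

```go
// Each run gets a disposable copy of the pristine pre-state, so repeated
// executions are independent and report identical gas.
run := func() uint64 {
	cfg := runtimeConfig        // shallow copy of the shared config
	cfg.State = prestate.Copy() // fresh, in-memory state for this run only
	_, gasLeft, err := runtime.Call(receiver, input, &cfg)
	if err != nil {
		panic(err)
	}
	return initialGas - gasLeft
}
if a, b := run(), run(); a != b {
	panic(fmt.Sprintf("gas drifted between runs: %d vs %d", a, b))
}
```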
core/blockchain_reader.go (7 additions, 0 deletions)

@@ -277,6 +277,13 @@ func (bc *BlockChain) GetTransactionLookup(hash common.Hash) (*rawdb.LegacyTxLookupEntry, *types.Transaction, error) {
 	if tx == nil {
 		progress, err := bc.TxIndexProgress()
 		if err != nil {
+			// No error is returned if the transaction indexing progress is unreachable
+			// due to unexpected internal errors. In such cases, it is impossible to
+			// determine whether the transaction does not exist or has simply not been
+			// indexed yet without a progress marker.
+			//
+			// In such scenarios, the transaction is treated as unreachable, though
+			// this is clearly an unintended and unexpected situation.
 			return nil, nil, nil
 		}
 		// The transaction indexing is not finished yet, returning an
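The added comment documents a deliberate fallback: if `TxIndexProgress` itself fails, the lookup degrades to "not found" instead of surfacing an internal error. A caller therefore sees three outcomes: found, not yet indexed (non-nil error), or not found (all nils, which now also covers this fallback). A hedged sketch of caller-side handling; `marshalTx` is a hypothetical helper, not a go-ethereum API:

```go
lookup, tx, err := bc.GetTransactionLookup(hash)
switch {
case err != nil:
	// Indexing still running: the transaction may exist but cannot be
	// resolved yet, so report the transient condition to the client.
	return nil, fmt.Errorf("transaction indexing in progress: %w", err)
case tx == nil:
	// Genuinely unknown hash, or the new fallback above: the progress
	// marker was unreachable and the tx is treated as not found.
	return nil, nil
default:
	return marshalTx(lookup, tx), nil
}
```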
core/state/snapshot/snapshot.go (34 additions, 14 deletions)

@@ -206,24 +206,47 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash) (*Tree, error) {
 		log.Warn("Snapshot maintenance disabled (syncing)")
 		return snap, nil
 	}
+	// Create the building waiter iff the background generation is allowed
+	if !config.NoBuild && !config.AsyncBuild {
+		defer snap.waitBuild()
+	}
 	if err != nil {
 		log.Warn("Failed to load snapshot", "err", err)
-		if config.NoBuild {
-			return nil, err
-		}
-		wait := snap.Rebuild(root)
-		if !config.AsyncBuild {
-			wait()
+		if !config.NoBuild {
+			snap.Rebuild(root)
+			return snap, nil
 		}
-		return snap, nil
+		return nil, err // Bail out the error, don't rebuild automatically.
 	}
 	// Existing snapshot loaded, seed all the layers
-	for ; head != nil; head = head.Parent() {
+	for head != nil {
 		snap.layers[head.Root()] = head
+		head = head.Parent()
 	}
 	return snap, nil
 }
 
+// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
+// to be used by tests to ensure we're testing what we believe we are.
+func (t *Tree) waitBuild() {
+	// Find the rebuild termination channel
+	var done chan struct{}
+
+	t.lock.RLock()
+	for _, layer := range t.layers {
+		if layer, ok := layer.(*diskLayer); ok {
+			done = layer.genPending
+			break
+		}
+	}
+	t.lock.RUnlock()
+
+	// Wait until the snapshot is generated
+	if done != nil {
+		<-done
+	}
+}
+
 // Disable interrupts any pending snapshot generator, deletes all the snapshot
 // layers in memory and marks snapshots disabled globally. In order to resume
 // the snapshot functionality, the caller must invoke Rebuild.
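The restored `waitBuild` is a compact synchronization idiom: the generator signals completion by closing the disk layer's `genPending` channel, and the waiter grabs that channel under a read lock but blocks on it only after releasing the lock. A self-contained sketch of the same pattern; `builder` and its fields are illustrative stand-ins, not go-ethereum types:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type builder struct {
	lock       sync.RWMutex
	genPending chan struct{}
}

func (b *builder) rebuild() {
	b.lock.Lock()
	b.genPending = make(chan struct{})
	b.lock.Unlock()

	go func() {
		time.Sleep(50 * time.Millisecond) // stand-in for snapshot generation
		close(b.genPending)               // signal completion to any waiter
	}()
}

func (b *builder) waitBuild() {
	b.lock.RLock()
	done := b.genPending // snapshot the channel under the read lock
	b.lock.RUnlock()
	if done != nil {
		<-done // block outside the lock until the generator closes it
	}
}

func main() {
	b := new(builder)
	b.rebuild()
	b.waitBuild() // returns only once the background build has completed
	fmt.Println("snapshot generated")
}
```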
@@ -665,9 +688,8 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
-// generator with the given root hash. The returned function blocks until
-// regeneration is complete.
-func (t *Tree) Rebuild(root common.Hash) (wait func()) {
+// generator with the given root hash.
+func (t *Tree) Rebuild(root common.Hash) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
@@ -699,11 +721,9 @@ func (t *Tree) Rebuild(root common.Hash) {
 	// Start generating a new snapshot from scratch on a background thread. The
 	// generator will run a wiper first if there's not one running right now.
 	log.Info("Rebuilding state snapshot")
-	disk := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root)
 	t.layers = map[common.Hash]snapshot{
-		root: disk,
+		root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
 	}
-	return func() { <-disk.genPending }
 }
 
 // AccountIterator creates a new account iterator for the specified root hash and
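Net effect of the revert: `Rebuild` no longer returns a waiter, it only swaps in a fresh disk layer whose generator runs on a background thread, and synchronous behavior now lives in `New` through the deferred `waitBuild`. A hedged fragment of how a caller gets a fully generated snapshot under this API (`diskdb`, `triedb`, and `root` assumed in scope, field values illustrative):

```go
// With AsyncBuild unset, New defers waitBuild, so it returns only after the
// background generator finishes (or an existing snapshot was loaded).
snaps, err := snapshot.New(snapshot.Config{
	CacheSize:  256,   // read-cache allowance, in megabytes
	NoBuild:    false, // allow rebuilding a missing snapshot
	AsyncBuild: false, // block inside New until generation completes
}, diskdb, triedb, root)
if err != nil {
	return err
}
_ = snaps // ready to serve account/storage iterators immediately
```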
core/tracing/hooks.go (1 addition, 0 deletions)

@@ -42,6 +42,7 @@ type StateDB interface {
 	GetBalance(common.Address) *uint256.Int
 	GetNonce(common.Address) uint64
 	GetCode(common.Address) []byte
+	GetCodeHash(common.Address) common.Hash
 	GetState(common.Address, common.Hash) common.Hash
 	GetTransientState(common.Address, common.Hash) common.Hash
 	Exist(common.Address) bool
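This one-line interface addition lets live tracers read an account's code hash directly instead of fetching the full bytecode via `GetCode` and hashing it themselves. A minimal sketch of a hook using the new method; the `tracing.Hooks` and `VMContext` shapes come from this package, while the wiring comment is an assumption:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/ethereum/go-ethereum/core/types"
)

// codeHashHooks logs the recipient's code hash when a transaction starts,
// using the method newly required of tracing.StateDB.
func codeHashHooks() *tracing.Hooks {
	return &tracing.Hooks{
		OnTxStart: func(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
			if to := tx.To(); to != nil {
				fmt.Printf("call to %s, code hash %s\n", to, vm.StateDB.GetCodeHash(*to))
			}
		},
	}
}

func main() {
	_ = codeHashHooks() // in real use, wire into vm.Config{Tracer: ...}
}
```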
