diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go index a0d5a5e76f..f179fc7e06 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -209,7 +209,7 @@ func NewWalletTransactor(w accounts.Wallet, account accounts.Account, chainId *b From: account.Address, Signer: func(address common.Address, transaction *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } if transaction.ChainId() == nil { chainId = transaction.ChainId() diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 5900e9e837..6a8ff10a6c 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -755,11 +755,11 @@ type filterBackend struct { bc *core.BlockChain } -func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } -func (fb *filterBackend) PSMR() mps.PrivateStateMetadataResolver { return fb.bc.PrivateStateManager() } - +func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") } +func (fb *filterBackend) PSMR() mps.PrivateStateMetadataResolver { return fb.bc.PrivateStateManager() } + func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) { if block == rpc.LatestBlockNumber { return fb.bc.CurrentHeader(), nil diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 711ec66a5b..5d32806a0e 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -63,6 +63,8 @@ type TransactOpts struct { Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) + NoSend bool // Do all transact steps but do not send the transaction + // Quorum PrivateFrom string // The public key of the Tessera/Constellation identity to send this tx from. PrivateFor []string // The public keys of the Tessera/Constellation identities this tx is intended for. 
@@ -293,7 +295,9 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i if err != nil { return nil, err } - + if opts.NoSend { + return signedTx, nil + } if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx, PrivateTxArgs{PrivateFor: opts.PrivateFor}); err != nil { return nil, err } diff --git a/accounts/external/backend.go b/accounts/external/backend.go index 4dd59ec187..ae6ae6ba78 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -204,13 +204,14 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio to = &t } args := &core.SendTxArgs{ - Data: &data, - Nonce: hexutil.Uint64(tx.Nonce()), - Value: hexutil.Big(*tx.Value()), - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: hexutil.Big(*tx.GasPrice()), - To: to, - From: common.NewMixedcaseAddress(account.Address), + Data: &data, + Nonce: hexutil.Uint64(tx.Nonce()), + Value: hexutil.Big(*tx.Value()), + Gas: hexutil.Uint64(tx.Gas()), + GasPrice: hexutil.Big(*tx.GasPrice()), + To: to, + From: common.NewMixedcaseAddress(account.Address), + // Quorum IsPrivate: tx.IsPrivate(), } var res signTransactionResult diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 2b0ccf1c89..9977a1957f 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -316,7 +316,6 @@ func (ks *KeyStore) SignTxWithPassphrase(a accounts.Account, passphrase string, return nil, err } defer zeroKey(key.PrivateKey) - if tx.IsPrivate() { return types.SignTx(tx, types.QuorumPrivateTxSigner{}, key.PrivateKey) } diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go index 71f0f9392f..3de3b4091c 100644 --- a/accounts/usbwallet/ledger.go +++ b/accounts/usbwallet/ledger.go @@ -52,8 +52,10 @@ const ( ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration + ledgerOpSignTypedMessage ledgerOpcode = 0x0c // Signs an Ethereum message following the EIP 712 specification ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet + ledgerP1InitTypedMessageData ledgerParam1 = 0x00 // First chunk of Typed Message data ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address @@ -170,6 +172,24 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return w.ledgerSign(path, tx, chainID) } +// SignTypedMessage implements usbwallet.driver, sending the message to the Ledger and +// waiting for the user to sign or deny the transaction. 
+// +// Note: this was introduced in the ledger 1.5.0 firmware +func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) { + // If the Ethereum app doesn't run, abort + if w.offline() { + return nil, accounts.ErrWalletClosed + } + // Ensure the wallet is capable of signing the given transaction + if w.version[0] < 1 && w.version[1] < 5 { + //lint:ignore ST1005 brand name displayed on the console + return nil, fmt.Errorf("Ledger version >= 1.5.0 required for EIP-712 signing (found version v%d.%d.%d)", w.version[0], w.version[1], w.version[2]) + } + // All infos gathered and metadata checks out, request signing + return w.ledgerSignTypedMessage(path, domainHash, messageHash) +} + // ledgerVersion retrieves the current version of the Ethereum wallet app running // on the Ledger wallet. // @@ -367,6 +387,68 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction return sender, signed, nil } +// ledgerSignTypedMessage sends the transaction to the Ledger wallet, and waits for the user +// to confirm or deny the transaction. +// +// The signing protocol is defined as follows: +// +// CLA | INS | P1 | P2 | Lc | Le +// ----+-----+----+-----------------------------+-----+--- +// E0 | 0C | 00 | implementation version : 00 | variable | variable +// +// Where the input is: +// +// Description | Length +// -------------------------------------------------+---------- +// Number of BIP 32 derivations to perform (max 10) | 1 byte +// First derivation index (big endian) | 4 bytes +// ... | 4 bytes +// Last derivation index (big endian) | 4 bytes +// domain hash | 32 bytes +// message hash | 32 bytes +// +// +// +// And the output data is: +// +// Description | Length +// ------------+--------- +// signature V | 1 byte +// signature R | 32 bytes +// signature S | 32 bytes +func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHash []byte, messageHash []byte) ([]byte, error) { + // Flatten the derivation path into the Ledger request + path := make([]byte, 1+4*len(derivationPath)) + path[0] = byte(len(derivationPath)) + for i, component := range derivationPath { + binary.BigEndian.PutUint32(path[1+4*i:], component) + } + // Create the 712 message + payload := append(path, domainHash...) + payload = append(payload, messageHash...) + + // Send the request and wait for the response + var ( + op = ledgerP1InitTypedMessageData + reply []byte + err error + ) + + // Send the message over, ensuring it's processed correctly + reply, err = w.ledgerExchange(ledgerOpSignTypedMessage, op, 0, payload) + + if err != nil { + return nil, err + } + + // Extract the Ethereum signature and do a sanity validation + if len(reply) != crypto.SignatureLength { + return nil, errors.New("reply lacks signature") + } + signature := append(reply[1:], reply[0]) + return signature, nil +} + // ledgerExchange performs a data exchange with the Ledger wallet, sending it a // message and retrieving the response. 
// diff --git a/accounts/usbwallet/trezor.go b/accounts/usbwallet/trezor.go index 0546458c47..c2182b88d0 100644 --- a/accounts/usbwallet/trezor.go +++ b/accounts/usbwallet/trezor.go @@ -185,6 +185,10 @@ func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return w.trezorSign(path, tx, chainID) } +func (w *trezorDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) { + return nil, accounts.ErrNotSupported +} + // trezorDerive sends a derivation request to the Trezor device and returns the // Ethereum address located on that path. func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) { diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index 337162afd8..b1006d24b4 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -68,6 +68,8 @@ type driver interface { // SignTx sends the transaction to the USB device and waits for the user to confirm // or deny the transaction. SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) + + SignTypedMessage(path accounts.DerivationPath, messageHash []byte, domainHash []byte) ([]byte, error) } // wallet represents the common functionality shared by all USB hardware @@ -525,7 +527,46 @@ func (w *wallet) signHash(account accounts.Account, hash []byte) ([]byte, error) // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { - return w.signHash(account, crypto.Keccak256(data)) + + // Unless we are doing 712 signing, simply dispatch to signHash + if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) { + return w.signHash(account, crypto.Keccak256(data)) + } + + // dispatch to 712 signing if the mimetype is TypedData and the format matches + w.stateLock.RLock() // Comms have own mutex, this is for the state fields + defer w.stateLock.RUnlock() + + // If the wallet is closed, abort + if w.device == nil { + return nil, accounts.ErrWalletClosed + } + // Make sure the requested account is contained within + path, ok := w.paths[account.Address] + if !ok { + return nil, accounts.ErrUnknownAccount + } + // All infos gathered and metadata checks out, request signing + <-w.commsLock + defer func() { w.commsLock <- struct{}{} }() + + // Ensure the device isn't screwed with while user confirmation is pending + // TODO(karalabe): remove if hotplug lands on Windows + w.hub.commsLock.Lock() + w.hub.commsPend++ + w.hub.commsLock.Unlock() + + defer func() { + w.hub.commsLock.Lock() + w.hub.commsPend-- + w.hub.commsLock.Unlock() + }() + // Sign the transaction + signature, err := w.driver.SignTypedMessage(path, data[2:34], data[34:66]) + if err != nil { + return nil, err + } + return signature, nil } // SignDataWithPassphrase implements accounts.Wallet, attempting to sign the given diff --git a/cmd/clef/README.md b/cmd/clef/README.md index 50ba93b681..206b96fc08 100644 --- a/cmd/clef/README.md +++ b/cmd/clef/README.md @@ -920,4 +920,4 @@ There are a couple of implementation for a UI. 
We'll try to keep this list up to | QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)| | GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: | | Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: | -| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)| +| Clef UI| https://github.com/ethereum/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)| diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 52a5ee9e4f..b11d088b27 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -253,12 +253,12 @@ var AppHelpFlagGroups = []flags.FlagGroup{ testFlag, advancedMode, acceptFlag, - // + + // Quorum utils.PluginSettingsFlag, utils.PluginLocalVerifyFlag, utils.PluginPublicKeyFlag, utils.PluginSkipVerifyFlag, - // }, }, } @@ -288,12 +288,12 @@ func init() { testFlag, advancedMode, acceptFlag, - // + + // Quorum utils.PluginSettingsFlag, utils.PluginLocalVerifyFlag, utils.PluginPublicKeyFlag, utils.PluginSkipVerifyFlag, - // } app.Action = signer app.Commands = []cli.Command{initCommand, diff --git a/cmd/clef/sign_flow.png b/cmd/clef/sign_flow.png index 93ef81a32e..e7010ab43f 100644 Binary files a/cmd/clef/sign_flow.png and b/cmd/clef/sign_flow.png differ diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go index a4d10dcfdd..596254df91 100644 --- a/cmd/devp2p/dns_cloudflare.go +++ b/cmd/devp2p/dns_cloudflare.go @@ -17,6 +17,7 @@ package main import ( + "context" "fmt" "strings" @@ -79,7 +80,7 @@ func (c *cloudflareClient) checkZone(name string) error { c.zoneID = id } log.Info(fmt.Sprintf("Checking Permissions on zone %s", c.zoneID)) - zone, err := c.ZoneDetails(c.zoneID) + zone, err := c.ZoneDetails(context.Background(), c.zoneID) if err != nil { return err } @@ -112,7 +113,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) records = lrecords log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name)) - entries, err := c.DNSRecords(c.zoneID, cloudflare.DNSRecord{Type: "TXT"}) + entries, err := c.DNSRecords(context.Background(), c.zoneID, cloudflare.DNSRecord{Type: "TXT"}) if err != nil { return err } @@ -134,14 +135,15 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) if path != name { ttl = treeNodeTTL // Max TTL permitted by Cloudflare } - _, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}) + record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl} + _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record) } else if old.Content != val { // Entry already exists, only change its content. log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val)) old.Content = val - err = c.UpdateDNSRecord(c.zoneID, old.ID, old) + err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old) } else { - log.Info(fmt.Sprintf("Skipping %s = %q", path, val)) + log.Debug(fmt.Sprintf("Skipping %s = %q", path, val)) } if err != nil { return fmt.Errorf("failed to publish %s: %v", path, err) @@ -155,7 +157,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) } // Stale entry, nuke it. 
log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content)) - if err := c.DeleteDNSRecord(c.zoneID, entry.ID); err != nil { + if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil { return fmt.Errorf("failed to delete %s: %v", path, err) } } diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go index c5f99529b8..2b6a30e60f 100644 --- a/cmd/devp2p/dns_route53.go +++ b/cmd/devp2p/dns_route53.go @@ -17,16 +17,19 @@ package main import ( + "context" "errors" "fmt" "sort" "strconv" "strings" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/route53" + "github.com/aws/aws-sdk-go-v2/service/route53/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "gopkg.in/urfave/cli.v1" @@ -38,6 +41,7 @@ const ( // https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-changeresourcerecordsets route53ChangeSizeLimit = 32000 route53ChangeCountLimit = 1000 + maxRetryLimit = 60 ) var ( @@ -55,10 +59,15 @@ var ( Name: "zone-id", Usage: "Route53 Zone ID", } + route53RegionFlag = cli.StringFlag{ + Name: "aws-region", + Usage: "AWS Region", + Value: "eu-central-1", + } ) type route53Client struct { - api *route53.Route53 + api *route53.Client zoneID string } @@ -72,15 +81,16 @@ func newRoute53Client(ctx *cli.Context) *route53Client { akey := ctx.String(route53AccessKeyFlag.Name) asec := ctx.String(route53AccessSecretFlag.Name) if akey == "" || asec == "" { - exit(fmt.Errorf("need Route53 Access Key ID and secret proceed")) + exit(fmt.Errorf("need Route53 Access Key ID and secret to proceed")) } - config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")} - session, err := session.NewSession(config) + creds := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(akey, asec, "")) + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithCredentialsProvider(creds)) if err != nil { - exit(fmt.Errorf("can't create AWS session: %v", err)) + exit(fmt.Errorf("can't initialize AWS configuration: %v", err)) } + cfg.Region = ctx.String(route53RegionFlag.Name) return &route53Client{ - api: route53.New(session), + api: route53.NewFromConfig(cfg), zoneID: ctx.String(route53ZoneIDFlag.Name), } } @@ -105,25 +115,43 @@ func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error { return nil } - // Submit change batches. + // Submit all change batches. 
batches := splitChanges(changes, route53ChangeSizeLimit, route53ChangeCountLimit) + changesToCheck := make([]*route53.ChangeResourceRecordSetsOutput, len(batches)) for i, changes := range batches { log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes))) - batch := new(route53.ChangeBatch) - batch.SetChanges(changes) - batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq())) + batch := &types.ChangeBatch{ + Changes: changes, + Comment: aws.String(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq())), + } req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch} - resp, err := c.api.ChangeResourceRecordSets(req) + changesToCheck[i], err = c.api.ChangeResourceRecordSets(context.TODO(), req) if err != nil { return err } + } - log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id)) - wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id} - if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil { - return err + // Wait for all change batches to propagate. + for _, change := range changesToCheck { + log.Info(fmt.Sprintf("Waiting for change request %s", *change.ChangeInfo.Id)) + wreq := &route53.GetChangeInput{Id: change.ChangeInfo.Id} + var count int + for { + wresp, err := c.api.GetChange(context.TODO(), wreq) + if err != nil { + return err + } + + count++ + + if wresp.ChangeInfo.Status == types.ChangeStatusInsync || count >= maxRetryLimit { + break + } + + time.Sleep(30 * time.Second) } } + return nil } @@ -140,7 +168,7 @@ func (c *route53Client) findZoneID(name string) (string, error) { log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name)) var req route53.ListHostedZonesByNameInput for { - resp, err := c.api.ListHostedZonesByName(&req) + resp, err := c.api.ListHostedZonesByName(context.TODO(), &req) if err != nil { return "", err } @@ -149,7 +177,7 @@ func (c *route53Client) findZoneID(name string) (string, error) { return *zone.Id, nil } } - if !*resp.IsTruncated { + if !resp.IsTruncated { break } req.DNSName = resp.NextDNSName @@ -159,7 +187,7 @@ func (c *route53Client) findZoneID(name string) (string, error) { } // computeChanges creates DNS changes for the given record. -func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change { +func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []types.Change { // Convert all names to lowercase. lrecords := make(map[string]string, len(records)) for name, r := range records { @@ -167,25 +195,30 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e } records = lrecords - var changes []*route53.Change - for path, val := range records { + var changes []types.Change + for path, newValue := range records { + prevRecords, exists := existing[path] + prevValue := strings.Join(prevRecords.values, "") + + // prevValue contains quoted strings, encode newValue to compare. + newValue = splitTXT(newValue) + + // Assign TTL. 
ttl := int64(rootTTL) if path != name { ttl = int64(treeNodeTTL) } - prevRecords, exists := existing[path] - prevValue := strings.Join(prevRecords.values, "") if !exists { // Entry is unknown, push a new one - log.Info(fmt.Sprintf("Creating %s = %q", path, val)) - changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val))) - } else if prevValue != val || prevRecords.ttl != ttl { + log.Info(fmt.Sprintf("Creating %s = %s", path, newValue)) + changes = append(changes, newTXTChange("CREATE", path, ttl, newValue)) + } else if prevValue != newValue || prevRecords.ttl != ttl { // Entry already exists, only change its content. - log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val)) - changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val))) + log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue)) + changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue)) } else { - log.Info(fmt.Sprintf("Skipping %s = %q", path, val)) + log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue)) } } @@ -204,21 +237,21 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e } // sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order. -func sortChanges(changes []*route53.Change) { +func sortChanges(changes []types.Change) { score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3} sort.Slice(changes, func(i, j int) bool { - if *changes[i].Action == *changes[j].Action { + if changes[i].Action == changes[j].Action { return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name } - return score[*changes[i].Action] < score[*changes[j].Action] + return score[string(changes[i].Action)] < score[string(changes[j].Action)] }) } // splitChanges splits up DNS changes such that each change batch // is smaller than the given RDATA limit. -func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*route53.Change { +func splitChanges(changes []types.Change, sizeLimit, countLimit int) [][]types.Change { var ( - batches [][]*route53.Change + batches [][]types.Change batchSize int batchCount int ) @@ -241,7 +274,7 @@ func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*rou } // changeSize returns the RDATA size of a DNS change. -func changeSize(ch *route53.Change) int { +func changeSize(ch types.Change) int { size := 0 for _, rr := range ch.ResourceRecordSet.ResourceRecords { if rr.Value != nil { @@ -251,8 +284,8 @@ func changeSize(ch *route53.Change) int { return size } -func changeCount(ch *route53.Change) int { - if *ch.Action == "UPSERT" { +func changeCount(ch types.Change) int { + if ch.Action == types.ChangeActionUpsert { return 2 } return 1 @@ -260,13 +293,17 @@ func changeCount(ch *route53.Change) int { // collectRecords collects all TXT records below the given name. 
func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) { - log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID)) var req route53.ListResourceRecordSetsInput - req.SetHostedZoneId(c.zoneID) + req.HostedZoneId = &c.zoneID existing := make(map[string]recordSet) - err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool { + for page := 0; ; page++ { + log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page) + resp, err := c.api.ListResourceRecordSets(context.TODO(), &req) + if err != nil { + return existing, err + } for _, set := range resp.ResourceRecordSets { - if !isSubdomain(*set.Name, name) || *set.Type != "TXT" { + if !isSubdomain(*set.Name, name) || set.Type != types.RRTypeTxt { continue } s := recordSet{ttl: *set.TTL} @@ -276,28 +313,44 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error name := strings.TrimSuffix(*set.Name, ".") existing[name] = s } - return true - }) - return existing, err + + if !resp.IsTruncated { + break + } + // Set the cursor to the next batch. From the AWS docs: + // + // To display the next page of results, get the values of NextRecordName, + // NextRecordType, and NextRecordIdentifier (if any) from the response. Then submit + // another ListResourceRecordSets request, and specify those values for + // StartRecordName, StartRecordType, and StartRecordIdentifier. + req.StartRecordIdentifier = resp.NextRecordIdentifier + req.StartRecordName = resp.NextRecordName + req.StartRecordType = resp.NextRecordType + } + + return existing, nil } // newTXTChange creates a change to a TXT record. -func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change { - var c route53.Change - var r route53.ResourceRecordSet - var rrs []*route53.ResourceRecord +func newTXTChange(action, name string, ttl int64, values ...string) types.Change { + r := types.ResourceRecordSet{ + Type: types.RRTypeTxt, + Name: &name, + TTL: &ttl, + } + var rrs []types.ResourceRecord for _, val := range values { - rr := new(route53.ResourceRecord) - rr.SetValue(val) + var rr types.ResourceRecord + rr.Value = aws.String(val) rrs = append(rrs, rr) } - r.SetType("TXT") - r.SetName(name) - r.SetTTL(ttl) - r.SetResourceRecords(rrs) - c.SetAction(action) - c.SetResourceRecordSet(&r) - return &c + + r.ResourceRecords = rrs + + return types.Change{ + Action: types.ChangeAction(action), + ResourceRecordSet: &r, + } } // isSubdomain returns true if name is a subdomain of domain. 
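Editor's note on the quoted-value comparison in computeChanges above, which the test file below exercises: Route53 returns TXT data as a sequence of quoted <=255-byte strings, so the raw enrtree value has to be re-encoded the same way before it can be compared against what is already in the zone. Below is a minimal sketch of that encoding; the actual splitTXT helper is not shown in this diff, and the 253-byte chunk size and Go %q quoting here are assumptions:

```go
package main

import (
	"fmt"
	"strings"
)

// quoteTXT re-encodes a raw TXT value into quoted segments, mirroring what
// splitTXT is assumed to do. Route53 stores long TXT data as a sequence of
// quoted strings, so comparisons must happen on the quoted form.
func quoteTXT(value string) string {
	var b strings.Builder
	for len(value) > 0 {
		n := len(value)
		if n > 253 { // assumed chunk size, leaving room inside the 255-byte limit
			n = 253
		}
		fmt.Fprintf(&b, "%q", value[:n])
		value = value[n:]
	}
	return b.String()
}

func main() {
	existing := `"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY"` // as served by Route53
	incoming := "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY" // raw tree entry
	// Only after re-encoding do the two forms compare equal, which is what
	// lets computeChanges classify the record as unchanged and skip it.
	fmt.Println(quoteTXT(incoming) == existing) // true
}
```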
diff --git a/cmd/devp2p/dns_route53_test.go b/cmd/devp2p/dns_route53_test.go index a2ef3791f6..e6eb516e6b 100644 --- a/cmd/devp2p/dns_route53_test.go +++ b/cmd/devp2p/dns_route53_test.go @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go-v2/service/route53/types" ) // This test checks that computeChanges/splitChanges create DNS changes in @@ -43,93 +43,93 @@ func TestRoute53ChangeSort(t *testing.T) { "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", } - wantChanges := []*route53.Change{ + wantChanges := []types.Change{ { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("mhtdo6tmubria2xwg5ludack24.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("UPSERT"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "UPSERT", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 
sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`), }}, TTL: ip(rootTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("DELETE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "DELETE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"), - ResourceRecords: []*route53.ResourceRecord{ + ResourceRecords: []types.ResourceRecord{ {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)}, }, TTL: ip(3333), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("DELETE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "DELETE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-branch:"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, } @@ -141,7 +141,7 @@ func TestRoute53ChangeSort(t *testing.T) { } // Check splitting according to size. - wantSplit := [][]*route53.Change{ + wantSplit := [][]types.Change{ wantChanges[:4], wantChanges[4:6], wantChanges[6:], @@ -152,7 +152,7 @@ func TestRoute53ChangeSort(t *testing.T) { } // Check splitting according to count. - wantSplit = [][]*route53.Change{ + wantSplit = [][]types.Change{ wantChanges[:5], wantChanges[5:], } @@ -162,5 +162,29 @@ func TestRoute53ChangeSort(t *testing.T) { } } +// This test checks that computeChanges compares the quoted value of the records correctly. +func TestRoute53NoChange(t *testing.T) { + // Existing record set. + testTree0 := map[string]recordSet{ + "n": {ttl: rootTTL, values: []string{ + `"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`, + }}, + "2xs2367yhaxjfglzhvawlqd4zy.n": {ttl: treeNodeTTL, values: []string{ + `"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`, + }}, + } + // New set. 
+ testTree1 := map[string]string{ + "n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA", + "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", + } + + var client route53Client + changes := client.computeChanges("n", testTree1, testTree0) + if len(changes) > 0 { + t.Fatalf("wrong changes (got %d, want 0)", len(changes)) + } +} + func sp(s string) *string { return &s } func ip(i int64) *int64 { return &i } diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index f56f0f34e4..50ab7bf983 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -77,7 +77,12 @@ var ( Usage: "Deploy DNS TXT records to Amazon Route53", ArgsUsage: "", Action: dnsToRoute53, - Flags: []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag}, + Flags: []cli.Flag{ + route53AccessKeyFlag, + route53AccessSecretFlag, + route53ZoneIDFlag, + route53RegionFlag, + }, } ) diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index 5e4289d80a..ec98833ab5 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -35,7 +35,31 @@ func TestEthProtocolNegotiation(t *testing.T) { expected uint32 }{ { - conn: &Conn{}, + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, caps: []p2p.Cap{ {Name: "eth", Version: 63}, {Name: "eth", Version: 64}, @@ -44,7 +68,20 @@ func TestEthProtocolNegotiation(t *testing.T) { expected: uint32(65), }, { - conn: &Conn{}, + conn: &Conn{ + ourHighestProtoVersion: 64, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: 64, + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, caps: []p2p.Cap{ {Name: "eth", Version: 0}, {Name: "eth", Version: 89}, @@ -53,7 +90,20 @@ func TestEthProtocolNegotiation(t *testing.T) { expected: uint32(65), }, { - conn: &Conn{}, + conn: &Conn{ + ourHighestProtoVersion: 64, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "wrongProto", Version: 65}, + }, + expected: uint32(64), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, caps: []p2p.Cap{ {Name: "eth", Version: 63}, {Name: "eth", Version: 64}, @@ -66,7 +116,7 @@ func TestEthProtocolNegotiation(t *testing.T) { for i, tt := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { tt.conn.negotiateEthProtocol(tt.caps) - assert.Equal(t, tt.expected, uint32(tt.conn.ethProtocolVersion)) + assert.Equal(t, tt.expected, uint32(tt.conn.negotiatedProtoVersion)) }) } } diff --git a/cmd/devp2p/internal/ethtest/eth66_suite.go b/cmd/devp2p/internal/ethtest/eth66_suite.go index 644fed61eb..0995dcb3e4 100644 --- a/cmd/devp2p/internal/ethtest/eth66_suite.go +++ b/cmd/devp2p/internal/ethtest/eth66_suite.go @@ -26,6 +26,16 @@ import ( "github.com/ethereum/go-ethereum/p2p" ) +// Is_66 checks if the node supports the eth66 protocol version, 
+// and if not, exits the test suite +func (s *Suite) Is_66(t *utesting.T) { + conn := s.dial66(t) + conn.handshake(t) + if conn.negotiatedProtoVersion < 66 { + t.Fail() + } +} + // TestStatus_66 attempts to connect to the given node and exchange // a status message with it on the eth66 protocol, and then check to // make sure the chain head is correct. @@ -205,6 +215,10 @@ func (s *Suite) TestLargeAnnounce_66(t *utesting.T) { } } +func (s *Suite) TestOldAnnounce_66(t *utesting.T) { + s.oldAnnounce(t, s.setupConnection66(t), s.setupConnection66(t)) +} + // TestMaliciousHandshake_66 tries to send malicious data during the handshake. func (s *Suite) TestMaliciousHandshake_66(t *utesting.T) { conn := s.dial66(t) diff --git a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go index b7fa1dce26..4ef349740f 100644 --- a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go +++ b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go @@ -46,6 +46,7 @@ func (s *Suite) dial66(t *utesting.T) *Conn { t.Fatalf("could not dial: %v", err) } conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66}) + conn.ourHighestProtoVersion = 66 return conn } @@ -141,8 +142,11 @@ func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Mess if err != nil { return 0, errorf("could not get headers for inbound header request: %v", err) } - - if err := c.Write(headers); err != nil { + resp := &eth.BlockHeadersPacket66{ + RequestId: reqID, + BlockHeadersPacket: eth.BlockHeadersPacket(headers), + } + if err := c.write66(resp, BlockHeaders{}.Code()); err != nil { return 0, errorf("could not write to connection: %v", err) } default: diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 48010b90dd..66fb8026a0 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -19,6 +19,7 @@ package ethtest import ( "fmt" "net" + "strings" "time" "github.com/davecgh/go-spew/spew" @@ -65,7 +66,7 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e }, nil } -func (s *Suite) EthTests() []utesting.Test { +func (s *Suite) AllEthTests() []utesting.Test { return []utesting.Test{ // status {Name: "Status", Fn: s.TestStatus}, @@ -84,6 +85,8 @@ {Name: "Broadcast_66", Fn: s.TestBroadcast_66}, {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, {Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66}, + {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, + {Name: "TestOldAnnounce_66", Fn: s.TestOldAnnounce_66}, // malicious handshakes + status {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, @@ -97,6 +100,38 @@ } } +func (s *Suite) EthTests() []utesting.Test { + return []utesting.Test{ + {Name: "Status", Fn: s.TestStatus}, + {Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders}, + {Name: "GetBlockBodies", Fn: s.TestGetBlockBodies}, + {Name: "Broadcast", Fn: s.TestBroadcast}, + {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, + {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, + {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, + {Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus}, + {Name: "TestTransactions", Fn: s.TestTransaction}, + {Name: "TestMaliciousTransactions", Fn: s.TestMaliciousTx}, + } +} + +func (s *Suite) Eth66Tests() []utesting.Test { + return []utesting.Test{ + //
only proceed with eth66 test suite if node supports eth 66 protocol + {Name: "Status_66", Fn: s.TestStatus_66}, + {Name: "GetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66}, + {Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66}, + {Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66}, + {Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66}, + {Name: "GetBlockBodies_66", Fn: s.TestGetBlockBodies_66}, + {Name: "Broadcast_66", Fn: s.TestBroadcast_66}, + {Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66}, + {Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66}, + {Name: "TestTransactions_66", Fn: s.TestTransaction_66}, + {Name: "TestMaliciousTransactions_66", Fn: s.TestMaliciousTx_66}, + } +} + // TestStatus attempts to connect to the given node and exchange // a status message with it, and then check to make sure // the chain head is correct. @@ -125,7 +160,7 @@ func (s *Suite) TestMaliciousStatus(t *utesting.T) { // get protoHandshake conn.handshake(t) status := &Status{ - ProtocolVersion: uint32(conn.ethProtocolVersion), + ProtocolVersion: uint32(conn.negotiatedProtoVersion), NetworkID: s.chain.chainConfig.ChainID.Uint64(), TD: largeNumber(2), Head: s.chain.blocks[s.chain.Len()-1].Hash(), @@ -357,6 +392,36 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) { } } +func (s *Suite) TestOldAnnounce(t *utesting.T) { + s.oldAnnounce(t, s.setupConnection(t), s.setupConnection(t)) +} + +func (s *Suite) oldAnnounce(t *utesting.T, sendConn, receiveConn *Conn) { + oldBlockAnnounce := &NewBlock{ + Block: s.chain.blocks[len(s.chain.blocks)/2], + TD: s.chain.blocks[len(s.chain.blocks)/2].Difficulty(), + } + + if err := sendConn.Write(oldBlockAnnounce); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + + switch msg := receiveConn.ReadAndServe(s.chain, timeout*2).(type) { + case *NewBlock: + t.Fatalf("unexpected: block propagated: %s", pretty.Sdump(msg)) + case *NewBlockHashes: + t.Fatalf("unexpected: block announced: %s", pretty.Sdump(msg)) + case *Error: + errMsg := *msg + // check to make sure error is timeout (propagation didn't come through == test successful) + if !strings.Contains(errMsg.String(), "timeout") { + t.Fatalf("unexpected error: %v", pretty.Sdump(msg)) + } + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + func (s *Suite) testAnnounce(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { // Announce the block. 
if err := sendConn.Write(blockAnnouncement); err != nil { @@ -421,6 +486,7 @@ func (s *Suite) dial() (*Conn, error) { {Name: "eth", Version: 64}, {Name: "eth", Version: 65}, } + conn.ourHighestProtoVersion = 65 return &conn, nil } diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index 734adff366..1e2ae77965 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -123,9 +123,10 @@ func (nb NewPooledTransactionHashes) Code() int { return 24 } // Conn represents an individual connection with a peer type Conn struct { *rlpx.Conn - ourKey *ecdsa.PrivateKey - ethProtocolVersion uint - caps []p2p.Cap + ourKey *ecdsa.PrivateKey + negotiatedProtoVersion uint + ourHighestProtoVersion uint + caps []p2p.Cap } func (c *Conn) Read() Message { @@ -236,7 +237,7 @@ func (c *Conn) handshake(t *utesting.T) Message { c.SetSnappy(true) } c.negotiateEthProtocol(msg.Caps) - if c.ethProtocolVersion == 0 { + if c.negotiatedProtoVersion == 0 { t.Fatalf("unexpected eth protocol version") } return msg @@ -254,11 +255,11 @@ func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { if capability.Name != "eth" { continue } - if capability.Version > highestEthVersion && capability.Version <= 65 { + if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { highestEthVersion = capability.Version } } - c.ethProtocolVersion = highestEthVersion + c.negotiatedProtoVersion = highestEthVersion } // statusExchange performs a `Status` message exchange with the given @@ -273,14 +274,15 @@ loop: for { switch msg := c.Read().(type) { case *Status: - if msg.Head != chain.blocks[chain.Len()-1].Hash() { - t.Fatalf("wrong head block in status: %s", msg.Head.String()) + if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want { + t.Fatalf("wrong head block in status, want: %#x (block %d) have %#x", + want, chain.blocks[chain.Len()-1].NumberU64(), have) } - if msg.TD.Cmp(chain.TD(chain.Len())) != 0 { - t.Fatalf("wrong TD in status: %v", msg.TD) + if have, want := msg.TD.Cmp(chain.TD(chain.Len())), 0; have != want { + t.Fatalf("wrong TD in status: have %v want %v", have, want) } - if !reflect.DeepEqual(msg.ForkID, chain.ForkID()) { - t.Fatalf("wrong fork ID in status: %v", msg.ForkID) + if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { + t.Fatalf("wrong fork ID in status: have %v, want %v", have, want) } message = msg break loop @@ -294,13 +296,13 @@ loop: } } // make sure eth protocol version is set for negotiation - if c.ethProtocolVersion == 0 { + if c.negotiatedProtoVersion == 0 { t.Fatalf("eth protocol version must be set in Conn") } if status == nil { // write status message to client status = &Status{ - ProtocolVersion: uint32(c.ethProtocolVersion), + ProtocolVersion: uint32(c.negotiatedProtoVersion), NetworkID: chain.chainConfig.ChainID.Uint64(), TD: chain.TD(chain.Len()), Head: chain.blocks[chain.Len()-1].Hash(), diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index ba97405abc..33de1fdf31 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -173,7 +173,7 @@ func ethFilter(args []string) (nodeFilter, error) { f := func(n nodeJSON) bool { var eth struct { ForkID forkid.ID - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } if n.N.Load(enr.WithEntry("eth", ð)) != nil { return false @@ -186,7 +186,7 @@ func ethFilter(args []string) (nodeFilter, error) { func lesFilter(args []string) (nodeFilter, error) { f := func(n nodeJSON) 
bool { var les struct { - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } return n.N.Load(enr.WithEntry("les", &les)) == nil } @@ -196,7 +196,7 @@ func lesFilter(args []string) (nodeFilter, error) { func snapFilter(args []string) (nodeFilter, error) { f := func(n nodeJSON) bool { var snap struct { - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } return n.N.Load(enr.WithEntry("snap", &snap)) == nil } diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go index ac92818aa4..24a16f0b3c 100644 --- a/cmd/devp2p/rlpxcmd.go +++ b/cmd/devp2p/rlpxcmd.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/ethereum/go-ethereum/rlp" @@ -98,5 +99,10 @@ func rlpxEthTest(ctx *cli.Context) error { if err != nil { exit(err) } - return runTests(ctx, suite.EthTests()) + // check if given node supports eth66, and if so, run eth66 protocol tests as well + is66Failed, _ := utesting.Run(utesting.Test{Name: "Is_66", Fn: suite.Is_66}) + if is66Failed { + return runTests(ctx, suite.EthTests()) + } + return runTests(ctx, suite.AllEthTests()) } diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index e3cee45dd2..b8273c1b83 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -99,16 +99,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return h } var ( - statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre) + statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre) + signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number)) + gaspool = new(core.GasPool) + blockHash = common.Hash{0x13, 0x37} + rejectedTxs []int + includedTxs types.Transactions + gasUsed = uint64(0) + receipts = make(types.Receipts, 0) + txIndex = 0 + // Quorum privateStatedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre) // Quorum private state db - signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number)) - gaspool = new(core.GasPool) - blockHash = common.Hash{0x13, 0x37} - rejectedTxs []int - includedTxs types.Transactions - gasUsed = uint64(0) - receipts = make(types.Receipts, 0) - txIndex = 0 ) gaspool.AddGas(pre.Env.GasLimit) vmContext := vm.BlockContext{ @@ -143,6 +144,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, vmConfig.Tracer = tracer vmConfig.Debug = (tracer != nil) statedb.Prepare(tx.Hash(), blockHash, txIndex) + privateStatedb.Prepare(tx.Hash(), blockHash, txIndex) txContext := core.NewEVMTxContext(msg) snapshot := statedb.Snapshot() evm := vm.NewEVM(vmContext, txContext, statedb, privateStatedb, chainConfig, vmConfig) diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go index c47d05e963..78611fe87e 100644 --- a/cmd/geth/accountcmd_test.go +++ b/cmd/geth/accountcmd_test.go @@ -309,9 +309,8 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given password) func TestUnlockFlagAmbiguous(t *testing.T) { defer SetResetPrivateConfig("ignore")() - datadir := tmpDatadirWithKeystore(t) store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes") - geth := runMinimalGethWithRaftConsensus(t, "--datadir", datadir, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + geth := runMinimalGethWithRaftConsensus(t, 
"--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index e35a9564ed..db29a87e94 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -33,14 +33,11 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/private" "github.com/ethereum/go-ethereum/private/engine/notinuse" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie" "gopkg.in/urfave/cli.v1" ) @@ -67,7 +64,11 @@ It expects the genesis file as argument.`, Usage: "Dumps genesis block JSON configuration to stdout", ArgsUsage: "", Flags: []cli.Flag{ - utils.DataDirFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -115,7 +116,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`, utils.DataDirFlag, }, Description: ` -Checks if the chain config isMPS parameter value. +Checks if the chain config isMPS parameter value. If false, it upgrades the DB to be MPS enabled (builds the trie of private states) and if successful sets isMPS to true. If true, exits displaying an error message that the DB is already MPS.`, Category: "BLOCKCHAIN COMMANDS", @@ -165,27 +166,6 @@ be gzipped.`, Category: "BLOCKCHAIN COMMANDS", Description: ` The export-preimages command export hash preimages to an RLP encoded stream`, - } - copydbCommand = cli.Command{ - Action: utils.MigrateFlags(copyDb), - Name: "copydb", - Usage: "Create a local chain from a target chaindata folder", - ArgsUsage: "", - Flags: []cli.Flag{ - utils.DataDirFlag, - utils.CacheFlag, - utils.SyncModeFlag, - utils.FakePoWFlag, - utils.MainnetFlag, - utils.RopstenFlag, - utils.RinkebyFlag, - utils.TxLookupLimitFlag, - utils.GoerliFlag, - utils.YoloV3Flag, - }, - Category: "BLOCKCHAIN COMMANDS", - Description: ` -The first argument must be the directory containing the blockchain to download from`, } dumpCommand = cli.Command{ Action: utils.MigrateFlags(dump), @@ -282,7 +262,7 @@ func initGenesis(ctx *cli.Context) error { defer stack.Close() for _, name := range []string{"chaindata", "lightchaindata"} { - chaindb, err := stack.OpenDatabase(name, 0, 0, "") + chaindb, err := stack.OpenDatabase(name, 0, 0, "", false) if err != nil { utils.Fatalf("Failed to open database: %v", err) } @@ -297,6 +277,7 @@ func initGenesis(ctx *cli.Context) error { } func dumpGenesis(ctx *cli.Context) error { + // TODO(rjl493456442) support loading from the custom datadir genesis := utils.MakeGenesis(ctx) if genesis == nil { genesis = core.DefaultGenesisBlock() @@ -319,7 +300,7 @@ func importChain(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, db := utils.MakeChain(ctx, stack, false, true) + chain, db := utils.MakeChain(ctx, stack, true) defer db.Close() // Start periodically gathering memory profiles @@ -393,7 +374,7 @@ func mpsdbUpgrade(ctx *cli.Context) error { // initialise the tx manager with the dummy tx mgr private.P = ¬inuse.DBUpgradePrivateTransactionManager{} - chain, db := 
utils.MakeChain(ctx, stack, false, true) + chain, db := utils.MakeChain(ctx, stack, true) if chain.Config().IsMPS { utils.Fatalf("The database is already upgraded to support multiple private states.") @@ -413,7 +394,7 @@ func exportChain(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, _ := utils.MakeChain(ctx, stack, true, true) + chain, _ := utils.MakeChain(ctx, stack, true) start := time.Now() var err error @@ -430,6 +411,9 @@ func exportChain(ctx *cli.Context) error { if first < 0 || last < 0 { utils.Fatalf("Export error: block number must be greater than 0\n") } + if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() { + utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64()) + } err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) } @@ -449,7 +433,7 @@ func importPreimages(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + db := utils.MakeChainDatabase(ctx, stack, false) start := time.Now() if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { @@ -468,7 +452,7 @@ func exportPreimages(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + db := utils.MakeChainDatabase(ctx, stack, true) start := time.Now() if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { @@ -478,81 +462,31 @@ func exportPreimages(ctx *cli.Context) error { return nil } -func copyDb(ctx *cli.Context) error { - // Ensure we have a source chain directory to copy - if len(ctx.Args()) < 1 { - utils.Fatalf("Source chaindata directory path argument missing") - } - if len(ctx.Args()) < 2 { - utils.Fatalf("Source ancient chain directory path argument missing") - } - // Initialize a new chain for the running node to sync into - stack, _ := makeConfigNode(ctx) - defer stack.Close() - - chain, chainDb := utils.MakeChain(ctx, stack, false, false) - syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode) - - var syncBloom *trie.SyncBloom - if syncMode == downloader.FastSync { - syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb) - } - dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil) - - // Create a source peer to satisfy downloader requests from - db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "") - if err != nil { - return err - } - hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false }) - if err != nil { - return err - } - peer := downloader.NewFakePeer("local", db, hc, dl) - if err = dl.RegisterPeer("local", 63, peer); err != nil { - return err - } - // Synchronise with the simulated peer - start := time.Now() - - currentHeader := hc.CurrentHeader() - if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil { - return err - } - for dl.Synchronising() { - time.Sleep(10 * time.Millisecond) - } - fmt.Printf("Database copy done in %v\n", time.Since(start)) - - // Compact the entire database to remove any sync overhead - start = time.Now() - fmt.Println("Compacting entire database...") - if err = db.Compact(nil, nil); err != nil { - utils.Fatalf("Compaction failed: %v", err) - } - fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) 
- return nil -} - func dump(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chainDb := utils.MakeChain(ctx, stack, true, false) - defer chainDb.Close() + db := utils.MakeChainDatabase(ctx, stack, false) for _, arg := range ctx.Args() { - var block *types.Block + var header *types.Header if hashish(arg) { - block = chain.GetBlockByHash(common.HexToHash(arg)) + hash := common.HexToHash(arg) + number := rawdb.ReadHeaderNumber(db, hash) + if number != nil { + header = rawdb.ReadHeader(db, hash, *number) + } } else { - num, _ := strconv.Atoi(arg) - block = chain.GetBlockByNumber(uint64(num)) + number, _ := strconv.Atoi(arg) + hash := rawdb.ReadCanonicalHash(db, uint64(number)) + if hash != (common.Hash{}) { + header = rawdb.ReadHeader(db, hash, uint64(number)) + } } - if block == nil { + if header == nil { fmt.Println("{}") utils.Fatalf("block not found") } else { - state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil) + state, err := state.New(header.Root, state.NewDatabase(db), nil) if err != nil { utils.Fatalf("could not create new state: %v", err) } diff --git a/cmd/geth/config.go b/cmd/geth/config.go index a51ced1448..08270bc644 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -151,6 +151,8 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) } applyMetricConfig(ctx, &cfg) + + // Quorum if cfg.Eth.QuorumLightServer { p2p.SetQLightTLSConfig(readQLightServerTLSConfig(ctx)) // permissioning for the qlight P2P server @@ -168,57 +170,11 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { p2p.SetQLightTLSConfig(readQLightClientTLSConfig(ctx)) stack.Server().SetNewTransportFunc(p2p.NewQlightClientTransport) } + // End Quorum return stack, cfg } -func readQLightClientTLSConfig(ctx *cli.Context) *tls.Config { - if !ctx.GlobalIsSet(utils.QuorumLightTLSFlag.Name) { - return nil - } - if !ctx.GlobalIsSet(utils.QuorumLightTLSCACertsFlag.Name) { - utils.Fatalf("QLight tls flag is set but no client certificate authorities has been provided") - } - tlsConfig, err := qlight.NewTLSConfig(&qlight.TLSConfig{ - CACertFileName: ctx.GlobalString(utils.QuorumLightTLSCACertsFlag.Name), - CertFileName: ctx.GlobalString(utils.QuorumLightTLSCertFlag.Name), - KeyFileName: ctx.GlobalString(utils.QuorumLightTLSKeyFlag.Name), - ServerName: enode.MustParse(ctx.GlobalString(utils.QuorumLightClientServerNodeFlag.Name)).IP().String(), - CipherSuites: ctx.GlobalString(utils.QuorumLightTLSCipherSuitesFlag.Name), - }) - - if err != nil { - utils.Fatalf("Unable to load the specified tls configuration: %v", err) - } - return tlsConfig -} - -func readQLightServerTLSConfig(ctx *cli.Context) *tls.Config { - if !ctx.GlobalIsSet(utils.QuorumLightTLSFlag.Name) { - return nil - } - if !ctx.GlobalIsSet(utils.QuorumLightTLSCertFlag.Name) { - utils.Fatalf("QLight TLS is enabled but no server certificate has been provided") - } - if !ctx.GlobalIsSet(utils.QuorumLightTLSKeyFlag.Name) { - utils.Fatalf("QLight TLS is enabled but no server key has been provided") - } - - tlsConfig, err := qlight.NewTLSConfig(&qlight.TLSConfig{ - CertFileName: ctx.GlobalString(utils.QuorumLightTLSCertFlag.Name), - KeyFileName: ctx.GlobalString(utils.QuorumLightTLSKeyFlag.Name), - ClientCACertFileName: ctx.GlobalString(utils.QuorumLightTLSCACertsFlag.Name), - ClientAuth: ctx.GlobalInt(utils.QuorumLightTLSClientAuthFlag.Name), - CipherSuites: ctx.GlobalString(utils.QuorumLightTLSCipherSuitesFlag.Name), - 
}) - - if err != nil { - utils.Fatalf("QLight TLS - unable to read server tls configuration: %v", err) - } - - return tlsConfig -} - // makeFullNode loads geth configuration and creates the Ethereum backend. func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { stack, cfg := makeConfigNode(ctx) @@ -226,7 +182,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { cfg.Eth.OverrideBerlin = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideBerlinFlag.Name)) } - //Must occur before registering the extension service, as it needs an initialised PTM to be enabled + // Quorum: Must occur before registering the extension service, as it needs an initialised PTM to be enabled if err := quorumInitialisePrivacy(ctx); err != nil { utils.Fatalf("Error initialising Private Transaction Manager: %s", err.Error()) } @@ -332,6 +288,55 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) { } } +// Quorum + +func readQLightClientTLSConfig(ctx *cli.Context) *tls.Config { + if !ctx.GlobalIsSet(utils.QuorumLightTLSFlag.Name) { + return nil + } + if !ctx.GlobalIsSet(utils.QuorumLightTLSCACertsFlag.Name) { + utils.Fatalf("QLight TLS flag is set but no client certificate authorities have been provided") + } + tlsConfig, err := qlight.NewTLSConfig(&qlight.TLSConfig{ + CACertFileName: ctx.GlobalString(utils.QuorumLightTLSCACertsFlag.Name), + CertFileName: ctx.GlobalString(utils.QuorumLightTLSCertFlag.Name), + KeyFileName: ctx.GlobalString(utils.QuorumLightTLSKeyFlag.Name), + ServerName: enode.MustParse(ctx.GlobalString(utils.QuorumLightClientServerNodeFlag.Name)).IP().String(), + CipherSuites: ctx.GlobalString(utils.QuorumLightTLSCipherSuitesFlag.Name), + }) + + if err != nil { + utils.Fatalf("Unable to load the specified tls configuration: %v", err) + } + return tlsConfig +} + +func readQLightServerTLSConfig(ctx *cli.Context) *tls.Config { + if !ctx.GlobalIsSet(utils.QuorumLightTLSFlag.Name) { + return nil + } + if !ctx.GlobalIsSet(utils.QuorumLightTLSCertFlag.Name) { + utils.Fatalf("QLight TLS is enabled but no server certificate has been provided") + } + if !ctx.GlobalIsSet(utils.QuorumLightTLSKeyFlag.Name) { + utils.Fatalf("QLight TLS is enabled but no server key has been provided") + } + + tlsConfig, err := qlight.NewTLSConfig(&qlight.TLSConfig{ + CertFileName: ctx.GlobalString(utils.QuorumLightTLSCertFlag.Name), + KeyFileName: ctx.GlobalString(utils.QuorumLightTLSKeyFlag.Name), + ClientCACertFileName: ctx.GlobalString(utils.QuorumLightTLSCACertsFlag.Name), + ClientAuth: ctx.GlobalInt(utils.QuorumLightTLSClientAuthFlag.Name), + CipherSuites: ctx.GlobalString(utils.QuorumLightTLSCipherSuitesFlag.Name), + }) + + if err != nil { + utils.Fatalf("QLight TLS - unable to read server tls configuration: %v", err) + } + + return tlsConfig +} + // quorumValidateEthService checks quorum features that depend on the ethereum service func quorumValidateEthService(stack *node.Node, isRaft bool) { var ethereum *eth.Ethereum diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 75ca1e0fd7..e4fe6ff019 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -38,7 +38,8 @@ import ( ) var ( - consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag} + consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag} + rpcClientFlags = []cli.Flag{utils.RPCClientToken, utils.RPCClientTLSCert, utils.RPCClientTLSCaCert, utils.RPCClientTLSCipherSuites, utils.RPCClientTLSInsecureSkipVerify} consoleCommand = cli.Command{ @@ -228,20 +229,20
@@ func remoteConsole(ctx *cli.Context) error { Preload: utils.MakeConsolePreloads(ctx), } - consl, err := console.New(config) + console, err := console.New(config) if err != nil { utils.Fatalf("Failed to start the JavaScript console: %v", err) } - defer consl.Stop(false) + defer console.Stop(false) if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { - consl.Evaluate(script) + console.Evaluate(script) return nil } // Otherwise print the welcome screen and enter interactive mode - consl.Welcome() - consl.Interactive() + console.Welcome() + console.Interactive() return nil } diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 76ebbd3236..3d9dbfe307 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -165,40 +165,6 @@ func TestAttachWelcome(t *testing.T) { }) } -func TestHTTPAttachWelcome(t *testing.T) { - defer SetResetPrivateConfig("ignore")() - coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d" - port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P - - datadir := setupIstanbul(t) - defer os.RemoveAll(datadir) - - geth := runGeth(t, - "--datadir", datadir, "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--miner.etherbase", coinbase, "--http", "--http.port", port, "--http.api", "admin,eth,net,web3") - - endpoint := "http://127.0.0.1:" + port - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) -} - -func TestWSAttachWelcome(t *testing.T) { - defer SetResetPrivateConfig("ignore")() - coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d" - port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P - - datadir := setupIstanbul(t) - defer os.RemoveAll(datadir) - - geth := runGeth(t, - "--datadir", datadir, "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--miner.etherbase", coinbase, "--ws", "--ws.port", port, "--ws.api", "admin,eth,net,web3") - - endpoint := "ws://127.0.0.1:" + port - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) -} - func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { // Attach to a running geth note and terminate immediately attach := runGeth(t, "attach", endpoint) diff --git a/cmd/geth/dao_test.go b/cmd/geth/dao_test.go index 4fd650c951..ee5fef7800 100644 --- a/cmd/geth/dao_test.go +++ b/cmd/geth/dao_test.go @@ -124,7 +124,7 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc } // Retrieve the DAO config flag from the database path := filepath.Join(datadir, "geth", "chaindata") - db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "") + db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "", false) if err != nil { t.Fatalf("test %d: failed to open test database: %v", test, err) } diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 8c91ca5669..4172362258 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "time" "github.com/ethereum/go-ethereum/cmd/utils" @@ -28,9 +29,8 @@ import ( "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/leveldb" "github.com/ethereum/go-ethereum/log" - "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/ethereum/go-ethereum/trie" "gopkg.in/urfave/cli.v1" ) @@ -59,52 +59,124 @@ Remove blockchain and state databases`, dbGetCmd, 
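The trailing boolean added to rawdb.NewLevelDBDatabase in the DAO-fork test above is the new readonly switch this patch threads through all of the database constructors. A small sketch of the two modes under that signature (the path and sizing values are illustrative):

import "github.com/ethereum/go-ethereum/core/rawdb"

func openBoth(path string) error {
	// readonly=true: the returned handle rejects writes, which is what
	// the inspection-style commands in this patch ask for.
	ro, err := rawdb.NewLevelDBDatabase(path, 0, 0, "", true)
	if err != nil {
		return err
	}
	ro.Close()

	// readonly=false: a normal writable open, as the test above uses.
	rw, err := rawdb.NewLevelDBDatabase(path, 128, 1024, "", false)
	if err != nil {
		return err
	}
	return rw.Close()
}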
dbDeleteCmd, dbPutCmd, + dbGetSlotsCmd, }, } dbInspectCmd = cli.Command{ Action: utils.MigrateFlags(inspect), Name: "inspect", ArgsUsage: " ", - + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + }, Usage: "Inspect the storage size for each type of data in the database", Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, } dbStatCmd = cli.Command{ - Action: dbStats, + Action: utils.MigrateFlags(dbStats), Name: "stats", Usage: "Print leveldb statistics", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + }, } dbCompactCmd = cli.Command{ - Action: dbCompact, + Action: utils.MigrateFlags(dbCompact), Name: "compact", Usage: "Compact leveldb database. WARNING: May take a very long time", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + utils.CacheFlag, + utils.CacheDatabaseFlag, + }, Description: `This command performs a database compaction. WARNING: This operation may take a very long time to finish, and may cause database corruption if it is aborted during execution'!`, } dbGetCmd = cli.Command{ - Action: dbGet, - Name: "get", - Usage: "Show the value of a database key", - ArgsUsage: "", + Action: utils.MigrateFlags(dbGet), + Name: "get", + Usage: "Show the value of a database key", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + }, Description: "This command looks up the specified database key from the database.", } dbDeleteCmd = cli.Command{ - Action: dbDelete, + Action: utils.MigrateFlags(dbDelete), Name: "delete", Usage: "Delete a database key (WARNING: may corrupt your database)", ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + }, Description: `This command deletes the specified database key from the database. WARNING: This is a low-level operation which may cause database corruption!`, } dbPutCmd = cli.Command{ - Action: dbPut, + Action: utils.MigrateFlags(dbPut), Name: "put", Usage: "Set the value of a database key (WARNING: may corrupt your database)", ArgsUsage: " ", - Description: `This command sets a given database key to the given value. + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + }, + Description: `This command sets a given database key to the given value. 
WARNING: This is a low-level operation which may cause database corruption!`, } + dbGetSlotsCmd = cli.Command{ + Action: utils.MigrateFlags(dbDumpTrie), + Name: "dumptrie", + Usage: "Show the storage key/values of a given storage trie", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + }, + Description: "This command iterates the storage trie at the given root and prints its key/value slots." + } ) func removeDB(ctx *cli.Context) error { @@ -192,10 +264,10 @@ func inspect(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - _, chainDb := utils.MakeChain(ctx, stack, true, false) - defer chainDb.Close() + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() - return rawdb.InspectDatabase(chainDb, prefix, start) + return rawdb.InspectDatabase(db, prefix, start) } func showLeveldbStats(db ethdb.Stater) { @@ -214,48 +286,32 @@ func showLeveldbStats(db ethdb.Stater) { func dbStats(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - path := stack.ResolvePath("chaindata") - db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { - options.ReadOnly = true - }) - if err != nil { - return err - } + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + showLeveldbStats(db) - err = db.Close() - if err != nil { - log.Info("Close err", "error", err) - } return nil } func dbCompact(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - path := stack.ResolvePath("chaindata") - cache := ctx.GlobalInt(utils.CacheFlag.Name) * ctx.GlobalInt(utils.CacheDatabaseFlag.Name) / 100 - db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { - options.OpenFilesCacheCapacity = utils.MakeDatabaseHandles() - options.BlockCacheCapacity = cache / 2 * opt.MiB - options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally - }) - if err != nil { - return err - } + + db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() + + log.Info("Stats before compaction") showLeveldbStats(db) + log.Info("Triggering compaction") - err = db.Compact(nil, nil) - if err != nil { + if err := db.Compact(nil, nil); err != nil { log.Info("Compact err", "error", err) + return err } + log.Info("Stats after compaction") showLeveldbStats(db) - log.Info("Closing db") - err = db.Close() - if err != nil { - log.Info("Close err", "error", err) - } - log.Info("Exiting") - return err + return nil } // dbGet shows the value of a given database key @@ -265,14 +321,10 @@ func dbGet(ctx *cli.Context) error { } stack, _ := makeConfigNode(ctx) defer stack.Close() - path := stack.ResolvePath("chaindata") - db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { - options.ReadOnly = true - }) - if err != nil { - return err - } + + db := utils.MakeChainDatabase(ctx, stack, true) defer db.Close() + key, err := hexutil.Decode(ctx.Args().Get(0)) if err != nil { log.Info("Could not decode the key", "error", err) @@ -283,7 +335,7 @@ func dbGet(ctx *cli.Context) error { log.Info("Get operation failed", "error", err) return err } - fmt.Printf("key %#x:\n\t%#x\n", key, data) + fmt.Printf("key %#x: %#x\n", key, data) return nil } @@ -294,13 +346,19 @@ func dbDelete(ctx *cli.Context) error { } stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + + db := utils.MakeChainDatabase(ctx, stack, false) defer db.Close() + key, err :=
hexutil.Decode(ctx.Args().Get(0)) if err != nil { log.Info("Could not decode the key", "error", err) return err } + data, err := db.Get(key) + if err == nil { + fmt.Printf("Previous value: %#x\n", data) + } if err = db.Delete(key); err != nil { log.Info("Delete operation returned an error", "error", err) return err @@ -315,8 +373,10 @@ func dbPut(ctx *cli.Context) error { } stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + + db := utils.MakeChainDatabase(ctx, stack, false) defer db.Close() + var ( key []byte value []byte @@ -335,7 +395,57 @@ func dbPut(ctx *cli.Context) error { } data, err = db.Get(key) if err == nil { - fmt.Printf("Previous value:\n%#x\n", data) + fmt.Printf("Previous value: %#x\n", data) } return db.Put(key, value) } + +// dbDumpTrie shows the key-value slots of a given storage trie +func dbDumpTrie(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + var ( + root []byte + start []byte + max = int64(-1) + err error + ) + if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil { + log.Info("Could not decode the root", "error", err) + return err + } + stRoot := common.BytesToHash(root) + if ctx.NArg() >= 2 { + if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil { + log.Info("Could not decode the seek position", "error", err) + return err + } + } + if ctx.NArg() >= 3 { + if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil { + log.Info("Could not decode the max count", "error", err) + return err + } + } + theTrie, err := trie.New(stRoot, trie.NewDatabase(db)) + if err != nil { + return err + } + var count int64 + it := trie.NewIterator(theTrie.NodeIterator(start)) + for it.Next() { + if max > 0 && count == max { + fmt.Printf("Exiting after %d values\n", count) + break + } + fmt.Printf(" %d. 
key %#x: %#x\n", count, it.Key, it.Value) + count++ + } + return it.Err +} diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index 3b744f302c..eb8497f51f 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -43,7 +43,7 @@ var customGenesisTests = []struct { "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", "timestamp" : "0x00", - "config" : {"isQuorum":false } + "config" : { "isQuorum":false } }`, query: "eth.getBlock(0).nonce", result: "0x0000000000001338", diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 0453e451a8..fb5904450c 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -19,9 +19,7 @@ package main import ( "fmt" - "math" "os" - godebug "runtime/debug" "sort" "strconv" "strings" @@ -44,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/permission" "github.com/ethereum/go-ethereum/plugin" - gopsutil "github.com/shirou/gopsutil/mem" "gopkg.in/urfave/cli.v1" ) @@ -65,7 +62,6 @@ var ( utils.PasswordFileFlag, utils.BootnodesFlag, utils.DataDirFlag, - utils.RaftLogDirFlag, utils.AncientFlag, utils.MinFreeDiskSpaceFlag, utils.KeyStoreDirFlag, @@ -156,10 +152,12 @@ var ( utils.GpoMaxGasPriceFlag, utils.EWASMInterpreterFlag, utils.EVMInterpreterFlag, + utils.MinerNotifyFullFlag, configFileFlag, // Quorum utils.QuorumImmutabilityThreshold, utils.EnableNodePermissionFlag, + utils.RaftLogDirFlag, utils.RaftModeFlag, utils.RaftBlockTimeFlag, utils.RaftJoinExistingFlag, @@ -275,7 +273,6 @@ func init() { exportCommand, importPreimagesCommand, exportPreimagesCommand, - copydbCommand, removedbCommand, dumpCommand, dumpGenesisCommand, @@ -363,25 +360,6 @@ func prepare(ctx *cli.Context) { log.Info("Dropping default light client cache", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 128) ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(128)) } - // Cap the cache allowance and tune the garbage collector - mem, err := gopsutil.VirtualMemory() - if err == nil { - if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { - log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) - mem.Total = 2 * 1024 * 1024 * 1024 - } - allowance := int(mem.Total / 1024 / 1024 / 3) - if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance { - log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) - ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance)) - } - } - // Ensure Go's GC ignores the database cache for trigger percentage - cache := ctx.GlobalInt(utils.CacheFlag.Name) - gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) - - log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) - godebug.SetGCPercent(int(gogc)) // Start metrics export if enabled utils.SetupMetrics(ctx) @@ -428,7 +406,6 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { if err := stack.PluginManager().AddAccountPluginToBackend(b); err != nil { log.Error("failed to setup account plugin", "err", err) } - log.Info("registered account plugin with account backend") } // Unlock any account specifically requested diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 09cc9ca98f..62ea1a9db5 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -57,6 +57,7 @@ var ( Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, 
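The dumptrie command added to cmd/geth/dbcmd.go above reduces to a plain trie walk. Condensed into a standalone helper (the name dumpStorageSlots is illustrative), with the root, seek position, and cap mirroring its three arguments:

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// dumpStorageSlots prints up to max leaves of the storage trie rooted at
// root, starting from the seek position. This is the loop dumptrie runs.
func dumpStorageSlots(db ethdb.Database, root common.Hash, start []byte, max int64) error {
	t, err := trie.New(root, trie.NewDatabase(db))
	if err != nil {
		return err // the root node is not present in the database
	}
	it := trie.NewIterator(t.NodeIterator(start))
	for count := int64(0); it.Next(); count++ {
		if max > 0 && count == max {
			fmt.Printf("Exiting after %d values\n", count)
			break
		}
		fmt.Printf("%d. key %#x: %#x\n", count, it.Key, it.Value)
	}
	return it.Err
}

From the command line the equivalent invocation is geth db dumptrie followed by the root hash and the optional seek position and max count.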
utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -86,6 +87,7 @@ the trie clean cache with default directory will be deleted. Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -105,6 +107,7 @@ In other words, this command does the snapshot to trie conversion. Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -126,6 +129,7 @@ It's also usable without snapshot enabled. Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -148,16 +152,16 @@ func pruneState(ctx *cli.Context) error { stack, config := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true, false) + chaindb := utils.MakeChainDatabase(ctx, stack, false) defer chaindb.Close() - //Quorum - if chain.Config().IsQuorum { + // Quorum + if config.Eth.Genesis.Config.IsQuorum { log.Error("Can not prune state when using GoQuorum as this has an impact on private state") return errors.New("prune-state is not available when IsQuorum is enabled") } - pruner, err := pruner.NewPruner(chaindb, chain.CurrentBlock().Header(), stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) + pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) if err != nil { log.Error("Failed to open snapshot tree", "error", err) return err @@ -185,10 +189,15 @@ func verifyState(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true, false) + chaindb := utils.MakeChainDatabase(ctx, stack, true) defer chaindb.Close() - snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, chain.CurrentBlock().Root(), false, false, false) + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false) if err != nil { log.Error("Failed to open snapshot tree", "error", err) return err @@ -197,7 +206,7 @@ func verifyState(ctx *cli.Context) error { log.Error("Too many arguments given") return errors.New("too many arguments") } - var root = chain.CurrentBlock().Root() + var root = headBlock.Root() if ctx.NArg() == 1 { root, err = parseRoot(ctx.Args()[0]) if err != nil { @@ -220,19 +229,18 @@ func traverseState(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true, false) + chaindb := utils.MakeChainDatabase(ctx, stack, true) defer chaindb.Close() + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } if ctx.NArg() > 1 { log.Error("Too many arguments given") return errors.New("too many arguments") } - // Use the HEAD root as the default - head := chain.CurrentBlock() - if head == nil { - log.Error("Head block is missing") - return errors.New("head block is missing") - } var ( root common.Hash err error @@ -245,8 +253,8 @@ func traverseState(ctx *cli.Context) error { } log.Info("Start traversing the state", "root", 
root) } else { - root = head.Root() - log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) } triedb := trie.NewDatabase(chaindb) t, err := trie.NewSecure(root, triedb) @@ -313,19 +321,18 @@ func traverseRawState(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true, false) + chaindb := utils.MakeChainDatabase(ctx, stack, true) defer chaindb.Close() + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } if ctx.NArg() > 1 { log.Error("Too many arguments given") return errors.New("too many arguments") } - // Use the HEAD root as the default - head := chain.CurrentBlock() - if head == nil { - log.Error("Head block is missing") - return errors.New("head block is missing") - } var ( root common.Hash err error @@ -338,8 +345,8 @@ func traverseRawState(ctx *cli.Context) error { } log.Info("Start traversing the state", "root", root) } else { - root = head.Root() - log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) } triedb := trie.NewDatabase(chaindb) t, err := trie.NewSecure(root, triedb) diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 40ccf60cec..2373119619 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -161,6 +161,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag, + // Quorum utils.RPCClientToken, utils.RPCClientTLSInsecureSkipVerify, utils.RPCClientTLSCert, @@ -190,6 +191,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MiningEnabledFlag, utils.MinerThreadsFlag, utils.MinerNotifyFlag, + utils.MinerNotifyFullFlag, utils.MinerGasPriceFlag, utils.MinerGasTargetFlag, utils.MinerGasLimitFlag, diff --git a/cmd/puppeth/genesis_test.go b/cmd/puppeth/genesis_test.go index 45937d06d3..563483222b 100644 --- a/cmd/puppeth/genesis_test.go +++ b/cmd/puppeth/genesis_test.go @@ -30,10 +30,9 @@ import ( // Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet. func TestAlethSturebyConverter(t *testing.T) { - // //Quorum - skip this test as MinGasLimit and GasLimitBoundDivisor has been overridden for quorum + // Quorum - skip this test as MinGasLimit and GasLimitBoundDivisor has been overridden for quorum t.Skipf("skipping this test as MinGasLimit and GasLimitBoundDivisor has been overridden for quorum") - // /Quorum blob, err := ioutil.ReadFile("testdata/stureby_geth.json") if err != nil { t.Fatalf("could not read file: %v", err) @@ -73,10 +72,9 @@ func TestAlethSturebyConverter(t *testing.T) { // Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet. 
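The snapshot commands above all switch from chain.CurrentBlock() to reading the head block directly out of the database; rawdb.ReadHeadBlock is introduced near the end of this patch in core/rawdb/accessors_chain.go. The shared pattern, as a sketch:

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// headRoot fetches the default state root to verify or traverse, without
// building a BlockChain around the database first.
func headRoot(db ethdb.Database) (common.Hash, error) {
	headBlock := rawdb.ReadHeadBlock(db)
	if headBlock == nil {
		return common.Hash{}, errors.New("no head block")
	}
	return headBlock.Root(), nil
}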
func TestParitySturebyConverter(t *testing.T) { - // //Quorum - skip this test as MinGasLimit and GasLimitBoundDivisor has been overridden for quorum + // Quorum - skip this test as MinGasLimit and GasLimitBoundDivisor has been overridden for quorum t.Skipf("skipping this test as MinGasLimit and GasLimitBoundDivisor has been overridden for quorum") - // /Quorum blob, err := ioutil.ReadFile("testdata/stureby_geth.json") if err != nil { t.Fatalf("could not read file: %v", err) diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go index be8b6ec600..a76ee19a06 100644 --- a/cmd/puppeth/module_dashboard.go +++ b/cmd/puppeth/module_dashboard.go @@ -518,6 +518,8 @@ var dashboardMascot = []byte("\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x01s\x var dashboardDockerfile = ` FROM mhart/alpine-node:latest +WORKDIR /usr/app + RUN \ npm install connect serve-static && \ \ diff --git a/cmd/utils/diskusage.go b/cmd/utils/diskusage.go index da696de6bf..c3d51765f8 100644 --- a/cmd/utils/diskusage.go +++ b/cmd/utils/diskusage.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build !windows +// +build !windows,!openbsd package utils diff --git a/cmd/utils/diskusage_openbsd.go b/cmd/utils/diskusage_openbsd.go new file mode 100644 index 0000000000..54f759d291 --- /dev/null +++ b/cmd/utils/diskusage_openbsd.go @@ -0,0 +1,43 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build openbsd + +package utils + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +func getFreeDiskSpace(path string) (uint64, error) { + var stat unix.Statfs_t + if err := unix.Statfs(path, &stat); err != nil { + return 0, fmt.Errorf("failed to call Statfs: %v", err) + } + + // Available blocks * size per block = available space in bytes + var bavail = stat.F_bavail + // Not sure if the following check is necessary for OpenBSD + if stat.F_bavail < 0 { + // FreeBSD can have a negative number of blocks available + // because of the grace limit. 
+ bavail = 0 + } + //nolint:unconvert + return uint64(bavail) * uint64(stat.F_bsize), nil +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 1077fe5e16..457684a3ac 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -23,10 +23,12 @@ import ( "fmt" "io" "io/ioutil" + "math" "math/big" "net/url" "os" "path/filepath" + godebug "runtime/debug" "strconv" "strings" "text/tabwriter" @@ -77,6 +79,7 @@ import ( "github.com/ethereum/go-ethereum/private" "github.com/ethereum/go-ethereum/raft" pcsclite "github.com/gballet/go-libpcsclite" + gopsutil "github.com/shirou/gopsutil/mem" "gopkg.in/urfave/cli.v1" ) @@ -448,6 +451,10 @@ var ( Name: "miner.notify", Usage: "Comma separated HTTP URL list to notify of new work packages", } + MinerNotifyFullFlag = cli.BoolFlag{ + Name: "miner.notify.full", + Usage: "Notify with pending block headers instead of work packages", + } MinerGasTargetFlag = cli.Uint64Flag{ Name: "miner.gastarget", Usage: "Target gas floor for mined blocks", @@ -528,6 +535,8 @@ var ( Name: "nocompaction", Usage: "Disables db compaction after import", } + + // Quorum // RPC Client Settings RPCClientToken = cli.StringFlag{ Name: "rpcclitoken", @@ -549,6 +558,8 @@ var ( Name: "rpcclitls.insecureskipverify", Usage: "Disable verification of server's TLS certificate on connection by client", } + // End Quorum + // RPC settings IPCDisabledFlag = cli.BoolFlag{ Name: "ipcdisable", @@ -1758,6 +1769,7 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { if ctx.GlobalIsSet(MinerNotifyFlag.Name) { cfg.Notify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") } + cfg.NotifyFull = ctx.GlobalBool(MinerNotifyFullFlag.Name) if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) } @@ -1781,7 +1793,7 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { } } -func setAuthorizationList(ctx *cli.Context, cfg *eth.Config) { +func setAuthorizationList(ctx *cli.Context, cfg *ethconfig.Config) { authorizationList := ctx.GlobalString(AuthorizationListFlag.Name) if authorizationList == "" { authorizationList = ctx.GlobalString(DeprecatedAuthorizationListFlag.Name) @@ -2002,8 +2014,28 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { setAuthorizationList(ctx, cfg) setLes(ctx, cfg) + // Cap the cache allowance and tune the garbage collector + mem, err := gopsutil.VirtualMemory() + if err == nil { + if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { + log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) + mem.Total = 2 * 1024 * 1024 * 1024 + } + allowance := int(mem.Total / 1024 / 1024 / 3) + if cache := ctx.GlobalInt(CacheFlag.Name); cache > allowance { + log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) + ctx.GlobalSet(CacheFlag.Name, strconv.Itoa(allowance)) + } + } + // Ensure Go's GC ignores the database cache for trigger percentage + cache := ctx.GlobalInt(CacheFlag.Name) + gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) + + log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) + godebug.SetGCPercent(int(gogc)) + // Quorum - err := setQuorumConfig(ctx, cfg) + err = setQuorumConfig(ctx, cfg) if err != nil { Fatalf("Quorum configuration has an error: %v", err) } @@ -2179,7 +2211,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.GlobalIsSet(DataDirFlag.Name) { // Check if we have an already initialized chain and fall back to // 
that if so. Otherwise we need to generate a new genesis spec. - chaindb := MakeChainDatabase(ctx, stack) + chaindb := MakeChainDatabase(ctx, stack, true) if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) { cfg.Genesis = nil // fallback to db content } @@ -2226,11 +2258,15 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend stack.RegisterAPIs(tracers.APIs(backend.ApiBackend)) return backend.ApiBackend, nil } + + // Quorum client, err := stack.Attach() if err != nil { Fatalf("Failed to attach to self: %v", err) } cfg.Istanbul.Client = ethclient.NewClient(client) + // End Quorum + backend, err := eth.New(stack, cfg) if err != nil { Fatalf("Failed to register the Ethereum service: %v", err) @@ -2395,7 +2431,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string { } // MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails. -func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { +func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database { var ( cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 handles = MakeDatabaseHandles() @@ -2405,10 +2441,10 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { ) if ctx.GlobalString(SyncModeFlag.Name) == "light" { name := "lightchaindata" - chainDb, err = stack.OpenDatabase(name, cache, handles, "") + chainDb, err = stack.OpenDatabase(name, cache, handles, "", readonly) } else { name := "chaindata" - chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "") + chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly) } if err != nil { Fatalf("Could not open database: %v", err) @@ -2419,6 +2455,8 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { func MakeGenesis(ctx *cli.Context) *core.Genesis { var genesis *core.Genesis switch { + case ctx.GlobalBool(MainnetFlag.Name): + genesis = core.DefaultGenesisBlock() case ctx.GlobalBool(RopstenFlag.Name): genesis = core.DefaultRopstenGenesisBlock() case ctx.GlobalBool(RinkebyFlag.Name): @@ -2434,13 +2472,10 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis { } // MakeChain creates a chain manager from set command line flags. -func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool, useExist bool) (chain *core.BlockChain, chainDb ethdb.Database) { - var ( - config *params.ChainConfig - err error - ) - chainDb = MakeChainDatabase(ctx, stack) - +func MakeChain(ctx *cli.Context, stack *node.Node, useExist bool) (chain *core.BlockChain, chainDb ethdb.Database) { + var err error + var config *params.ChainConfig + chainDb = MakeChainDatabase(ctx, stack, false) // TODO(rjl493456442) support read-only database if useExist { stored := rawdb.ReadCanonicalHash(chainDb, 0) if (stored == common.Hash{}) { @@ -2541,11 +2576,17 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool, useExist bool) cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)} + + // Quorum var limit *uint64 - if ctx.GlobalIsSet(TxLookupLimitFlag.Name) && !readOnly { + if ctx.GlobalIsSet(TxLookupLimitFlag.Name) { l := ctx.GlobalUint64(TxLookupLimitFlag.Name) limit = &l } + // End Quorum + + // TODO(rjl493456442) disable snapshot generation/wiping if the chain is read only. 
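The readonly parameter added to MakeChainDatabase above is forwarded to node.Node's database constructors, which gain the same trailing argument. A sketch of an inspection-style open (the cache and handle values are placeholders; MakeChainDatabase derives the real ones from --cache and the file-descriptor limit):

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/node"
)

// openForInspection opens an existing chain database without write access,
// mirroring what the read-only callers of MakeChainDatabase now do.
func openForInspection(stack *node.Node, ancient string) (ethdb.Database, error) {
	// 16 MiB cache and 16 file handles are placeholder values.
	return stack.OpenDatabaseWithFreezer("chaindata", 16, 16, ancient, "", true)
}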
+ // Disable transaction indexing/unindexing by default. // TODO should multiple private states work with import/export/inspect commands chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil, limit, nil) if err != nil { diff --git a/common/prque/lazyqueue.go b/common/prque/lazyqueue.go index c74faab7e6..37c2f3bd42 100644 --- a/common/prque/lazyqueue.go +++ b/common/prque/lazyqueue.go @@ -55,7 +55,7 @@ type ( // NewLazyQueue creates a new lazy queue func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue { q := &LazyQueue{ - popQueue: newSstack(nil), + popQueue: newSstack(nil, false), setIndex: setIndex, priority: priority, maxPriority: maxPriority, @@ -71,8 +71,8 @@ func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPrior // Reset clears the contents of the queue func (q *LazyQueue) Reset() { - q.queue[0] = newSstack(q.setIndex0) - q.queue[1] = newSstack(q.setIndex1) + q.queue[0] = newSstack(q.setIndex0, false) + q.queue[1] = newSstack(q.setIndex1, false) } // Refresh performs queue re-evaluation if necessary diff --git a/common/prque/prque.go b/common/prque/prque.go index 3cc5a1adaf..54c78b5fc2 100755 --- a/common/prque/prque.go +++ b/common/prque/prque.go @@ -28,7 +28,12 @@ type Prque struct { // New creates a new priority queue. func New(setIndex SetIndexCallback) *Prque { - return &Prque{newSstack(setIndex)} + return &Prque{newSstack(setIndex, false)} +} + +// NewWrapAround creates a new priority queue with wrap-around priority handling. +func NewWrapAround(setIndex SetIndexCallback) *Prque { + return &Prque{newSstack(setIndex, true)} } // Pushes a value with a given priority into the queue, expanding if necessary. diff --git a/common/prque/sstack.go b/common/prque/sstack.go index 8518af54ff..b06a95413d 100755 --- a/common/prque/sstack.go +++ b/common/prque/sstack.go @@ -31,22 +31,24 @@ type SetIndexCallback func(data interface{}, index int) // the stack (heap) functionality and the Len, Less and Swap methods for the // sortability requirements of the heaps. type sstack struct { - setIndex SetIndexCallback - size int - capacity int - offset int + setIndex SetIndexCallback + size int + capacity int + offset int + wrapAround bool blocks [][]*item active []*item } // Creates a new, empty stack. -func newSstack(setIndex SetIndexCallback) *sstack { +func newSstack(setIndex SetIndexCallback, wrapAround bool) *sstack { result := new(sstack) result.setIndex = setIndex result.active = make([]*item, blockSize) result.blocks = [][]*item{result.active} result.capacity = blockSize + result.wrapAround = wrapAround return result } @@ -94,7 +96,11 @@ func (s *sstack) Len() int { // Compares the priority of two elements of the stack (higher is first). // Required by sort.Interface. func (s *sstack) Less(i, j int) bool { - return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0 + a, b := s.blocks[i/blockSize][i%blockSize].priority, s.blocks[j/blockSize][j%blockSize].priority + if s.wrapAround { + return a-b > 0 + } + return a > b } // Swaps two elements in the stack. Required by sort.Interface. @@ -110,5 +116,5 @@ func (s *sstack) Swap(i, j int) { // Resets the stack, effectively clearing its contents. 
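With wrapAround set, sstack.Less compares by signed difference rather than by magnitude, so an int64 priority counter that overflows still orders correctly: the subtraction wraps too, turning a numerically smaller but logically newer priority into a positive difference. A tiny demonstration using the new constructor:

package main

import (
	"fmt"
	"math"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	q := prque.NewWrapAround(nil)
	q.Push("old", math.MaxInt64-1)   // issued just before the counter wraps
	q.Push("newer", math.MinInt64+2) // issued after the wrap: smaller number, later item

	v, _ := q.Pop()
	fmt.Println(v) // "newer": (MinInt64+2)-(MaxInt64-1) wraps to +4
}

A plain a > b comparison would pop "old" first here, which is exactly the misordering the wrap-around mode avoids.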
func (s *sstack) Reset() { - *s = *newSstack(s.setIndex) + *s = *newSstack(s.setIndex, false) } diff --git a/common/prque/sstack_test.go b/common/prque/sstack_test.go index 2ff093579d..bc6298979c 100644 --- a/common/prque/sstack_test.go +++ b/common/prque/sstack_test.go @@ -21,7 +21,7 @@ func TestSstack(t *testing.T) { for i := 0; i < size; i++ { data[i] = &item{rand.Int(), rand.Int63()} } - stack := newSstack(nil) + stack := newSstack(nil, false) for rep := 0; rep < 2; rep++ { // Push all the data into the stack, pop out every second secs := []*item{} @@ -55,7 +55,7 @@ func TestSstackSort(t *testing.T) { data[i] = &item{rand.Int(), int64(i)} } // Push all the data into the stack - stack := newSstack(nil) + stack := newSstack(nil, false) for _, val := range data { stack.Push(val) } @@ -76,7 +76,7 @@ func TestSstackReset(t *testing.T) { for i := 0; i < size; i++ { data[i] = &item{rand.Int(), rand.Int63()} } - stack := newSstack(nil) + stack := newSstack(nil, false) for rep := 0; rep < 2; rep++ { // Push all the data into the stack, pop out every second secs := []*item{} diff --git a/common/types_test.go b/common/types_test.go index e880581994..756e4a0748 100644 --- a/common/types_test.go +++ b/common/types_test.go @@ -603,6 +603,8 @@ func TestHash_Format(t *testing.T) { } } +// Quorum + func TestFormatTerminalString_Value(t *testing.T) { assert.Equal(t, "", FormatTerminalString(nil)) assert.Equal(t, "", FormatTerminalString([]byte{})) diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index 80379597e2..065e60b90b 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -174,7 +174,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { case <-done: return case <-time.After(3 * time.Second): - logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start))) } } }() diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index 663687b81c..9cc9d535d4 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -726,10 +726,14 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { for i := 0; i < 3; i++ { pend.Add(1) - go func(idx int) { defer pend.Done() - ethash := New(Config{cachedir, 0, 1, false, "", 0, 0, false, ModeNormal, nil}, nil, false) + + config := Config{ + CacheDir: cachedir, + CachesOnDisk: 1, + } + ethash := New(config, nil, false) defer ethash.Close() if err := ethash.verifySeal(nil, block.Header(), false); err != nil { t.Errorf("proc %d: block verification failed: %v", idx, err) diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go index 68b3a84b09..f4d3802e0b 100644 --- a/consensus/ethash/api.go +++ b/consensus/ethash/api.go @@ -89,7 +89,7 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo // // It accepts the miner hash rate and an identifier which must be unique // between nodes. 
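The rename that follows is not merely cosmetic: the RPC layer derives wire-level method names from the Go identifier, so SubmitHashRate becoming SubmitHashrate changes the exposed method from ethash_submitHashRate to ethash_submitHashrate, matching the eth_submitHashrate spelling used elsewhere. A client-side sketch (assuming the ethash API namespace):

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// submitHashrate reports an external miner's hashrate to the node.
func submitHashrate(client *rpc.Client, rate hexutil.Uint64, id common.Hash) (bool, error) {
	var accepted bool
	err := client.Call(&accepted, "ethash_submitHashrate", rate, id)
	return accepted, err
}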
-func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool { +func (api *API) SubmitHashrate(rate hexutil.Uint64, id common.Hash) bool { if api.ethash.remote == nil { return false } diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index bc2e1256a8..995fb2038c 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -48,7 +48,7 @@ var ( two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // sharedEthash is a full instance that can be shared between multiple users. - sharedEthash = New(Config{"", 3, 0, false, "", 1, 0, false, ModeNormal, nil}, nil, false) + sharedEthash *Ethash // algorithmRevision is the data structure version used for file naming. algorithmRevision = 23 @@ -57,6 +57,15 @@ var ( dumpMagic = []uint32{0xbaddcafe, 0xfee1dead} ) +func init() { + sharedConfig := Config{ + PowMode: ModeNormal, + CachesInMem: 3, + DatasetsInMem: 1, + } + sharedEthash = New(sharedConfig, nil, false) +} + // isLittleEndian returns whether the local system is running in little or big // endian byte order. func isLittleEndian() bool { @@ -411,6 +420,10 @@ type Config struct { DatasetsLockMmap bool PowMode Mode + // When set, notifications sent by the remote sealer will + // be block header JSON objects instead of work package arrays. + NotifyFull bool + Log log.Logger `toml:"-"` } @@ -462,6 +475,9 @@ func New(config Config, notify []string, noverify bool) *Ethash { update: make(chan struct{}), hashrate: metrics.NewMeterForced(), } + if config.PowMode == ModeShared { + ethash.shared = sharedEthash + } ethash.remote = startRemoteSealer(ethash, notify, noverify) return ethash } @@ -469,15 +485,7 @@ func New(config Config, notify []string, noverify bool) *Ethash { // NewTester creates a small sized ethash PoW scheme useful only for testing // purposes. func NewTester(notify []string, noverify bool) *Ethash { - ethash := &Ethash{ - config: Config{PowMode: ModeTest, Log: log.Root()}, - caches: newlru("cache", 1, newCache), - datasets: newlru("dataset", 1, newDataset), - update: make(chan struct{}), - hashrate: metrics.NewMeterForced(), - } - ethash.remote = startRemoteSealer(ethash, notify, noverify) - return ethash + return New(Config{PowMode: ModeTest}, notify, noverify) } // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts @@ -537,7 +545,6 @@ func NewShared() *Ethash { // Close closes the exit channel to notify all backend threads exiting. func (ethash *Ethash) Close() error { - var err error ethash.closeOnce.Do(func() { // Short circuit if the exit channel is not allocated. 
if ethash.remote == nil { @@ -546,7 +553,7 @@ func (ethash *Ethash) Close() error { close(ethash.remote.requestExit) <-ethash.remote.exitCh }) - return err + return nil } // cache tries to retrieve a verification cache for the specified block number diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go index 2639707eb2..382eefeecf 100644 --- a/consensus/ethash/ethash_test.go +++ b/consensus/ethash/ethash_test.go @@ -62,7 +62,14 @@ func TestCacheFileEvict(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(tmpdir) - e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}, nil, false) + + config := Config{ + CachesInMem: 3, + CachesOnDisk: 10, + CacheDir: tmpdir, + PowMode: ModeTest, + } + e := New(config, nil, false) defer e.Close() workers := 8 @@ -128,7 +135,7 @@ func TestRemoteSealer(t *testing.T) { } } -func TestHashRate(t *testing.T) { +func TestHashrate(t *testing.T) { var ( hashrate = []hexutil.Uint64{100, 200, 300} expect uint64 @@ -143,7 +150,7 @@ func TestHashRate(t *testing.T) { api := &API{ethash} for i := 0; i < len(hashrate); i += 1 { - if res := api.SubmitHashRate(hashrate[i], ids[i]); !res { + if res := api.SubmitHashrate(hashrate[i], ids[i]); !res { t.Error("remote miner submit hashrate failed") } expect += uint64(hashrate[i]) @@ -163,7 +170,7 @@ func TestClosedRemoteSealer(t *testing.T) { t.Error("expect to return an error to indicate ethash is stopped") } - if res := api.SubmitHashRate(hexutil.Uint64(100), common.HexToHash("a")); res { + if res := api.SubmitHashrate(hexutil.Uint64(100), common.HexToHash("a")); res { t.Error("expect to return false when submit hashrate to a stopped ethash") } } diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index 2053534028..1830e672b1 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -358,7 +358,16 @@ func (s *remoteSealer) makeWork(block *types.Block) { // new work to be processed. func (s *remoteSealer) notifyWork() { work := s.currentWork - blob, _ := json.Marshal(work) + + // Encode the JSON payload of the notification. When NotifyFull is set, + // this is the complete block header, otherwise it is a JSON array. + var blob []byte + if s.ethash.config.NotifyFull { + blob, _ = json.Marshal(s.currentBlock.Header()) + } else { + blob, _ = json.Marshal(work) + } + s.reqWG.Add(len(s.notifyURLs)) for _, url := range s.notifyURLs { go s.sendNotification(s.notifyCtx, url, blob, work) diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go index 20ed2a4184..c34e76aec2 100644 --- a/consensus/ethash/sealer_test.go +++ b/consensus/ethash/sealer_test.go @@ -22,6 +22,7 @@ import ( "math/big" "net/http" "net/http/httptest" + "strconv" "testing" "time" @@ -74,6 +75,50 @@ func TestRemoteNotify(t *testing.T) { } } +// Tests whether remote HTTP servers are correctly notified of new work. (Full pending block body / --miner.notify.full) +func TestRemoteNotifyFull(t *testing.T) { + // Start a simple web server to capture notifications. + sink := make(chan map[string]interface{}) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("failed to read miner notification: %v", err) + } + var work map[string]interface{} + if err := json.Unmarshal(blob, &work); err != nil { + t.Errorf("failed to unmarshal miner notification: %v", err) + } + sink <- work + })) + defer server.Close() + + // Create the custom ethash engine. 
+ config := Config{ + PowMode: ModeTest, + NotifyFull: true, + Log: testlog.Logger(t, log.LvlWarn), + } + ethash := New(config, []string{server.URL}, false) + defer ethash.Close() + + // Stream a work task and ensure the notification bubbles out. + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + + ethash.Seal(nil, block, nil, nil) + select { + case work := <-sink: + if want := "0x" + strconv.FormatUint(header.Number.Uint64(), 16); work["number"] != want { + t.Errorf("pending block number mismatch: have %v, want %v", work["number"], want) + } + if want := "0x" + header.Difficulty.Text(16); work["difficulty"] != want { + t.Errorf("pending block difficulty mismatch: have %s, want %s", work["difficulty"], want) + } + case <-time.After(3 * time.Second): + t.Fatalf("notification timed out") + } +} + // Tests that pushing work packages fast to the miner doesn't cause any data race // issues in the notifications. func TestRemoteMultiNotify(t *testing.T) { @@ -119,6 +164,55 @@ func TestRemoteMultiNotify(t *testing.T) { } } +// Tests that pushing work packages fast to the miner doesn't cause any data race +// issues in the notifications. (Full pending block body / --miner.notify.full) +func TestRemoteMultiNotifyFull(t *testing.T) { + // Start a simple web server to capture notifications. + sink := make(chan map[string]interface{}, 64) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("failed to read miner notification: %v", err) + } + var work map[string]interface{} + if err := json.Unmarshal(blob, &work); err != nil { + t.Errorf("failed to unmarshal miner notification: %v", err) + } + sink <- work + })) + defer server.Close() + + // Create the custom ethash engine. + config := Config{ + PowMode: ModeTest, + NotifyFull: true, + Log: testlog.Logger(t, log.LvlWarn), + } + ethash := New(config, []string{server.URL}, false) + defer ethash.Close() + + // Provide a results reader. + // Otherwise the unread results will be logged asynchronously + // and this can happen after the test is finished, causing a panic. + results := make(chan *types.Block, cap(sink)) + + // Stream a lot of work tasks and ensure all the notifications bubble out. + for i := 0; i < cap(sink); i++ { + header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + ethash.Seal(nil, block, results, nil) + } + + for i := 0; i < cap(sink); i++ { + select { + case <-sink: + <-results + case <-time.After(10 * time.Second): + t.Fatalf("notification %d timed out", i) + } + } +} + // Tests whether stale solutions are correctly processed.
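Both notification formats exercised by these tests can be served by a single endpoint: by default the payload is a JSON array of hex strings (pow hash, seed hash, target, block number), while --miner.notify.full posts the pending header as a JSON object. A sketch of a tolerant receiver (the field handling is illustrative):

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// workNotification handles either payload shape produced by notifyWork.
func workNotification(w http.ResponseWriter, req *http.Request) {
	blob, err := ioutil.ReadAll(req.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var work [4]string
	if err := json.Unmarshal(blob, &work); err == nil {
		// Classic work package: pow hash, seed hash, target, block number.
		fmt.Printf("work: hash=%s target=%s\n", work[0], work[2])
		return
	}
	// Full mode: the pending block header as a JSON object.
	var header map[string]interface{}
	if err := json.Unmarshal(blob, &header); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Printf("header: number=%v difficulty=%v\n", header["number"], header["difficulty"])
}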
func TestStaleSubmission(t *testing.T) { ethash := NewTester(nil, true) diff --git a/core/bench_test.go b/core/bench_test.go index dc8838524d..7ef0c63c28 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -158,7 +158,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { b.Fatalf("cannot create temporary directory: %v", err) } defer os.RemoveAll(dir) - db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "") + db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false) if err != nil { b.Fatalf("cannot create temporary database: %v", err) } @@ -256,7 +256,7 @@ func benchWriteChain(b *testing.B, full bool, count uint64) { if err != nil { b.Fatalf("cannot create temporary directory: %v", err) } - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } @@ -273,7 +273,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { } defer os.RemoveAll(dir) - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } @@ -284,7 +284,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { b.ResetTimer() for i := 0; i < b.N; i++ { - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } diff --git a/core/blockchain.go b/core/blockchain.go index 0d3cb36aff..65487b8c00 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1664,7 +1664,6 @@ func (bc *BlockChain) CommitBlockWithState(deleteEmptyObjects bool, state, priva if !bc.isRaft() { return errors.New("error function can be called only for Raft consensus") } - bc.chainmu.Lock() defer bc.chainmu.Unlock() if _, err := state.Commit(deleteEmptyObjects); err != nil { diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index a3849b8610..f502361926 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -1762,7 +1762,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -1817,7 +1817,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } // Force run a freeze cycle type freezer interface { - Freeze(threshold uint64) + Freeze(threshold uint64) error Ancients() (uint64, error) } db.(freezer).Freeze(tt.freezeThreshold) @@ -1830,7 +1830,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { db.Close() // Start a new blockchain back up and see where the repait leads us - db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index ab27ec34e2..f80efbdddc 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -1961,7 +1961,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { } os.RemoveAll(datadir) - db, err := 
rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -2023,7 +2023,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { } // Force run a freeze cycle type freezer interface { - Freeze(threshold uint64) + Freeze(threshold uint64) error Ancients() (uint64, error) } db.(freezer).Freeze(tt.freezeThreshold) diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 26d6950553..3dcdac6230 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -65,7 +65,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -261,7 +261,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { db.Close() // Start a new blockchain back up and see where the repair leads us - newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "") + newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 9cb03d07ae..c7d5d9ac61 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -38,6 +38,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/assert" ) // So we can deterministically seed different blockchains @@ -655,7 +656,7 @@ func TestFastVsFullChains(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -729,7 +730,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(dir) - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "") + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1357,9 +1358,7 @@ func TestEIP155Transition(t *testing.T) { } }) _, err := blockchain.InsertChain(blocks) - if err != types.ErrInvalidChainId { - t.Error("expected error:", types.ErrInvalidChainId) - } + assert.ErrorIs(t, err, types.ErrInvalidChainId) } func TestEIP161AccountRemoval(t *testing.T) { @@ -1596,7 +1595,7 @@ func TestBlockchainRecovery(t *testing.T) { } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1653,7 +1652,7 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := 
rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1852,7 +1851,7 @@ func testInsertKnownChainData(t *testing.T, typ string) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(dir) - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "") + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2132,7 +2131,7 @@ func TestTransactionIndices(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2160,7 +2159,7 @@ func TestTransactionIndices(t *testing.T) { // Init block chain with external ancients, check all needed indices has been indexed. limit := []uint64{0, 32, 64, 128} for _, l := range limit { - ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2180,7 +2179,7 @@ func TestTransactionIndices(t *testing.T) { } // Reconstruct a block chain which only reserves HEAD-64 tx indices - ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2259,7 +2258,7 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go index b76203dc8f..f099609015 100644 --- a/core/chain_indexer_test.go +++ b/core/chain_indexer_test.go @@ -18,6 +18,7 @@ package core import ( "context" + "errors" "fmt" "math/big" "math/rand" @@ -224,7 +225,10 @@ func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Heade //t.processCh <- header.Number.Uint64() select { case <-time.After(10 * time.Second): - b.t.Fatal("Unexpected call to Process") + b.t.Error("Unexpected call to Process") + // Can't use Fatal since this is not the test's goroutine. 
+ // Returning error stops the chainIndexer's updateLoop + return errors.New("Unexpected call to Process") case b.processCh <- header.Number.Uint64(): } return nil diff --git a/core/chain_makers.go b/core/chain_makers.go index 3b2c370dd7..f0c0166909 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -109,6 +109,8 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { privateDb := b.privateStatedb if privateDb == nil { privateDb = b.statedb + } else { + b.privateStatedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) } // End Quorum @@ -120,6 +122,11 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { b.receipts = append(b.receipts, receipt) } +// GetBalance returns the balance of the given address at the generated block. +func (b *BlockGen) GetBalance(addr common.Address) *big.Int { + return b.statedb.GetBalance(addr) +} + // AddUncheckedTx forcefully adds a transaction to the block without any // validation. // diff --git a/core/headerchain.go b/core/headerchain.go index dcd3644cd1..9aee660eba 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -323,7 +323,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) seals := make([]bool, len(chain)) if checkFreq != 0 { // In case of checkFreq == 0 all seals are left false. - for i := 0; i < len(seals)/checkFreq; i++ { + for i := 0; i <= len(seals)/checkFreq; i++ { index := i*checkFreq + hc.rand.Intn(checkFreq) if index >= len(seals) { index = len(seals) - 1 diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 53ea6f4141..fbdf12ec04 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -896,3 +896,29 @@ func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header { } return a } + +// ReadHeadHeader returns the current canonical head header. +func ReadHeadHeader(db ethdb.Reader) *types.Header { + headHeaderHash := ReadHeadHeaderHash(db) + if headHeaderHash == (common.Hash{}) { + return nil + } + headHeaderNumber := ReadHeaderNumber(db, headHeaderHash) + if headHeaderNumber == nil { + return nil + } + return ReadHeader(db, headHeaderHash, *headHeaderNumber) +} + +// ReadHeadBlock returns the current canonical head block.
+func ReadHeadBlock(db ethdb.Reader) *types.Block { + headBlockHash := ReadHeadBlockHash(db) + if headBlockHash == (common.Hash{}) { + return nil + } + headBlockNumber := ReadHeaderNumber(db, headBlockHash) + if headBlockNumber == nil { + return nil + } + return ReadBlock(db, headBlockHash, *headBlockNumber) +} diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 299211cb82..d0ce0e1236 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -699,7 +699,7 @@ func TestAncientStorage(t *testing.T) { } defer os.Remove(frdir) - db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "") + db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create database with ancient backend") } diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index 862a549540..ad222005be 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -23,10 +23,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) // InitDatabaseFromFreezer reinitializes an empty database from a previous batch @@ -135,32 +135,15 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool close(hashesCh) } }() - - var hasher = sha3.NewLegacyKeccak256() for data := range rlpCh { - it, err := rlp.NewListIterator(data.rlp) - if err != nil { - log.Warn("tx iteration error", "error", err) - return - } - it.Next() - txs := it.Value() - txIt, err := rlp.NewListIterator(txs) - if err != nil { - log.Warn("tx iteration error", "error", err) + var body types.Body + if err := rlp.DecodeBytes(data.rlp, &body); err != nil { + log.Warn("Failed to decode block body", "block", data.number, "error", err) return } var hashes []common.Hash - for txIt.Next() { - if err := txIt.Err(); err != nil { - log.Warn("tx iteration error", "error", err) - return - } - var txHash common.Hash - hasher.Reset() - hasher.Write(txIt.Value()) - hasher.Sum(txHash[:0]) - hashes = append(hashes, txHash) + for _, tx := range body.Transactions { + hashes = append(hashes, tx.Hash()) } result := &blockTxHashes{ hashes: hashes, diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index 90b2639d38..45cc6323e0 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -33,14 +33,34 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction - for i := uint64(0); i <= 10; i++ { - if i == 0 { - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block + to := common.BytesToAddress([]byte{0x11}) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) + for i := uint64(1); i <= 10; i++ { + var tx *types.Transaction + if i%2 == 0 { + tx = types.NewTx(&types.LegacyTx{ + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } else { - tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) - txs = 
append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + tx = types.NewTx(&types.AccessListTx{ + ChainID: big.NewInt(1337), + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } + txs = append(txs, tx) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -66,7 +86,7 @@ func TestChainIterator(t *testing.T) { numbers = append(numbers, int(h.number)) if len(h.hashes) > 0 { if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp { - t.Fatalf("hash wrong, got %x exp %x", got, exp) + t.Fatalf("block %d: hash wrong, got %x exp %x", h.number, got, exp) } } } @@ -88,14 +108,37 @@ func TestIndexTransactions(t *testing.T) { var block *types.Block var txs []*types.Transaction - for i := uint64(0); i <= 10; i++ { - if i == 0 { - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block + to := common.BytesToAddress([]byte{0x11}) + + // Write empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) + + for i := uint64(1); i <= 10; i++ { + var tx *types.Transaction + if i%2 == 0 { + tx = types.NewTx(&types.LegacyTx{ + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } else { - tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) - txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + tx = types.NewTx(&types.AccessListTx{ + ChainID: big.NewInt(1337), + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } + txs = append(txs, tx) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -108,10 +151,10 @@ func TestIndexTransactions(t *testing.T) { } number := ReadTxLookupEntry(chainDb, txs[i-1].Hash()) if exist && number == nil { - t.Fatalf("Transaction indice missing") + t.Fatalf("Transaction index %d missing", i) } if !exist && number != nil { - t.Fatalf("Transaction indice is not deleted") + t.Fatalf("Transaction index %d is not deleted", i) } } number := ReadTxIndexTail(chainDb) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 91171ef92c..94759eb984 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -57,7 +57,10 @@ func (frdb *freezerdb) Close() error { // Freeze is a helper method used for external testing to trigger and block until // a freeze cycle completes, without having to sleep for a minute to trigger the // automatic background run. 
-func (frdb *freezerdb) Freeze(threshold uint64) { +func (frdb *freezerdb) Freeze(threshold uint64) error { + if frdb.AncientStore.(*freezer).readonly { + return errReadOnly + } // Set the freezer threshold to a temporary value defer func(old uint64) { atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old) @@ -68,6 +71,7 @@ func (frdb *freezerdb) Freeze(threshold uint64) { trigger := make(chan struct{}, 1) frdb.AncientStore.(*freezer).trigger <- trigger <-trigger + return nil } // nofreezedb is a database wrapper that disables freezer data retrievals. @@ -121,9 +125,9 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { // NewDatabaseWithFreezer creates a high level database on top of a given key- // value data store with a freezer moving immutable chain segments into cold // storage. -func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string) (ethdb.Database, error) { +func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) { // Create the idle freezer instance - frdb, err := newFreezer(freezer, namespace) + frdb, err := newFreezer(freezer, namespace, readonly) if err != nil { return nil, err } @@ -192,8 +196,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st } } // Freezer is consistent with the key-value database, permit combining the two - go frdb.freeze(db) - + if !frdb.readonly { + go frdb.freeze(db) + } return &freezerdb{ KeyValueStore: db, AncientStore: frdb, @@ -215,8 +220,8 @@ func NewMemoryDatabaseWithCap(size int) ethdb.Database { // NewLevelDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. -func NewLevelDBDatabase(file string, cache int, handles int, namespace string) (ethdb.Database, error) { - db, err := leveldb.New(file, cache, handles, namespace) +func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { + db, err := leveldb.New(file, cache, handles, namespace, readonly) if err != nil { return nil, err } @@ -225,12 +230,12 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string) ( // NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a // freezer moving immutable chain segments into cold storage. 
-func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string) (ethdb.Database, error) { - kvdb, err := leveldb.New(file, cache, handles, namespace) +func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly bool) (ethdb.Database, error) { + kvdb, err := leveldb.New(file, cache, handles, namespace, readonly) if err != nil { return nil, err } - frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace) + frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly) if err != nil { kvdb.Close() return nil, err diff --git a/core/rawdb/database_quorum_test.go b/core/rawdb/database_quorum_test.go index ed6dc0b8ef..85e1edde63 100644 --- a/core/rawdb/database_quorum_test.go +++ b/core/rawdb/database_quorum_test.go @@ -17,7 +17,6 @@ package rawdb import ( - "errors" "testing" "github.com/ethereum/go-ethereum/common" @@ -92,8 +91,6 @@ func TestAccountExtraDataLinker_whenLinkingRoots(t *testing.T) { } } -var errReadOnly = errors.New("unable to write") - type ReadOnlyDB struct { memorydb.Database } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 28715f3125..525def5b54 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -35,6 +35,10 @@ import ( ) var ( + // errReadOnly is returned if the freezer is opened in read only mode. All the + // mutations are disallowed. + errReadOnly = errors.New("read only") + // errUnknownTable is returned if the user attempts to read from a table that is // not tracked by the freezer. errUnknownTable = errors.New("unknown table") @@ -73,6 +77,7 @@ type freezer struct { frozen uint64 // Number of blocks already frozen threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) + readonly bool tables map[string]*freezerTable // Data tables for storing everything instanceLock fileutil.Releaser // File-system lock to prevent double opens @@ -84,7 +89,7 @@ type freezer struct { // newFreezer creates a chain freezer that moves ancient chain data into // append-only flat file containers. -func newFreezer(datadir string, namespace string) (*freezer, error) { +func newFreezer(datadir string, namespace string, readonly bool) (*freezer, error) { // Create the initial freezer object var ( readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil) @@ -106,6 +111,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { } // Open all the supported data tables freezer := &freezer{ + readonly: readonly, threshold: params.FullImmutabilityThreshold, tables: make(map[string]*freezerTable), instanceLock: lock, @@ -130,7 +136,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { lock.Release() return nil, err } - log.Info("Opened ancient database", "database", datadir) + log.Info("Opened ancient database", "database", datadir, "readonly", readonly) return freezer, nil } @@ -138,7 +144,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { func (f *freezer) Close() error { var errs []error f.closeOnce.Do(func() { - f.quit <- struct{}{} + close(f.quit) for _, table := range f.tables { if err := table.Close(); err != nil { errs = append(errs, err) @@ -191,6 +197,9 @@ func (f *freezer) AncientSize(kind string) (uint64, error) { // injection will be rejected. But if two injections with same number happen at // the same time, we can get into the trouble. 
func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td []byte) (err error) { + if f.readonly { + return errReadOnly + } // Ensure the binary blobs we are appending is continuous with freezer. if atomic.LoadUint64(&f.frozen) != number { return errOutOrderInsertion @@ -233,6 +242,9 @@ func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td // TruncateAncients discards any recent data above the provided threshold number. func (f *freezer) TruncateAncients(items uint64) error { + if f.readonly { + return errReadOnly + } if atomic.LoadUint64(&f.frozen) <= items { return nil } diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 530a348540..4d6e415511 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -85,8 +85,12 @@ type Pruner struct { } // NewPruner creates the pruner instance. -func NewPruner(db ethdb.Database, headHeader *types.Header, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) { - snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, false) +func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) { + headBlock := rawdb.ReadHeadBlock(db) + if headBlock == nil { + return nil, errors.New("Failed to load head block") + } + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false) if err != nil { return nil, err // The relevant snapshot(s) might not exist } @@ -104,12 +108,12 @@ func NewPruner(db ethdb.Database, headHeader *types.Header, datadir, trieCachePa stateBloom: stateBloom, datadir: datadir, trieCachePath: trieCachePath, - headHeader: headHeader, + headHeader: headBlock.Header(), snaptree: snaptree, }, nil } -func prune(maindb ethdb.Database, stateBloom *stateBloom, middleStateRoots map[common.Hash]struct{}, start time.Time) error { +func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, stateBloom *stateBloom, bloomPath string, middleStateRoots map[common.Hash]struct{}, start time.Time) error { // Delete all stale trie nodes in the disk. With the help of state bloom // the trie nodes(and codes) belong to the active state will be filtered // out. A very small part of stale tries will also be filtered because of @@ -182,6 +186,25 @@ func prune(maindb ethdb.Database, stateBloom *stateBloom, middleStateRoots map[c iter.Release() log.Info("Pruned state data", "nodes", count, "size", size, "elapsed", common.PrettyDuration(time.Since(pstart))) + // Pruning is done, now drop the "useless" layers from the snapshot. + // Firstly, flushing the target layer into the disk. After that all + // diff layers below the target will all be merged into the disk. + if err := snaptree.Cap(root, 0); err != nil { + return err + } + // Secondly, flushing the snapshot journal into the disk. All diff + // layers upon are dropped silently. Eventually the entire snapshot + // tree is converted into a single disk layer with the pruning target + // as the root. + if _, err := snaptree.Journal(root); err != nil { + return err + } + // Delete the state bloom, it marks the entire pruning procedure is + // finished. If any crashes or manual exit happens before this, + // `RecoverPruning` will pick it up in the next restarts to redo all + // the things. + os.RemoveAll(bloomPath) + // Start compactions, will remove the deleted data from the disk immediately. // Note for small pruning, the compaction is skipped. 
if count >= rangeCompactionThreshold { @@ -310,29 +333,7 @@ func (p *Pruner) Prune(root common.Hash) error { return err } log.Info("State bloom filter committed", "name", filterName) - - if err := prune(p.db, p.stateBloom, middleRoots, start); err != nil { - return err - } - // Pruning is done, now drop the "useless" layers from the snapshot. - // Firstly, flushing the target layer into the disk. After that all - // diff layers below the target will all be merged into the disk. - if err := p.snaptree.Cap(root, 0); err != nil { - return err - } - // Secondly, flushing the snapshot journal into the disk. All diff - // layers upon the target layer are dropped silently. Eventually the - // entire snapshot tree is converted into a single disk layer with - // the pruning target as the root. - if _, err := p.snaptree.Journal(root); err != nil { - return err - } - // Delete the state bloom, it marks the entire pruning procedure is - // finished. If any crashes or manual exit happens before this, - // `RecoverPruning` will pick it up in the next restarts to redo all - // the things. - os.RemoveAll(filterName) - return nil + return prune(p.snaptree, root, p.db, p.stateBloom, filterName, middleRoots, start) } // RecoverPruning will resume the pruning procedure during the system restart. @@ -350,9 +351,9 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err if stateBloomPath == "" { return nil // nothing to recover } - headHeader, err := getHeadHeader(db) - if err != nil { - return err + headBlock := rawdb.ReadHeadBlock(db) + if headBlock == nil { + return errors.New("Failed to load head block") } // Initialize the snapshot tree in recovery mode to handle this special case: // - Users run the `prune-state` command multiple times @@ -362,7 +363,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err // - The state HEAD is rewound already because of multiple incomplete `prune-state` // In this case, even the state HEAD is not exactly matched with snapshot, it // still feasible to recover the pruning correctly. - snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, true) + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true) if err != nil { return err // The relevant snapshot(s) might not exist } @@ -382,7 +383,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err // otherwise the dangling state will be left. var ( found bool - layers = snaptree.Snapshots(headHeader.Root, 128, true) + layers = snaptree.Snapshots(headBlock.Root(), 128, true) middleRoots = make(map[common.Hash]struct{}) ) for _, layer := range layers { @@ -396,28 +397,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err log.Error("Pruning target state is not existent") return errors.New("non-existent target state") } - if err := prune(db, stateBloom, middleRoots, time.Now()); err != nil { - return err - } - // Pruning is done, now drop the "useless" layers from the snapshot. - // Firstly, flushing the target layer into the disk. After that all - // diff layers below the target will all be merged into the disk. - if err := snaptree.Cap(stateBloomRoot, 0); err != nil { - return err - } - // Secondly, flushing the snapshot journal into the disk. All diff - // layers upon are dropped silently. Eventually the entire snapshot - // tree is converted into a single disk layer with the pruning target - // as the root. 
- if _, err := snaptree.Journal(stateBloomRoot); err != nil { - return err - } - // Delete the state bloom, it marks the entire pruning procedure is - // finished. If any crashes or manual exit happens before this, - // `RecoverPruning` will pick it up in the next restarts to redo all - // the things. - os.RemoveAll(stateBloomPath) - return nil + return prune(snaptree, stateBloomRoot, db, stateBloom, stateBloomPath, middleRoots, time.Now()) } // extractGenesis loads the genesis state and commits all the state entries @@ -506,22 +486,6 @@ func findBloomFilter(datadir string) (string, common.Hash, error) { return stateBloomPath, stateBloomRoot, nil } -func getHeadHeader(db ethdb.Database) (*types.Header, error) { - headHeaderHash := rawdb.ReadHeadBlockHash(db) - if headHeaderHash == (common.Hash{}) { - return nil, errors.New("empty head block hash") - } - headHeaderNumber := rawdb.ReadHeaderNumber(db, headHeaderHash) - if headHeaderNumber == nil { - return nil, errors.New("empty head block number") - } - headHeader := rawdb.ReadHeader(db, headHeaderHash, *headHeaderNumber) - if headHeader == nil { - return nil, errors.New("empty head header") - } - return headHeader, nil -} - const warningLog = ` WARNING! diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 9c86a679d1..ee88938b77 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -296,13 +296,17 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) { if !hit { hit = dl.diffed.Contains(destructBloomHasher(hash)) } + var origin *diskLayer + if !hit { + origin = dl.origin // extract origin while holding the lock + } dl.lock.RUnlock() // If the bloom filter misses, don't even bother with traversing the memory // diff layers, reach straight into the bottom persistent disk layer - if !hit { + if origin != nil { snapshotBloomAccountMissMeter.Mark(1) - return dl.origin.AccountRLP(hash) + return origin.AccountRLP(hash) } // The bloom filter hit, start poking in the internal maps return dl.accountRLP(hash, 0) @@ -358,13 +362,17 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro if !hit { hit = dl.diffed.Contains(destructBloomHasher(accountHash)) } + var origin *diskLayer + if !hit { + origin = dl.origin // extract origin while holding the lock + } dl.lock.RUnlock() // If the bloom filter misses, don't even bother with traversing the memory // diff layers, reach straight into the bottom persistent disk layer - if !hit { + if origin != nil { snapshotBloomStorageMissMeter.Mark(1) - return dl.origin.Storage(accountHash, storageHash) + return origin.Storage(accountHash, storageHash) } // The bloom filter hit, start poking in the internal maps return dl.storage(accountHash, storageHash, 0) diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index 6beb944e07..ccde2fc094 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -524,7 +524,7 @@ func TestDiskSeek(t *testing.T) { t.Fatal(err) } else { defer os.RemoveAll(dir) - diskdb, err := leveldb.New(dir, 256, 0, "") + diskdb, err := leveldb.New(dir, 256, 0, "", false) if err != nil { t.Fatal(err) } diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index aa5f5900b0..eccf377264 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -283,11 +283,11 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m return errSnapshotCycle } 
// Generate a new snapshot on top of the parent - parent := t.Snapshot(parentRoot).(snapshot) + parent := t.Snapshot(parentRoot) if parent == nil { return fmt.Errorf("parent [%#x] snapshot missing", parentRoot) } - snap := parent.Update(blockRoot, destructs, accounts, storage) + snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage) // Save the new snapshot for later t.lock.Lock() @@ -484,8 +484,17 @@ func diffToDisk(bottom *diffLayer) *diskLayer { if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator batch.Delete(key) base.cache.Del(key[1:]) - snapshotFlushStorageItemMeter.Mark(1) + + // Ensure we don't delete too much data blindly (contract can be + // huge). It's ok to flush, the root will go missing in case of a + // crash and we'll detect and regenerate the snapshot. + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + log.Crit("Failed to write storage deletions", "err", err) + } + batch.Reset() + } } } it.Release() @@ -503,6 +512,16 @@ func diffToDisk(bottom *diffLayer) *diskLayer { snapshotFlushAccountItemMeter.Mark(1) snapshotFlushAccountSizeMeter.Mark(int64(len(data))) + + // Ensure we don't write too much data blindly. It's ok to flush, the + // root will go missing in case of a crash and we'll detect and regen + // the snapshot. + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + log.Crit("Failed to write storage deletions", "err", err) + } + batch.Reset() + } } // Push all the storage slots into the database for accountHash, storage := range bottom.storageData { diff --git a/core/state_processor.go b/core/state_processor.go index e362d056aa..1a169b5402 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -118,7 +118,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, pri msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number)) if err != nil { - return nil, nil, nil, 0, err + return nil, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } // Quorum: this tx needs to be applied as if we were not a party @@ -351,7 +351,6 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon receipt.BlockHash = statedb.BlockHash() receipt.BlockNumber = header.Number receipt.TransactionIndex = uint(statedb.TxIndex()) - // Quorum var privateReceipt *types.Receipt if config.IsQuorum { diff --git a/core/state_transition.go b/core/state_transition.go index ed3960e4da..784053d2ef 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -324,8 +324,8 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } // Set up the initial access list. 
- if st.evm.ChainConfig().IsBerlin(st.evm.Context.BlockNumber) { - st.state.PrepareAccessList(msg.From(), msg.To(), st.evm.ActivePrecompiles(), msg.AccessList()) + if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber); rules.IsBerlin { + st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList()) } var ( diff --git a/core/tx_pool.go b/core/tx_pool.go index 2f01a7cb31..1c1cd918fe 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -595,7 +595,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { } } else { // Drop non-local transactions under our own minimal accepted gas price - local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network + local = local || pool.locals.contains(from) // Quorum: account may be local even if the transaction arrived from the network if !local && tx.GasPriceIntCmp(pool.gasPrice) < 0 { return ErrUnderpriced } @@ -1000,7 +1000,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { } } -// requestPromoteExecutables requests a pool reset to the new head block. +// requestReset requests a pool reset to the new head block. // The returned channel is closed when the reset has occurred. func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { select { @@ -1769,12 +1769,14 @@ func (t *txLookup) RemoteToLocals(locals *accountSet) int { return migrated } -// helper function to return chainHeadChannel size -func GetChainHeadChannleSize() int { - return chainHeadChanSize -} - // numSlots calculates the number of slots needed for a single transaction. func numSlots(tx *types.Transaction) int { return int((tx.Size() + txSlotSize - 1) / txSlotSize) } + +// Quorum + +// helper function to return chainHeadChannel size +func GetChainHeadChannleSize() int { + return chainHeadChanSize +} diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index e9e99cc866..2682fa40b8 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -1266,7 +1266,7 @@ func TestTransactionAllowedTxSize(t *testing.T) { t.Fatalf("expected rejection on slightly oversize transaction") } // Try adding a transaction of random not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, gasPrice, key, dataSize+1+uint64(rand.Intn(int(10*txMaxSize))))); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, gasPrice, key, dataSize+1+uint64(rand.Intn(10*int(txMaxSize))))); err == nil { t.Fatalf("expected rejection on oversize transaction") } // Run some sanity checks on the pool internals diff --git a/core/types/hashing.go b/core/types/hashing.go index 71efb25a9a..3227cf8a72 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -36,6 +36,7 @@ var encodeBufferPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } +// rlpHash encodes x and hashes the encoded bytes. func rlpHash(x interface{}) (h common.Hash) { sha := hasherPool.Get().(crypto.KeccakState) defer hasherPool.Put(sha) @@ -45,8 +46,8 @@ func rlpHash(x interface{}) (h common.Hash) { return h } -// prefixedRlpHash writes the prefix into the hasher before rlp-encoding the -// given interface. It's used for typed transactions. +// prefixedRlpHash writes the prefix into the hasher before rlp-encoding x. +// It's used for typed transactions. 
func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) { sha := hasherPool.Get().(crypto.KeccakState) defer hasherPool.Put(sha) diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index a948b10ef6..6d1ebf897c 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -1,3 +1,19 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + package types_test import ( diff --git a/core/types/receipt.go b/core/types/receipt.go index a18a2da315..b9cc80c496 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -38,6 +38,7 @@ var ( receiptStatusSuccessfulRLP = []byte{0x01} ) +// This error is returned when a typed receipt is decoded, but the string is empty. var errEmptyTypedReceipt = errors.New("empty typed receipt bytes") const ( diff --git a/core/types/transaction.go b/core/types/transaction.go index 9384bab35c..3e38f3ee3f 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -172,19 +172,6 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error { return nil } -// Quorum -func NewTxPrivacyMetadata(privacyFlag engine.PrivacyFlagType) *PrivacyMetadata { - return &PrivacyMetadata{ - PrivacyFlag: privacyFlag, - } -} - -func (tx *Transaction) SetTxPrivacyMetadata(pm *PrivacyMetadata) { - tx.privacyMetadata = pm -} - -// End Quorum - // decodeTyped decodes a typed transaction from the canonical format. func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { if len(b) == 0 { @@ -284,9 +271,6 @@ func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value // Nonce returns the sender account nonce of the transaction. func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() } -// PrivacyMetadata returns the privacy metadata of the transaction. (Quorum) -func (tx *Transaction) PrivacyMetadata() *PrivacyMetadata { return tx.privacyMetadata } - // To returns the recipient address of the transaction. // For contract-creation transactions, To returns nil. func (tx *Transaction) To() *common.Address { @@ -314,7 +298,7 @@ func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) { // GasPriceCmp compares the gas prices of two transactions. func (tx *Transaction) GasPriceCmp(other *Transaction) int { - return tx.inner.gasPrice().Cmp(other.GasPrice()) + return tx.inner.gasPrice().Cmp(other.inner.gasPrice()) } // GasPriceIntCmp compares the gas price of the transaction against the given price. @@ -322,14 +306,6 @@ func (tx *Transaction) GasPriceIntCmp(other *big.Int) int { return tx.inner.gasPrice().Cmp(other) } -// From returns the sender address of the transaction. (Quorum) -func (tx *Transaction) From() common.Address { - if from, err := Sender(NewEIP2930Signer(tx.ChainId()), tx); err == nil { - return from - } - return common.Address{} -} - // Hash returns the transaction hash.
func (tx *Transaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { @@ -370,57 +346,6 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e return &Transaction{inner: cpy, time: tx.time}, nil } -// String returns the string representation of the transaction. (Quorum) -func (tx *Transaction) String() string { - var from, to string - v, r, s := tx.RawSignatureValues() - if v != nil { - if f, err := Sender(NewEIP2930Signer(tx.ChainId()), tx); err != nil { - from = "[invalid sender: invalid sig]" - } else { - from = fmt.Sprintf("%x", f[:]) - } - } else { - from = "[invalid sender: nil V field]" - } - - if tx.To() == nil { - to = "[contract creation]" - } else { - to = fmt.Sprintf("%x", tx.To()) - } - enc, _ := rlp.EncodeToBytes(&tx.inner) - return fmt.Sprintf(` - TX(%x) - Contract: %v - From: %s - To: %s - Nonce: %v - GasPrice: %#x - GasLimit %#x - Value: %#x - Data: 0x%x - V: %#x - R: %#x - S: %#x - Hex: %x -`, - tx.Hash(), - tx.To() == nil, - from, - to, - tx.Nonce(), - tx.Cost(), - tx.Gas(), - tx.Value(), - tx.Data(), - v, - r, - s, - enc, - ) -} - // Transactions implements DerivableList for transactions. type Transactions []*Transaction @@ -621,6 +546,81 @@ func (m Message) AccessList() AccessList { return m.accessList } func (m Message) CheckNonce() bool { return m.checkNonce } // Quorum + +func NewTxPrivacyMetadata(privacyFlag engine.PrivacyFlagType) *PrivacyMetadata { + return &PrivacyMetadata{ + PrivacyFlag: privacyFlag, + } +} + +func (tx *Transaction) SetTxPrivacyMetadata(pm *PrivacyMetadata) { + tx.privacyMetadata = pm +} + +// PrivacyMetadata returns the privacy metadata of the transaction. (Quorum) +func (tx *Transaction) PrivacyMetadata() *PrivacyMetadata { + return tx.privacyMetadata +} + +// From returns the sender address of the transaction. (Quorum) +func (tx *Transaction) From() common.Address { + if from, err := Sender(NewEIP2930Signer(tx.ChainId()), tx); err == nil { + return from + } + return common.Address{} +} + +// String returns the string representation of the transaction. 
(Quorum) +func (tx *Transaction) String() string { + var from, to string + v, r, s := tx.RawSignatureValues() + if v != nil { + if f, err := Sender(NewEIP2930Signer(tx.ChainId()), tx); err != nil { + from = "[invalid sender: invalid sig]" + } else { + from = fmt.Sprintf("%x", f[:]) + } + } else { + from = "[invalid sender: nil V field]" + } + + if tx.To() == nil { + to = "[contract creation]" + } else { + to = fmt.Sprintf("%x", tx.To()) + } + enc, _ := rlp.EncodeToBytes(&tx.inner) + return fmt.Sprintf(` + TX(%x) + Contract: %v + From: %s + To: %s + Nonce: %v + GasPrice: %#x + GasLimit %#x + Value: %#x + Data: 0x%x + V: %#x + R: %#x + S: %#x + Hex: %x +`, + tx.Hash(), + tx.To() == nil, + from, + to, + tx.Nonce(), + tx.Cost(), + tx.Gas(), + tx.Value(), + tx.Data(), + v, + r, + s, + enc, + ) +} + func (m Message) IsPrivate() bool { return m.isPrivate } diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 14b61a971c..a42e3007ec 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -212,11 +212,7 @@ func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) { func (s eip2930Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { switch txdata := tx.inner.(type) { case *LegacyTx: - R, S, V = decodeSignature(sig) - if s.chainId.Sign() != 0 { - V = big.NewInt(int64(sig[64] + 35)) - V.Add(V, s.chainIdMul) - } + return s.EIP155Signer.SignatureValues(tx, sig) case *AccessListTx: // Check that chain ID of tx matches the signer. We also accept ID zero here, // because it indicates that the chain ID was not specified in the tx. diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index cd0dee5b7e..aa1f5dcb1a 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -55,6 +55,7 @@ var ( common.Hex2Bytes("98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a301"), ) + // Quorum rightvrsTx2, _ = NewTransaction( 3, common.HexToAddress("b94f5374fce5edbc8e2a8697c15331677e6ebf0b"), @@ -66,6 +67,7 @@ var ( HomesteadSigner{}, common.Hex2Bytes("98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a301"), ) + // End Quorum emptyEip2718Tx = NewTx(&AccessListTx{ ChainID: big.NewInt(1), @@ -113,18 +115,6 @@ func TestTransactionEncode(t *testing.T) { } } -// Test from the original quorum implementation -func TestTransactionEncode2(t *testing.T) { - txb, err := rlp.EncodeToBytes(rightvrsTx2) - if err != nil { - t.Fatalf("encode error: %v", err) - } - should := common.FromHex("f86103808207d094b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a8255441ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa08887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3") - if !bytes.Equal(txb, should) { - t.Errorf("encoded RLP mismatch, got %x", txb) - } -} - func TestEIP2718TransactionSigHash(t *testing.T) { s := NewEIP2930Signer(big.NewInt(1)) if s.Hash(emptyEip2718Tx) != common.HexToHash("49b486f0ec0a60dfbbca2d30cb07c9e8ffb2a2ff41f29a1ab6737475f6ff69f3") { @@ -504,3 +494,17 @@ func assertEqual(orig *Transaction, cpy *Transaction) error { } return nil } + +// Quorum + +// Test from the original quorum implementation +func TestTransactionEncode2(t *testing.T) { + txb, err := rlp.EncodeToBytes(rightvrsTx2) + if err != nil { + t.Fatalf("encode error: %v", err) + } + should := 
common.FromHex("f86103808207d094b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a8255441ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa08887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3") + if !bytes.Equal(txb, should) { + t.Errorf("encoded RLP mismatch, got %x", txb) + } +} diff --git a/core/vm/access_list_tracer.go b/core/vm/access_list_tracer.go new file mode 100644 index 0000000000..f790a9c8b6 --- /dev/null +++ b/core/vm/access_list_tracer.go @@ -0,0 +1,179 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package vm + +import ( + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// accessList is an accumulator for the set of accounts and storage slots an EVM +// contract execution touches. +type accessList map[common.Address]accessListSlots + +// accessListSlots is an accumulator for the set of storage slots within a single +// contract that an EVM contract execution touches. +type accessListSlots map[common.Hash]struct{} + +// newAccessList creates a new accessList. +func newAccessList() accessList { + return make(map[common.Address]accessListSlots) +} + +// addAddress adds an address to the accesslist. +func (al accessList) addAddress(address common.Address) { + // Set address if not previously present + if _, present := al[address]; !present { + al[address] = make(map[common.Hash]struct{}) + } +} + +// addSlot adds a storage slot to the accesslist. +func (al accessList) addSlot(address common.Address, slot common.Hash) { + // Set address if not previously present + al.addAddress(address) + + // Set the slot on the surely existent storage set + al[address][slot] = struct{}{} +} + +// equal checks if the content of the current access list is the same as the +// content of the other one. +func (al accessList) equal(other accessList) bool { + // Cross reference the accounts first + if len(al) != len(other) { + return false + } + for addr := range al { + if _, ok := other[addr]; !ok { + return false + } + } + for addr := range other { + if _, ok := al[addr]; !ok { + return false + } + } + // Accounts match, cross reference the storage slots too + for addr, slots := range al { + otherslots := other[addr] + + if len(slots) != len(otherslots) { + return false + } + for hash := range slots { + if _, ok := otherslots[hash]; !ok { + return false + } + } + for hash := range otherslots { + if _, ok := slots[hash]; !ok { + return false + } + } + } + return true +} + +// accessList converts the accesslist to a types.AccessList.
+func (al accessList) accessList() types.AccessList { + acl := make(types.AccessList, 0, len(al)) + for addr, slots := range al { + tuple := types.AccessTuple{Address: addr} + for slot := range slots { + tuple.StorageKeys = append(tuple.StorageKeys, slot) + } + acl = append(acl, tuple) + } + return acl +} + +// AccessListTracer is a tracer that accumulates touched accounts and storage +// slots into an internal set. +type AccessListTracer struct { + excl map[common.Address]struct{} // Set of account to exclude from the list + list accessList // Set of accounts and storage slots touched +} + +var _ Tracer = &AccessListTracer{} + +// NewAccessListTracer creates a new tracer that can generate AccessLists. +// An optional AccessList can be specified to occupy slots and addresses in +// the resulting accesslist. +func NewAccessListTracer(acl types.AccessList, from, to common.Address, precompiles []common.Address) *AccessListTracer { + excl := map[common.Address]struct{}{ + from: {}, to: {}, + } + for _, addr := range precompiles { + excl[addr] = struct{}{} + } + list := newAccessList() + for _, al := range acl { + if _, ok := excl[al.Address]; !ok { + list.addAddress(al.Address) + } + for _, slot := range al.StorageKeys { + list.addSlot(al.Address, slot) + } + } + return &AccessListTracer{ + excl: excl, + list: list, + } +} + +func (a *AccessListTracer) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { +} + +// CaptureState captures all opcodes that touch storage or addresses and adds them to the accesslist. +func (a *AccessListTracer) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { + stack := scope.Stack + if (op == SLOAD || op == SSTORE) && stack.len() >= 1 { + slot := common.Hash(stack.data[stack.len()-1].Bytes32()) + a.list.addSlot(scope.Contract.Address(), slot) + } + if (op == EXTCODECOPY || op == EXTCODEHASH || op == EXTCODESIZE || op == BALANCE || op == SELFDESTRUCT) && stack.len() >= 1 { + addr := common.Address(stack.data[stack.len()-1].Bytes20()) + if _, ok := a.excl[addr]; !ok { + a.list.addAddress(addr) + } + } + if (op == DELEGATECALL || op == CALL || op == STATICCALL || op == CALLCODE) && stack.len() >= 5 { + addr := common.Address(stack.data[stack.len()-2].Bytes20()) + if _, ok := a.excl[addr]; !ok { + a.list.addAddress(addr) + } + } +} + +func (*AccessListTracer) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { +} + +func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {} + +// AccessList returns the current accesslist maintained by the tracer. +func (a *AccessListTracer) AccessList() types.AccessList { + return a.list.accessList() +} + +// Equal returns if the content of two access list traces are equal. +func (a *AccessListTracer) Equal(other *AccessListTracer) bool { + return a.list.equal(other.list) +} diff --git a/core/vm/contracts.go b/core/vm/contracts.go index a3ceece0e9..780b104e7d 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -128,6 +128,25 @@ func init() { } } +// ActivePrecompiles returns the precompiles enabled with the current configuration. 
+func ActivePrecompiles(rules params.Rules) []common.Address { + var result []common.Address + switch { + case rules.IsBerlin: + result = PrecompiledAddressesBerlin + case rules.IsIstanbul: + result = PrecompiledAddressesIstanbul + case rules.IsByzantium: + result = PrecompiledAddressesByzantium + default: + result = PrecompiledAddressesHomestead + } + if rules.IsPrivacyPrecompile { + result = append(result, common.QuorumPrivacyPrecompileContractAddress()) + } + return result +} + // RunPrecompiledContract runs and evaluates the output of a precompiled contract. // It returns // - the returned bytes, diff --git a/core/vm/eips.go b/core/vm/eips.go index 0c8bf1792e..6bb941d5f9 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -76,9 +76,9 @@ func enable1884(jt *JumpTable) { } } -func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - balance, _ := uint256.FromBig(interpreter.evm.StateDB.GetBalance(callContext.contract.Address())) - callContext.stack.push(balance) +func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + balance, _ := uint256.FromBig(interpreter.evm.StateDB.GetBalance(scope.Contract.Address())) + scope.Stack.push(balance) return nil, nil } @@ -95,9 +95,9 @@ func enable1344(jt *JumpTable) { } // opChainID implements CHAINID opcode -func opChainID(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opChainID(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { chainId, _ := uint256.FromBig(interpreter.evm.chainConfig.ChainID) - callContext.stack.push(chainId) + scope.Stack.push(chainId) return nil, nil } diff --git a/core/vm/evm.go b/core/vm/evm.go index 6680d56354..0e61a88bb6 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -57,36 +57,6 @@ type ( GetHashFunc func(uint64) common.Hash ) -// ActivePrecompiles returns the addresses of the precompiles enabled with the current -// configuration -func (evm *EVM) ActivePrecompiles() []common.Address { - return append(evm.activePrecompiles(), evm.activeQuorumPrecompiles()...) -} - -// (Quorum) moved upstream ActivePrecompiles() logic to new method activePrecompiles() -// This functionality is part of an experimental feature so may be subject to future changes. Keeping the original code -// untouched in a new method should flag any changes from future merges. -func (evm *EVM) activePrecompiles() []common.Address { - switch { - case evm.chainRules.IsBerlin: - return PrecompiledAddressesBerlin - case evm.chainRules.IsIstanbul: - return PrecompiledAddressesIstanbul - case evm.chainRules.IsByzantium: - return PrecompiledAddressesByzantium - default: - return PrecompiledAddressesHomestead - } -} - -func (evm *EVM) activeQuorumPrecompiles() []common.Address { - var p []common.Address - if evm.chainRules.IsPrivacyPrecompile { - p = append(p, common.QuorumPrivacyPrecompileContractAddress()) - } - return p -} - func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { @@ -171,9 +141,13 @@ type TxContext struct { Origin common.Address // Provides information for ORIGIN GasPrice *big.Int // Provides information for GASPRICE } + +// Quorum type PublicState StateDB type PrivateState StateDB +// End Quorum + // EVM is the Ethereum Virtual Machine base object and provides // the necessary tools to run a contract on the given state with // the provided context. 
It should be noted that any error @@ -335,7 +309,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas if !isPrecompile && !isQuorumPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value) + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil) } return nil, gas, nil @@ -363,7 +337,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // Capture the tracer start/end events in debug mode if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value) + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters evm.vmConfig.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err) }(gas, time.Now()) @@ -646,7 +620,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, codeAndHash.code, gas, value) + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value) } start := time.Now() diff --git a/core/vm/evm_test.go b/core/vm/evm_test.go index ecef512c11..ceaada1c17 100644 --- a/core/vm/evm_test.go +++ b/core/vm/evm_test.go @@ -90,7 +90,7 @@ func TestActivePrecompiles(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := tt.evm.ActivePrecompiles() + got := ActivePrecompiles(tt.evm.chainRules) require.ElementsMatchf(t, tt.want, got, "want: %v, got: %v", tt.want, got) }) } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 831737a30f..e8307cacb6 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -24,68 +24,68 @@ import ( "golang.org/x/crypto/sha3" ) -func opAdd(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opAdd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Add(&x, y) return nil, nil } -func opSub(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSub(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Sub(&x, y) return nil, nil } -func opMul(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opMul(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Mul(&x, y) return nil, nil } -func opDiv(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opDiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Div(&x, y) return nil, nil } -func opSdiv(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := 
callContext.stack.pop(), callContext.stack.peek() +func opSdiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.SDiv(&x, y) return nil, nil } -func opMod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opMod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Mod(&x, y) return nil, nil } -func opSmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.SMod(&x, y) return nil, nil } -func opExp(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - base, exponent := callContext.stack.pop(), callContext.stack.peek() +func opExp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + base, exponent := scope.Stack.pop(), scope.Stack.peek() exponent.Exp(&base, exponent) return nil, nil } -func opSignExtend(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - back, num := callContext.stack.pop(), callContext.stack.peek() +func opSignExtend(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + back, num := scope.Stack.pop(), scope.Stack.peek() num.ExtendSign(num, &back) return nil, nil } -func opNot(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x := callContext.stack.peek() +func opNot(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x := scope.Stack.peek() x.Not(x) return nil, nil } -func opLt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Lt(y) { y.SetOne() } else { @@ -94,8 +94,8 @@ func opLt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte return nil, nil } -func opGt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Gt(y) { y.SetOne() } else { @@ -104,8 +104,8 @@ func opGt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte return nil, nil } -func opSlt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Slt(y) { y.SetOne() } else { @@ -114,8 +114,8 @@ func opSlt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt return nil, nil } -func opSgt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Sgt(y) { y.SetOne() } else { @@ -124,8 +124,8 @@ func 
opSgt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt return nil, nil } -func opEq(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Eq(y) { y.SetOne() } else { @@ -134,8 +134,8 @@ func opEq(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte return nil, nil } -func opIszero(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x := callContext.stack.peek() +func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x := scope.Stack.peek() if x.IsZero() { x.SetOne() } else { @@ -144,32 +144,32 @@ func opIszero(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] return nil, nil } -func opAnd(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opAnd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.And(&x, y) return nil, nil } -func opOr(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opOr(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Or(&x, y) return nil, nil } -func opXor(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opXor(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Xor(&x, y) return nil, nil } -func opByte(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - th, val := callContext.stack.pop(), callContext.stack.peek() +func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + th, val := scope.Stack.pop(), scope.Stack.peek() val.Byte(&th) return nil, nil } -func opAddmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y, z := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.peek() +func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() if z.IsZero() { z.Clear() } else { @@ -178,8 +178,8 @@ func opAddmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] return nil, nil } -func opMulmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y, z := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.peek() +func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() z.MulMod(&x, &y, z) return nil, nil } @@ -187,9 +187,9 @@ func opMulmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] // opSHL implements Shift Left // The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the left by arg1 number of bits. 
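The instructions.go hunks in this region are mechanical: each handler's unexported *callCtx parameter becomes the exported *ScopeContext, and every stack, memory and contract access goes through the now-public fields. A minimal sketch of the handler shape after the rename, modelled directly on opAdd above (opExample is a hypothetical name, assuming the core/vm package context):

// opExample is a hypothetical two-operand handler after the rename.
func opExample(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
	// Pop the first operand and peek at the second so the result can be
	// accumulated in place; nothing needs to be pushed afterwards.
	x, y := scope.Stack.pop(), scope.Stack.peek()
	y.Add(&x, y)
	return nil, nil
}

Exporting these fields is what lets tracers (see the logger.go changes further down) receive the whole per-call scope as a single value instead of separate memory, stack and contract arguments.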
-func opSHL(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards - shift, value := callContext.stack.pop(), callContext.stack.peek() + shift, value := scope.Stack.pop(), scope.Stack.peek() if shift.LtUint64(256) { value.Lsh(value, uint(shift.Uint64())) } else { @@ -201,9 +201,9 @@ func opSHL(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt // opSHR implements Logical Shift Right // The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill. -func opSHR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards - shift, value := callContext.stack.pop(), callContext.stack.peek() + shift, value := scope.Stack.pop(), scope.Stack.peek() if shift.LtUint64(256) { value.Rsh(value, uint(shift.Uint64())) } else { @@ -215,8 +215,8 @@ func opSHR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt // opSAR implements Arithmetic Shift Right // The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension. -func opSAR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - shift, value := callContext.stack.pop(), callContext.stack.peek() +func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + shift, value := scope.Stack.pop(), scope.Stack.peek() if shift.GtUint64(256) { if value.Sign() >= 0 { value.Clear() @@ -231,9 +231,9 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt return nil, nil } -func opSha3(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - offset, size := callContext.stack.pop(), callContext.stack.peek() - data := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) +func opSha3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + offset, size := scope.Stack.pop(), scope.Stack.peek() + data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) if interpreter.hasher == nil { interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState) @@ -251,13 +251,13 @@ func opSha3(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by size.SetBytes(interpreter.hasherBuf[:]) return nil, nil } -func opAddress(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(callContext.contract.Address().Bytes())) +func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes())) return nil, nil } -func opBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - slot := callContext.stack.peek() +func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + slot := scope.Stack.peek() addr := common.Address(slot.Bytes20()) // 
Quorum: get public/private state db based on addr balance := getDualState(interpreter.evm, addr).GetBalance(addr) @@ -265,25 +265,25 @@ func opBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ return nil, nil } -func opOrigin(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) +func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) return nil, nil } -func opCaller(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(callContext.contract.Caller().Bytes())) +func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes())) return nil, nil } -func opCallValue(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - v, _ := uint256.FromBig(callContext.contract.value) - callContext.stack.push(v) +func opCallValue(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + v, _ := uint256.FromBig(scope.Contract.value) + scope.Stack.push(v) return nil, nil } -func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x := callContext.stack.peek() +func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x := scope.Stack.peek() if offset, overflow := x.Uint64WithOverflow(); !overflow { - data := getData(callContext.contract.Input, offset, 32) + data := getData(scope.Contract.Input, offset, 32) x.SetBytes(data) } else { x.Clear() @@ -291,16 +291,16 @@ func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, callContext *callCt return nil, nil } -func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(uint64(len(callContext.contract.Input)))) +func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Input)))) return nil, nil } -func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - memOffset = callContext.stack.pop() - dataOffset = callContext.stack.pop() - length = callContext.stack.pop() + memOffset = scope.Stack.pop() + dataOffset = scope.Stack.pop() + length = scope.Stack.pop() ) dataOffset64, overflow := dataOffset.Uint64WithOverflow() if overflow { @@ -309,21 +309,21 @@ func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCt // These values are checked for overflow during gas cost calculation memOffset64 := memOffset.Uint64() length64 := length.Uint64() - callContext.memory.Set(memOffset64, length64, getData(callContext.contract.Input, dataOffset64, length64)) + scope.Memory.Set(memOffset64, length64, getData(scope.Contract.Input, dataOffset64, length64)) return nil, nil } -func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData)))) +func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, scope 
*ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData)))) return nil, nil } -func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - memOffset = callContext.stack.pop() - dataOffset = callContext.stack.pop() - length = callContext.stack.pop() + memOffset = scope.Stack.pop() + dataOffset = scope.Stack.pop() + length = scope.Stack.pop() ) offset64, overflow := dataOffset.Uint64WithOverflow() @@ -337,44 +337,44 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *call if overflow || uint64(len(interpreter.returnData)) < end64 { return nil, ErrReturnDataOutOfBounds } - callContext.memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64]) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64]) return nil, nil } -func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - slot := callContext.stack.peek() +func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + slot := scope.Stack.peek() addr := slot.Bytes20() // Quorum: get public/private state db based on addr slot.SetUint64(uint64(getDualState(interpreter.evm, addr).GetCodeSize(addr))) return nil, nil } -func opCodeSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { l := new(uint256.Int) - l.SetUint64(uint64(len(callContext.contract.Code))) - callContext.stack.push(l) + l.SetUint64(uint64(len(scope.Contract.Code))) + scope.Stack.push(l) return nil, nil } -func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - memOffset = callContext.stack.pop() - codeOffset = callContext.stack.pop() - length = callContext.stack.pop() + memOffset = scope.Stack.pop() + codeOffset = scope.Stack.pop() + length = scope.Stack.pop() ) uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow() if overflow { uint64CodeOffset = 0xffffffffffffffff } - codeCopy := getData(callContext.contract.Code, uint64CodeOffset, length.Uint64()) - callContext.memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64()) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) return nil, nil } -func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - stack = callContext.stack + stack = scope.Stack a = stack.pop() memOffset = stack.pop() codeOffset = stack.pop() @@ -387,7 +387,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx addr := common.Address(a.Bytes20()) codeCopy := getData(getDualState(interpreter.evm, addr).GetCode(addr), uint64CodeOffset, length.Uint64()) // Quorum: get public/private state db based on addr - callContext.memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) return nil, nil } @@ -418,8 +418,8 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, 
callContext *callCtx // // (6) Caller tries to get the code hash for an account which is marked as deleted, // this account should be regarded as a non-existent account and zero should be returned. -func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - slot := callContext.stack.peek() +func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) stateDB := getDualState(interpreter.evm, address) if stateDB.Empty(address) { @@ -430,14 +430,14 @@ func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx return nil, nil } -func opGasprice(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.GasPrice) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opBlockhash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - num := callContext.stack.peek() +func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + num := scope.Stack.peek() num64, overflow := num.Uint64WithOverflow() if overflow { num.Clear() @@ -458,90 +458,90 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) return nil, nil } -func opCoinbase(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) +func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) return nil, nil } -func opTimestamp(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.Context.Time) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opNumber(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opNumber(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.Context.BlockNumber) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opDifficulty(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.Context.Difficulty) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opGasLimit(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit)) +func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit)) return nil, nil } -func opPop(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.pop() +func opPop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.pop() return nil, nil } -func opMload(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - 
v := callContext.stack.peek() +func opMload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + v := scope.Stack.peek() offset := int64(v.Uint64()) - v.SetBytes(callContext.memory.GetPtr(offset, 32)) + v.SetBytes(scope.Memory.GetPtr(offset, 32)) return nil, nil } -func opMstore(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opMstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // pop value of the stack - mStart, val := callContext.stack.pop(), callContext.stack.pop() - callContext.memory.Set32(mStart.Uint64(), &val) + mStart, val := scope.Stack.pop(), scope.Stack.pop() + scope.Memory.Set32(mStart.Uint64(), &val) return nil, nil } -func opMstore8(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - off, val := callContext.stack.pop(), callContext.stack.pop() - callContext.memory.store[off.Uint64()] = byte(val.Uint64()) +func opMstore8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + off, val := scope.Stack.pop(), scope.Stack.pop() + scope.Memory.store[off.Uint64()] = byte(val.Uint64()) return nil, nil } -func opSload(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - loc := callContext.stack.peek() +func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + loc := scope.Stack.peek() hash := common.Hash(loc.Bytes32()) // Quorum: get public/private state db based on addr - val := getDualState(interpreter.evm, callContext.contract.Address()).GetState(callContext.contract.Address(), hash) + val := getDualState(interpreter.evm, scope.Contract.Address()).GetState(scope.Contract.Address(), hash) loc.SetBytes(val.Bytes()) return nil, nil } -func opSstore(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - loc := callContext.stack.pop() - val := callContext.stack.pop() +func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + loc := scope.Stack.pop() + val := scope.Stack.pop() // Quorum: get public/private state db based on addr - getDualState(interpreter.evm, callContext.contract.Address()).SetState(callContext.contract.Address(), + getDualState(interpreter.evm, scope.Contract.Address()).SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) return nil, nil } -func opJump(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - pos := callContext.stack.pop() - if !callContext.contract.validJumpdest(&pos) { +func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + pos := scope.Stack.pop() + if !scope.Contract.validJumpdest(&pos) { return nil, ErrInvalidJump } *pc = pos.Uint64() return nil, nil } -func opJumpi(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - pos, cond := callContext.stack.pop(), callContext.stack.pop() +func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + pos, cond := scope.Stack.pop(), scope.Stack.pop() if !cond.IsZero() { - if !callContext.contract.validJumpdest(&pos) { + if !scope.Contract.validJumpdest(&pos) { return nil, ErrInvalidJump } *pc = pos.Uint64() @@ -551,31 +551,31 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]b return nil, nil } -func opJumpdest(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opJumpdest(pc *uint64, interpreter *EVMInterpreter, 
scope *ScopeContext) ([]byte, error) { return nil, nil } -func opPc(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(*pc)) +func opPc(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(*pc)) return nil, nil } -func opMsize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(uint64(callContext.memory.Len()))) +func opMsize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(uint64(scope.Memory.Len()))) return nil, nil } -func opGas(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(callContext.contract.Gas)) +func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(scope.Contract.Gas)) return nil, nil } -func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - value = callContext.stack.pop() - offset, size = callContext.stack.pop(), callContext.stack.pop() - input = callContext.memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) - gas = callContext.contract.Gas + value = scope.Stack.pop() + offset, size = scope.Stack.pop(), scope.Stack.pop() + input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + gas = scope.Contract.Gas ) if interpreter.evm.chainRules.IsEIP150 { gas -= gas / 64 @@ -583,14 +583,14 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] // reuse size int for stackvalue stackvalue := size - callContext.contract.UseGas(gas) + scope.Contract.UseGas(gas) //TODO: use uint256.Int instead of converting with toBig() var bigVal = big0 if !value.IsZero() { bigVal = value.ToBig() } - res, addr, returnGas, suberr := interpreter.evm.Create(callContext.contract, input, gas, bigVal) + res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract, input, gas, bigVal) // Push item on the stack based on the returned error. 
If the ruleset is // homestead we must check for CodeStoreOutOfGasError (homestead only // rule) and treat as an error, if the ruleset is frontier we must @@ -602,8 +602,8 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] } else { stackvalue.SetBytes(addr.Bytes()) } - callContext.stack.push(&stackvalue) - callContext.contract.Gas += returnGas + scope.Stack.push(&stackvalue) + scope.Contract.Gas += returnGas if suberr == ErrExecutionReverted { return res, nil @@ -611,18 +611,18 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] return nil, nil } -func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - endowment = callContext.stack.pop() - offset, size = callContext.stack.pop(), callContext.stack.pop() - salt = callContext.stack.pop() - input = callContext.memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) - gas = callContext.contract.Gas + endowment = scope.Stack.pop() + offset, size = scope.Stack.pop(), scope.Stack.pop() + salt = scope.Stack.pop() + input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + gas = scope.Contract.Gas ) // Apply EIP150 gas -= gas / 64 - callContext.contract.UseGas(gas) + scope.Contract.UseGas(gas) // reuse size int for stackvalue stackvalue := size //TODO: use uint256.Int instead of converting with toBig() @@ -630,7 +630,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ if !endowment.IsZero() { bigEndowment = endowment.ToBig() } - res, addr, returnGas, suberr := interpreter.evm.Create2(callContext.contract, input, gas, + res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract, input, gas, bigEndowment, &salt) // Push item on the stack based on the returned error. if suberr != nil { @@ -638,8 +638,8 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ } else { stackvalue.SetBytes(addr.Bytes()) } - callContext.stack.push(&stackvalue) - callContext.contract.Gas += returnGas + scope.Stack.push(&stackvalue) + scope.Contract.Gas += returnGas if suberr == ErrExecutionReverted { return res, nil @@ -647,8 +647,8 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ return nil, nil } -func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - stack := callContext.stack +func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + stack := scope.Stack // Pop gas. The actual gas in interpreter.evm.callGasTemp. // We can use this as a temporary value temp := stack.pop() @@ -657,7 +657,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get the arguments from the memory. 
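One detail worth calling out in the opCreate hunk above: under EIP-150 the parent keeps one 64th of its remaining gas and forwards the rest to the child (gas -= gas / 64). An illustrative helper, not part of the patch, with the arithmetic spelled out:

// forwardableGas applies the EIP-150 "all but one 64th" rule from opCreate:
// with 1,000,000 gas remaining, 15,625 stays behind and 984,375 is forwarded.
func forwardableGas(available uint64) uint64 {
	return available - available/64
}

opCreate2 applies the same reduction unconditionally, since CREATE2 only exists on chains where EIP-150 is already in force.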
- args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) var bigVal = big0 //TODO: use uint256.Int instead of converting with toBig() @@ -668,7 +668,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by bigVal = value.ToBig() } - ret, returnGas, err := interpreter.evm.Call(callContext.contract, toAddr, args, gas, bigVal) + ret, returnGas, err := interpreter.evm.Call(scope.Contract, toAddr, args, gas, bigVal) if err != nil { temp.Clear() @@ -677,16 +677,16 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // Pop gas. The actual gas is in interpreter.evm.callGasTemp. - stack := callContext.stack + stack := scope.Stack // We use it as a temporary value temp := stack.pop() gas := interpreter.evm.callGasTemp @@ -694,7 +694,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ( addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get arguments from the memory. - args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) //TODO: use uint256.Int instead of converting with toBig() var bigVal = big0 @@ -703,7 +703,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ( bigVal = value.ToBig() } - ret, returnGas, err := interpreter.evm.CallCode(callContext.contract, toAddr, args, gas, bigVal) + ret, returnGas, err := interpreter.evm.CallCode(scope.Contract, toAddr, args, gas, bigVal) if err != nil { temp.Clear() } else { @@ -711,15 +711,15 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ( } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - stack := callContext.stack +func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + stack := scope.Stack // Pop gas. The actual gas is in interpreter.evm.callGasTemp. // We use it as a temporary value temp := stack.pop() @@ -728,9 +728,9 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCt addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get arguments from the memory. 
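Across all of these handlers the Quorum-specific indirection is preserved: opcodes that touch account state (BALANCE, SLOAD, SSTORE, the EXTCODE* family, SELFDESTRUCT) resolve their StateDB through getDualState, which returns the private state database when the address lives in the private state and the public one otherwise. The idiom, as used by opBalance above (balanceOf is an illustrative wrapper, assuming the core/vm package context):

// balanceOf reads a balance from whichever state database (public or
// private) the address belongs to, exactly as opBalance does above.
func balanceOf(interpreter *EVMInterpreter, addr common.Address) *big.Int {
	return getDualState(interpreter.evm, addr).GetBalance(addr)
}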
- args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) - ret, returnGas, err := interpreter.evm.DelegateCall(callContext.contract, toAddr, args, gas) + ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract, toAddr, args, gas) if err != nil { temp.Clear() } else { @@ -738,16 +738,16 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCt } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opStaticCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // Pop gas. The actual gas is in interpreter.evm.callGasTemp. - stack := callContext.stack + stack := scope.Stack // We use it as a temporary value temp := stack.pop() gas := interpreter.evm.callGasTemp @@ -755,9 +755,9 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get arguments from the memory. - args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) - ret, returnGas, err := interpreter.evm.StaticCall(callContext.contract, toAddr, args, gas) + ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract, toAddr, args, gas) if err != nil { temp.Clear() } else { @@ -765,38 +765,38 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opReturn(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - offset, size := callContext.stack.pop(), callContext.stack.pop() - ret := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) +func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + offset, size := scope.Stack.pop(), scope.Stack.pop() + ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) return ret, nil } -func opRevert(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - offset, size := callContext.stack.pop(), callContext.stack.pop() - ret := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) +func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + offset, size := scope.Stack.pop(), scope.Stack.pop() + ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) return ret, nil } -func opStop(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { return nil, nil } -func opSuicide(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - beneficiary := 
callContext.stack.pop() +func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + beneficiary := scope.Stack.pop() // Quorum: get public/private state db based on addr - db := getDualState(interpreter.evm, callContext.contract.Address()) - balance := db.GetBalance(callContext.contract.Address()) + db := getDualState(interpreter.evm, scope.Contract.Address()) + balance := db.GetBalance(scope.Contract.Address()) db.AddBalance(beneficiary.Bytes20(), balance) - db.Suicide(callContext.contract.Address()) + db.Suicide(scope.Contract.Address()) return nil, nil } @@ -804,18 +804,18 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ // make log instruction function func makeLog(size int) executionFunc { - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { topics := make([]common.Hash, size) - stack := callContext.stack + stack := scope.Stack mStart, mSize := stack.pop(), stack.pop() for i := 0; i < size; i++ { addr := stack.pop() topics[i] = addr.Bytes32() } - d := callContext.memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) + d := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) interpreter.evm.StateDB.AddLog(&types.Log{ - Address: callContext.contract.Address(), + Address: scope.Contract.Address(), Topics: topics, Data: d, // This is a non-consensus field, but assigned here because @@ -828,24 +828,24 @@ func makeLog(size int) executionFunc { } // opPush1 is a specialized version of pushN -func opPush1(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - codeLen = uint64(len(callContext.contract.Code)) + codeLen = uint64(len(scope.Contract.Code)) integer = new(uint256.Int) ) *pc += 1 if *pc < codeLen { - callContext.stack.push(integer.SetUint64(uint64(callContext.contract.Code[*pc]))) + scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) } else { - callContext.stack.push(integer.Clear()) + scope.Stack.push(integer.Clear()) } return nil, nil } // make push instruction function func makePush(size uint64, pushByteSize int) executionFunc { - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - codeLen := len(callContext.contract.Code) + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + codeLen := len(scope.Contract.Code) startMin := codeLen if int(*pc+1) < startMin { @@ -858,8 +858,8 @@ func makePush(size uint64, pushByteSize int) executionFunc { } integer := new(uint256.Int) - callContext.stack.push(integer.SetBytes(common.RightPadBytes( - callContext.contract.Code[startMin:endMin], pushByteSize))) + scope.Stack.push(integer.SetBytes(common.RightPadBytes( + scope.Contract.Code[startMin:endMin], pushByteSize))) *pc += size return nil, nil @@ -868,8 +868,8 @@ func makePush(size uint64, pushByteSize int) executionFunc { // make dup instruction function func makeDup(size int64) executionFunc { - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.dup(int(size)) + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.dup(int(size)) return nil, nil } } @@ -878,8 +878,8 @@ func makeDup(size int64) executionFunc { func 
makeSwap(size int64) executionFunc { // switch n + 1 otherwise n would be swapped with n size++ - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.swap(int(size)) + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.swap(int(size)) return nil, nil } } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 8b4a3bab5a..d30288951b 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -104,7 +104,7 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected)) stack.push(x) stack.push(y) - opFn(&pc, evmInterpreter, &callCtx{nil, stack, nil}) + opFn(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) if len(stack.data) != 1 { t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data)) } @@ -219,7 +219,7 @@ func TestAddMod(t *testing.T) { stack.push(z) stack.push(y) stack.push(x) - opAddmod(&pc, evmInterpreter, &callCtx{nil, stack, nil}) + opAddmod(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) actual := stack.pop() if actual.Cmp(expected) != 0 { t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual) @@ -241,7 +241,7 @@ func getResult(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcas y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y)) stack.push(x) stack.push(y) - opFn(&pc, interpreter, &callCtx{nil, stack, nil}) + opFn(&pc, interpreter, &ScopeContext{nil, stack, nil}) actual := stack.pop() result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)} } @@ -299,7 +299,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) { a.SetBytes(arg) stack.push(a) } - op(&pc, evmInterpreter, &callCtx{nil, stack, nil}) + op(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) stack.pop() } } @@ -525,12 +525,12 @@ func TestOpMstore(t *testing.T) { pc := uint64(0) v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700" stack.pushN(*new(uint256.Int).SetBytes(common.Hex2Bytes(v)), *new(uint256.Int)) - opMstore(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v { t.Fatalf("Mstore fail, got %v, expected %v", got, v) } stack.pushN(*new(uint256.Int).SetUint64(0x1), *new(uint256.Int)) - opMstore(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { t.Fatalf("Mstore failed to overwrite previous value") } @@ -553,7 +553,7 @@ func BenchmarkOpMstore(bench *testing.B) { bench.ResetTimer() for i := 0; i < bench.N; i++ { stack.pushN(*value, *memStart) - opMstore(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) } } @@ -572,7 +572,7 @@ func BenchmarkOpSHA3(bench *testing.B) { bench.ResetTimer() for i := 0; i < bench.N; i++ { stack.pushN(*uint256.NewInt().SetUint64(32), *start) - opSha3(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opSha3(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) } } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 06fd38ac5b..68207e860b 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -66,12 +66,12 @@ type Interpreter interface { CanRun([]byte) 
bool } -// callCtx contains the things that are per-call, such as stack and memory, +// ScopeContext contains the things that are per-call, such as stack and memory, // but not transients like pc and gas -type callCtx struct { - memory *Memory - stack *Stack - contract *Contract +type ScopeContext struct { + Memory *Memory + Stack *Stack + Contract *Contract } // keccakState wraps sha3.state. In addition to the usual hash methods, it also supports @@ -167,10 +167,10 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( op OpCode // current opcode mem = NewMemory() // bound memory stack = newstack() // local stack - callContext = &callCtx{ - memory: mem, - stack: stack, - contract: contract, + callContext = &ScopeContext{ + Memory: mem, + Stack: stack, + Contract: contract, } // For optimisation reason we're using uint64 as the program counter. // It's theoretically possible to go above 2^64. The YP defines the PC @@ -195,9 +195,9 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( defer func() { if err != nil { if !logged { - in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, mem, stack, in.returnData, contract, in.evm.depth, err) + in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) } else { - in.cfg.Tracer.CaptureFault(in.evm, pcCopy, op, gasCopy, cost, mem, stack, contract, in.evm.depth, err) + in.cfg.Tracer.CaptureFault(in.evm, pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) } } }() @@ -283,7 +283,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( } if in.cfg.Debug { - in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, mem, stack, in.returnData, contract, in.evm.depth, err) + in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 7b61762456..bb1800ea91 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -21,7 +21,7 @@ import ( ) type ( - executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) + executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error) gasFunc func(*EVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64 // memorySizeFunc returns the required size, and whether the operation overflowed a uint64 memorySizeFunc func(*Stack) (size uint64, overflow bool) diff --git a/core/vm/logger.go b/core/vm/logger.go index 41ce00ed01..626dbc6865 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -18,7 +18,6 @@ package vm import ( "encoding/hex" - "errors" "fmt" "io" "math/big" @@ -32,8 +31,6 @@ import ( "github.com/ethereum/go-ethereum/params" ) -var errTraceLimitReached = errors.New("the number of logs reached the specified limit") - // Storage represents a contract's storage. type Storage map[common.Hash]common.Hash @@ -107,10 +104,10 @@ func (s *StructLog) ErrorString() string { // Note that reference types are actual VM data structures; make copies // if you need to retain them beyond the current call. 
type Tracer interface { - CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error - CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error - CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error - CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error + CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) + CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) + CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) + CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) } // StructLogger is an EVM state logger and implements Tracer. @@ -127,6 +124,8 @@ type StructLogger struct { err error } +var _ Tracer = &StructLogger{} + // NewStructLogger returns a new logger func NewStructLogger(cfg *LogConfig) *StructLogger { logger := &StructLogger{ @@ -139,17 +138,19 @@ func NewStructLogger(cfg *LogConfig) *StructLogger { } // CaptureStart implements the Tracer interface to initialize the tracing operation. -func (l *StructLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { - return nil +func (l *StructLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { } // CaptureState logs a new structured log message and pushes it out to the environment // // CaptureState also tracks SLOAD/SSTORE ops to track storage change. -func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error { +func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { + memory := scope.Memory + stack := scope.Stack + contract := scope.Contract // check if already accumulated the specified number of logs if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) { - return errTraceLimitReached + return } // Copy a snapshot of the current memory state to a new buffer var mem []byte @@ -199,17 +200,15 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui // create a new snapshot of the EVM. log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, env.StateDB.GetRefund(), err} l.logs = append(l.logs, log) - return nil } // CaptureFault implements the Tracer interface to trace an execution fault // while running an opcode. -func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error { - return nil +func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { } // CaptureEnd is called after the call finishes to finalize the tracing. 
-func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error { +func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { l.output = output l.err = err if l.cfg.Debug { @@ -218,7 +217,6 @@ func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration fmt.Printf(" error: %v\n", err) } } - return nil } // StructLogs returns the captured log entries. @@ -292,7 +290,7 @@ func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger { return l } -func (t *mdLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { +func (t *mdLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { if !create { fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n", from.String(), to.String(), @@ -307,10 +305,11 @@ func (t *mdLogger) CaptureStart(from common.Address, to common.Address, create b | Pc | Op | Cost | Stack | RStack | Refund | |-------|-------------|------|-----------|-----------|---------| `) - return nil } -func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error { +// CaptureState also tracks SLOAD/SSTORE ops to track storage change. +func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { + stack := scope.Stack fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost) if !t.cfg.DisableStack { @@ -327,18 +326,13 @@ func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64 if err != nil { fmt.Fprintf(t.out, "Error: %v\n", err) } - return nil } -func (t *mdLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error { - +func (t *mdLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err) - - return nil } -func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) error { +func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) { fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n", output, gasUsed, err) - return nil } diff --git a/core/vm/logger_json.go b/core/vm/logger_json.go index a27c261ed8..df076ff028 100644 --- a/core/vm/logger_json.go +++ b/core/vm/logger_json.go @@ -31,6 +31,8 @@ type JSONLogger struct { cfg *LogConfig } +var _ Tracer = &JSONLogger{} + // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects // into the provided stream. func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger { @@ -41,12 +43,16 @@ func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger { return l } -func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { - return nil +func (l *JSONLogger) CaptureStart(env *EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { } +func (l *JSONLogger) CaptureFault(*EVM, uint64, OpCode, uint64, uint64, *ScopeContext, int, error) {} + // CaptureState outputs state information on the logger. 
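Taken together, the logger.go hunks redefine the Tracer contract: CaptureStart now receives the *EVM environment, CaptureState and CaptureFault take the per-call *ScopeContext instead of separate memory/stack/contract parameters, and none of the hooks return an error any more, so a tracer can no longer abort execution (the old errTraceLimitReached path becomes a plain early return). A minimal tracer against the new interface, modelled on the stepCounter test helper updated later in this diff; opCounter is a hypothetical example assuming the core/vm package context:

// opCounter counts executed opcodes and otherwise ignores every hook.
type opCounter struct{ steps int }

// Compile-time interface check, matching the new assertions added for
// StructLogger and JSONLogger.
var _ Tracer = (*opCounter)(nil)

func (c *opCounter) CaptureStart(env *EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}

func (c *opCounter) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
	// scope.Stack and scope.Memory are live VM structures; copy anything
	// that must be retained beyond this call.
	c.steps++
}

func (c *opCounter) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
}

func (c *opCounter) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}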
-func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error {
+func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
+	memory := scope.Memory
+	stack := scope.Stack
+
 	log := StructLog{
 		Pc:      pc,
 		Op:      op,
@@ -72,16 +78,11 @@ func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint
 	if !l.cfg.DisableReturnData {
 		log.ReturnData = rData
 	}
-	return l.encoder.Encode(log)
-}
-
-// CaptureFault outputs state information on the logger.
-func (l *JSONLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
-	return nil
+	l.encoder.Encode(log)
 }
 
 // CaptureEnd is triggered at end of execution.
-func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
+func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
 	type endLog struct {
 		Output  string              `json:"output"`
 		GasUsed math.HexOrDecimal64 `json:"gasUsed"`
@@ -89,7 +90,8 @@ func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration,
 		Err     string              `json:"error,omitempty"`
 	}
 	if err != nil {
-		return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()})
+		l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()})
+		return
 	}
-	return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""})
+	l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""})
 }
diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go
index e6be238f08..12c0eb6908 100644
--- a/core/vm/logger_test.go
+++ b/core/vm/logger_test.go
@@ -51,19 +51,22 @@ func (*dummyStatedb) GetRefund() uint64 { return 1337 }
 
 func TestStoreCapture(t *testing.T) {
 	var (
-		db       = &dummyStatedb{}
-		env      = NewEVM(BlockContext{}, TxContext{}, db, db, params.TestChainConfig, Config{})
+		env      = NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, &dummyStatedb{}, params.TestChainConfig, Config{})
 		logger   = NewStructLogger(nil)
-		mem      = NewMemory()
-		stack    = newstack()
 		contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 0)
+		scope    = &ScopeContext{
+			Memory:   NewMemory(),
+			Stack:    newstack(),
+			Contract: contract,
+		}
 	)
-	stack.push(uint256.NewInt().SetUint64(1))
-	stack.push(uint256.NewInt())
+	scope.Stack.push(uint256.NewInt().SetUint64(1))
+	scope.Stack.push(uint256.NewInt())
 	var index common.Hash
-	logger.CaptureState(env, 0, SSTORE, 0, 0, mem, stack, nil, contract, 0, nil)
+	logger.CaptureState(env, 0, SSTORE, 0, 0, scope, nil, 0, nil)
 	if len(logger.storage[contract.Address()]) == 0 {
-		t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.storage[contract.Address()]))
+		t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(),
+			len(logger.storage[contract.Address()]))
 	}
 	exp := common.BigToHash(big.NewInt(1))
 	if logger.storage[contract.Address()][index] != exp {
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 9cb69e1c76..72601441d5 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -114,8 +114,8 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
 		vmenv   = NewEnv(cfg)
 		sender  = vm.AccountRef(cfg.Origin)
 	)
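In runtime.go (the hunks beginning above and continuing below), the Berlin access-list setup switches from the instance method vmenv.ActivePrecompiles() to the package-level vm.ActivePrecompiles(rules), keyed on a params.Rules value computed once per entry point. The resulting pattern in Execute, Create and Call (variables as in runtime.Execute):

// Derive the active fork rules at this block height; on Berlin networks,
// pre-warm the EIP-2929 access list with sender, callee and the precompiles
// that are active under those rules.
if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
	cfg.State.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil)
}

This mirrors the evm_test.go change earlier in the diff, where ActivePrecompiles(tt.evm.chainRules) replaces the removed method.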
- if cfg.ChainConfig.IsBerlin(vmenv.Context.BlockNumber) { - cfg.State.PrepareAccessList(cfg.Origin, &address, vmenv.ActivePrecompiles(), nil) + if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin { + cfg.State.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil) } cfg.State.CreateAccount(address) // set the receiver's (the executing contract) code for execution. @@ -146,10 +146,9 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) ) - if cfg.ChainConfig.IsBerlin(vmenv.Context.BlockNumber) { - cfg.State.PrepareAccessList(cfg.Origin, nil, vmenv.ActivePrecompiles(), nil) + if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin { + cfg.State.PrepareAccessList(cfg.Origin, nil, vm.ActivePrecompiles(rules), nil) } - // Call the code with the given configuration. code, address, leftOverGas, err := vmenv.Create( sender, @@ -172,10 +171,10 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er sender := cfg.State.GetOrNewStateObject(cfg.Origin) statedb := cfg.State - if cfg.ChainConfig.IsBerlin(vmenv.Context.BlockNumber) { - statedb.PrepareAccessList(cfg.Origin, &address, vmenv.ActivePrecompiles(), nil) - } + if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin { + statedb.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil) + } // Call the code with the given configuration. ret, leftOverGas, err := vmenv.Call( sender, @@ -184,6 +183,5 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er cfg.GasLimit, cfg.Value, ) - return ret, leftOverGas, err } diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 732e7214b9..9e9923b736 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -347,23 +347,18 @@ type stepCounter struct { steps int } -func (s *stepCounter) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { - return nil +func (s *stepCounter) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { } -func (s *stepCounter) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, rData []byte, contract *vm.Contract, depth int, err error) error { - s.steps++ - // Enable this for more output - //s.inner.CaptureState(env, pc, op, gas, cost, memory, stack, rStack, contract, depth, err) - return nil +func (s *stepCounter) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { } -func (s *stepCounter) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error { - return nil -} +func (s *stepCounter) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {} -func (s *stepCounter) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error { - return nil +func (s *stepCounter) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { + s.steps++ + // Enable this for more output + //s.inner.CaptureState(env, pc, op, gas, cost, memory, stack, rStack, contract, depth, err) } // benchmarkNonModifyingCode benchmarks code, but if the code modifies the diff --git 
a/eth/api.go b/eth/api.go index 0e831291ed..e1b88e42ab 100644 --- a/eth/api.go +++ b/eth/api.go @@ -61,53 +61,6 @@ func (api *PublicEthereumAPI) Coinbase() (common.Address, error) { return api.Etherbase() } -// Quorum -// StorageRoot returns the storage root of an account on the the given (optional) block height. -// If block number is not given the latest block is used. -func (s *PublicEthereumAPI) StorageRoot(ctx context.Context, addr common.Address, blockNr *rpc.BlockNumber) (common.Hash, error) { - var ( - pub, priv *state.StateDB - err error - ) - - psm, err := s.e.blockchain.PrivateStateManager().ResolveForUserContext(ctx) - if err != nil { - return common.Hash{}, err - } - if blockNr == nil || blockNr.Int64() == rpc.LatestBlockNumber.Int64() { - pub, priv, err = s.e.blockchain.StatePSI(psm.ID) - } else { - if ch := s.e.blockchain.GetHeaderByNumber(uint64(blockNr.Int64())); ch != nil { - pub, priv, err = s.e.blockchain.StateAtPSI(ch.Root, psm.ID) - } else { - return common.Hash{}, fmt.Errorf("invalid block number") - } - } - - if err != nil { - return common.Hash{}, err - } - - if priv.Exist(addr) { - return priv.GetStorageRoot(addr) - } - return pub.GetStorageRoot(addr) -} - -// Hashrate returns the POW hashrate -func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 { - return hexutil.Uint64(api.e.Miner().HashRate()) -} - -// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config. -func (api *PublicEthereumAPI) ChainId() (hexutil.Uint64, error) { - // if current block is at or past the EIP-155 replay-protection fork block, return chainID from config - if config := api.e.blockchain.Config(); config.IsEIP155(api.e.blockchain.CurrentBlock().Number()) { - return (hexutil.Uint64)(config.ChainID.Uint64()), nil - } - return hexutil.Uint64(0), fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block") -} - // PublicMinerAPI provides an API to control the miner. // It offers only methods that operate on data that pose no security risk when it is publicly accessible. type PublicMinerAPI struct { @@ -182,11 +135,6 @@ func (api *PrivateMinerAPI) SetRecommitInterval(interval int) { api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond) } -// GetHashrate returns the current hashrate of the miner. -func (api *PrivateMinerAPI) GetHashrate() uint64 { - return api.e.miner.HashRate() -} - // PrivateAdminAPI is the collection of Ethereum full node-related APIs // exposed over the private admin endpoint. type PrivateAdminAPI struct { @@ -363,52 +311,6 @@ func (api *PublicDebugAPI) DefaultStateRoot(ctx context.Context, blockNr rpc.Blo return privateState.IntermediateRoot(true), nil } -// Quorum -// DumpAddress retrieves the state of an address at a given block. -// Quorum adds an additional parameter to support private state dump -func (api *PublicDebugAPI) DumpAddress(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (state.DumpAccount, error) { - publicState, privateState, err := api.getStateDbsFromBlockNumber(ctx, blockNr) - if err != nil { - return state.DumpAccount{}, err - } - - if accountDump, ok := privateState.DumpAddress(address); ok { - return accountDump, nil - } - if accountDump, ok := publicState.DumpAddress(address); ok { - return accountDump, nil - } - return state.DumpAccount{}, errors.New("error retrieving state") -} - -//Quorum -//Taken from DumpBlock, as it was reused in DumpAddress. -//Contains modifications from the original to return the private state db, as well as public. 
-func (api *PublicDebugAPI) getStateDbsFromBlockNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *state.StateDB, error) { - psm, err := api.eth.blockchain.PrivateStateManager().ResolveForUserContext(ctx) - if err != nil { - return nil, nil, err - } - if blockNr == rpc.PendingBlockNumber { - // If we're dumping the pending state, we need to request - // both the pending block as well as the pending state from - // the miner and operate on those - _, publicState, privateState := api.eth.miner.Pending(psm.ID) - return publicState, privateState, nil - } - - var block *types.Block - if blockNr == rpc.LatestBlockNumber { - block = api.eth.blockchain.CurrentBlock() - } else { - block = api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) - } - if block == nil { - return nil, nil, fmt.Errorf("block #%d not found", blockNr) - } - return api.eth.BlockChain().StateAtPSI(block.Root(), psm.ID) -} - // PrivateDebugAPI is the collection of Ethereum full node APIs exposed over // the private debugging endpoint. type PrivateDebugAPI struct { @@ -537,11 +439,10 @@ func (api *PrivateDebugAPI) StorageRangeAt(ctx context.Context, blockHash common if block == nil { return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) } - _, _, statedb, _, _, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0) + _, _, statedb, _, _, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0) if err != nil { return StorageRangeResult{}, err } - defer release() st := statedb.StorageTrie(contractAddress) if st == nil { return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) @@ -654,3 +555,81 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc } return dirty, nil } + +// Quorum + +// StorageRoot returns the storage root of an account on the given (optional) block height. +// If block number is not given the latest block is used. +func (s *PublicEthereumAPI) StorageRoot(ctx context.Context, addr common.Address, blockNr *rpc.BlockNumber) (common.Hash, error) { + var ( + pub, priv *state.StateDB + err error + ) + + psm, err := s.e.blockchain.PrivateStateManager().ResolveForUserContext(ctx) + if err != nil { + return common.Hash{}, err + } + if blockNr == nil || blockNr.Int64() == rpc.LatestBlockNumber.Int64() { + pub, priv, err = s.e.blockchain.StatePSI(psm.ID) + } else { + if ch := s.e.blockchain.GetHeaderByNumber(uint64(blockNr.Int64())); ch != nil { + pub, priv, err = s.e.blockchain.StateAtPSI(ch.Root, psm.ID) + } else { + return common.Hash{}, fmt.Errorf("invalid block number") + } + } + + if err != nil { + return common.Hash{}, err + } + + if priv.Exist(addr) { + return priv.GetStorageRoot(addr) + } + return pub.GetStorageRoot(addr) +} + +// DumpAddress retrieves the state of an address at a given block. +// Quorum adds an additional parameter to support private state dump +func (api *PublicDebugAPI) DumpAddress(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (state.DumpAccount, error) { + publicState, privateState, err := api.getStateDbsFromBlockNumber(ctx, blockNr) + if err != nil { + return state.DumpAccount{}, err + } + + if accountDump, ok := privateState.DumpAddress(address); ok { + return accountDump, nil + } + if accountDump, ok := publicState.DumpAddress(address); ok { + return accountDump, nil + } + return state.DumpAccount{}, errors.New("error retrieving state") +} + +//Taken from DumpBlock, as it was reused in DumpAddress.
+//Contains modifications from the original to return the private state db, as well as public. +func (api *PublicDebugAPI) getStateDbsFromBlockNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *state.StateDB, error) { + psm, err := api.eth.blockchain.PrivateStateManager().ResolveForUserContext(ctx) + if err != nil { + return nil, nil, err + } + if blockNr == rpc.PendingBlockNumber { + // If we're dumping the pending state, we need to request + // both the pending block as well as the pending state from + // the miner and operate on those + _, publicState, privateState := api.eth.miner.Pending(psm.ID) + return publicState, privateState, nil + } + + var block *types.Block + if blockNr == rpc.LatestBlockNumber { + block = api.eth.blockchain.CurrentBlock() + } else { + block = api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) + } + if block == nil { + return nil, nil, fmt.Errorf("block #%d not found", blockNr) + } + return api.eth.BlockChain().StateAtPSI(block.Root(), psm.ID) +} diff --git a/eth/api_backend.go b/eth/api_backend.go index 06ae2d136c..5cb41d516a 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -35,8 +35,10 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/params" pcore "github.com/ethereum/go-ethereum/permission/core" @@ -62,6 +64,9 @@ type EthAPIBackend struct { proxyClient *rpc.Client } +var _ ethapi.Backend = &EthAPIBackend{} +var _ tracers.Backend = &EthAPIBackend{} + func (b *EthAPIBackend) ProxyEnabled() bool { return b.eth.config.QuorumLightClient.Enabled() } @@ -283,25 +288,28 @@ func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { return b.eth.blockchain.GetTdByHash(hash) } -func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header) (*vm.EVM, func() error, error) { +func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { statedb := state.(EthAPIState) vmError := func() error { return nil } - + if vmConfig == nil { + vmConfig = b.eth.blockchain.GetVMConfig() + } txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(header, b.eth.BlockChain(), nil) + // Quorum // Set the private state to public state if contract address is not present in the private state to := common.Address{} if msg.To() != nil { to = *msg.To() } - privateState := statedb.privateState if !privateState.Exist(to) { privateState = statedb.state } + // End Quorum - return vm.NewEVM(context, txContext, statedb.state, privateState, b.eth.blockchain.Config(), *b.eth.blockchain.GetVMConfig()), vmError, nil + return vm.NewEVM(context, txContext, statedb.state, privateState, b.eth.blockchain.Config(), *vmConfig), vmError, nil } func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { @@ -406,10 +414,6 @@ func (b *EthAPIBackend) ExtRPCEnabled() bool { return b.extRPCEnabled } -func (b *EthAPIBackend) CallTimeOut() time.Duration { - return b.evmCallTimeOut -} - func (b *EthAPIBackend) UnprotectedAllowed() bool { return b.allowUnprotectedTxs } @@ -449,16 +453,18 @@ func (b 
*EthAPIBackend) StartMining(threads int) error { return b.eth.StartMining(threads) } -func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, mps.PrivateStateRepository, func(), error) { - return b.eth.stateAtBlock(block, reexec) +func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, mps.PrivateStateRepository, error) { + return b.eth.stateAtBlock(block, reexec, base, checkLive) } -func (b *EthAPIBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, []mps.PrivateStateRepository, func(), error) { - return b.eth.statesInRange(fromBlock, toBlock, reexec) +func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, error) { + return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) } -func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, func(), error) { - return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) +// Quorum + +func (b *EthAPIBackend) CallTimeOut() time.Duration { + return b.evmCallTimeOut } func (b *EthAPIBackend) GetBlockchain() *core.BlockChain { @@ -613,5 +619,3 @@ func (s EthAPIState) GetCodeHash(addr common.Address) common.Hash { } return s.state.GetCodeHash(addr) } - -//func (s MinimalApiState) Error diff --git a/eth/backend.go b/eth/backend.go index 769ca5a4f9..6efe1548bd 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -135,8 +135,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) + // Transfer mining-related config to the ethash config. 
+ ethashConfig := config.Ethash + ethashConfig.NotifyFull = config.Miner.NotifyFull + // Assemble the Ethereum object - chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/") + chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false) if err != nil { return nil, err } @@ -173,18 +177,19 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } eth := &Ethereum{ - config: config, - chainDb: chainDb, - eventMux: stack.EventMux(), - accountManager: stack.AccountManager(), - engine: ethconfig.CreateConsensusEngine(stack, chainConfig, config, config.Miner.Notify, config.Miner.Noverify, chainDb), - closeBloomHandler: make(chan struct{}), - networkID: config.NetworkId, - gasPrice: config.Miner.GasPrice, - etherbase: config.Miner.Etherbase, - bloomRequests: make(chan chan *bloombits.Retrieval), - bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), - p2pServer: stack.Server(), + config: config, + chainDb: chainDb, + eventMux: stack.EventMux(), + accountManager: stack.AccountManager(), + engine: ethconfig.CreateConsensusEngine(stack, chainConfig, config, config.Miner.Notify, config.Miner.Noverify, chainDb), + closeBloomHandler: make(chan struct{}), + networkID: config.NetworkId, + gasPrice: config.Miner.GasPrice, + etherbase: config.Miner.Etherbase, + bloomRequests: make(chan chan *bloombits.Retrieval), + bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), + p2pServer: stack.Server(), + // Quorum qlightP2pServer: stack.QServer(), consensusServicePendingLogsFeed: new(event.Feed), } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 58f43c7090..f384e4e05f 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -248,7 +248,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, headerProcCh: make(chan []*types.Header, 1), quitCh: make(chan struct{}), stateCh: make(chan dataPack), - SnapSyncer: snap.NewSyncer(stateDb, stateBloom), + SnapSyncer: snap.NewSyncer(stateDb), stateSyncStart: make(chan *stateSync), syncStatsState: stateSyncStats{ processed: rawdb.ReadFastTrieProgress(stateDb), diff --git a/eth/downloader/fakepeer.go b/eth/downloader/fakepeer.go deleted file mode 100644 index 3ec90bc9ef..0000000000 --- a/eth/downloader/fakepeer.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- -package downloader - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" -) - -// FakePeer is a mock downloader peer that operates on a local database instance -// instead of being an actual live node. It's useful for testing and to implement -// sync commands from an existing local database. -type FakePeer struct { - id string - db ethdb.Database - hc *core.HeaderChain - dl *Downloader -} - -// NewFakePeer creates a new mock downloader peer with the given data sources. -func NewFakePeer(id string, db ethdb.Database, hc *core.HeaderChain, dl *Downloader) *FakePeer { - return &FakePeer{id: id, db: db, hc: hc, dl: dl} -} - -// Head implements downloader.Peer, returning the current head hash and number -// of the best known header. -func (p *FakePeer) Head() (common.Hash, *big.Int) { - header := p.hc.CurrentHeader() - return header.Hash(), header.Number -} - -// RequestHeadersByHash implements downloader.Peer, returning a batch of headers -// defined by the origin hash and the associated query parameters. -func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error { - var ( - headers []*types.Header - unknown bool - ) - for !unknown && len(headers) < amount { - origin := p.hc.GetHeaderByHash(hash) - if origin == nil { - break - } - number := origin.Number.Uint64() - headers = append(headers, origin) - if reverse { - for i := 0; i <= skip; i++ { - if header := p.hc.GetHeader(hash, number); header != nil { - hash = header.ParentHash - number-- - } else { - unknown = true - break - } - } - } else { - var ( - current = origin.Number.Uint64() - next = current + uint64(skip) + 1 - ) - if header := p.hc.GetHeaderByNumber(next); header != nil { - if p.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash { - hash = header.Hash() - } else { - unknown = true - } - } else { - unknown = true - } - } - } - p.dl.DeliverHeaders(p.id, headers) - return nil -} - -// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers -// defined by the origin number and the associated query parameters. -func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error { - var ( - headers []*types.Header - unknown bool - ) - for !unknown && len(headers) < amount { - origin := p.hc.GetHeaderByNumber(number) - if origin == nil { - break - } - if reverse { - if number >= uint64(skip+1) { - number -= uint64(skip + 1) - } else { - unknown = true - } - } else { - number += uint64(skip + 1) - } - headers = append(headers, origin) - } - p.dl.DeliverHeaders(p.id, headers) - return nil -} - -// RequestBodies implements downloader.Peer, returning a batch of block bodies -// corresponding to the specified block hashes. -func (p *FakePeer) RequestBodies(hashes []common.Hash) error { - var ( - txs [][]*types.Transaction - uncles [][]*types.Header - ) - for _, hash := range hashes { - block := rawdb.ReadBlock(p.db, hash, *p.hc.GetBlockNumber(hash)) - - txs = append(txs, block.Transactions()) - uncles = append(uncles, block.Uncles()) - } - p.dl.DeliverBodies(p.id, txs, uncles) - return nil -} - -// RequestReceipts implements downloader.Peer, returning a batch of transaction -// receipts corresponding to the specified block hashes. 
-func (p *FakePeer) RequestReceipts(hashes []common.Hash) error { - var receipts [][]*types.Receipt - for _, hash := range hashes { - receipts = append(receipts, rawdb.ReadRawReceipts(p.db, hash, *p.hc.GetBlockNumber(hash))) - } - p.dl.DeliverReceipts(p.id, receipts) - return nil -} - -// RequestNodeData implements downloader.Peer, returning a batch of state trie -// nodes corresponding to the specified trie hashes. -func (p *FakePeer) RequestNodeData(hashes []common.Hash) error { - var data [][]byte - for _, hash := range hashes { - if entry, err := p.db.Get(hash.Bytes()); err == nil { - data = append(data, entry) - } - } - p.dl.DeliverNodeData(p.id, data) - return nil -} diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 5444651baa..463ccee4bf 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -182,11 +182,6 @@ type Config struct { // Enables tracking of SHA3 preimages in the VM EnablePreimageRecording bool - RaftMode bool - EnableNodePermission bool - // Istanbul options - Istanbul istanbul.Config - // Miscellaneous options DocRoot string `toml:"-"` @@ -197,11 +192,11 @@ type Config struct { EVMInterpreter string // RPCGasCap is the global gas cap for eth-call variants. - RPCGasCap uint64 `toml:",omitempty"` + RPCGasCap uint64 // RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for // send-transaction variants. The unit is ether. - RPCTxFeeCap float64 `toml:",omitempty"` + RPCTxFeeCap float64 // Checkpoint is a hardcoded checkpoint which can be nil. Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` @@ -213,6 +208,12 @@ type Config struct { OverrideBerlin *big.Int `toml:",omitempty"` // Quorum + + RaftMode bool + EnableNodePermission bool + // Istanbul options + Istanbul istanbul.Config + // timeout value for call EVMCallTimeOut time.Duration @@ -224,25 +225,6 @@ type Config struct { QuorumLightClient *QuorumLightClient `toml:",omitempty"` } -type QuorumLightClient struct { - Use bool `toml:",omitempty"` - PSI string `toml:",omitempty"` - TokenEnabled bool `toml:",omitempty"` - TokenValue string `toml:",omitempty"` - TokenManagement string `toml:",omitempty"` - RPCTLS bool `toml:",omitempty"` - RPCTLSInsecureSkipVerify bool `toml:",omitempty"` - RPCTLSCACert string `toml:",omitempty"` - RPCTLSCert string `toml:",omitempty"` - RPCTLSKey string `toml:",omitempty"` - ServerNode string `toml:",omitempty"` - ServerNodeRPC string `toml:",omitempty"` -} - -func (q *QuorumLightClient) Enabled() bool { - return q != nil && q.Use -} - // CreateConsensusEngine creates a consensus engine for the given chain configuration.
func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine { // If proof-of-authority is requested, set it up @@ -263,6 +245,7 @@ func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, co config.Istanbul.Ceil2Nby3Block = chainConfig.Istanbul.Ceil2Nby3Block config.Istanbul.AllowedFutureBlockTime = config.Miner.AllowedFutureBlockTime //Quorum config.Istanbul.TestQBFTBlock = chainConfig.Istanbul.TestQBFTBlock + return istanbulBackend.New(&config.Istanbul, stack.GetNodeKey(), db) } if chainConfig.IBFT != nil { @@ -281,24 +264,33 @@ func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, co } return istanbulBackend.New(&config.Istanbul, stack.GetNodeKey(), db) } - // Otherwise assume proof-of-work - switch config.Ethash.PowMode { - case ethash.ModeFake: - log.Warn("Ethash used in fake mode") - return ethash.NewFaker() - case ethash.ModeTest: - log.Warn("Ethash used in test mode") - return ethash.NewTester(nil, noverify) - case ethash.ModeShared: - log.Warn("Ethash used in shared mode") - return ethash.NewShared() - default: - // For Quorum, Raft run as a separate service, so - // the Ethereum service still needs a consensus engine, - // use the consensus with the lightest overhead - log.Warn("Ethash used in full fake mode") - return ethash.NewFullFaker() - } + // For Quorum, Raft runs as a separate service, so + // the Ethereum service still needs a consensus engine, + // use the consensus with the lightest overhead + engine := ethash.NewFullFaker() + engine.SetThreads(-1) // Disable CPU Mining + return engine +} + +// Quorum + +type QuorumLightClient struct { + Use bool `toml:",omitempty"` + PSI string `toml:",omitempty"` + TokenEnabled bool `toml:",omitempty"` + TokenValue string `toml:",omitempty"` + TokenManagement string `toml:",omitempty"` + RPCTLS bool `toml:",omitempty"` + RPCTLSInsecureSkipVerify bool `toml:",omitempty"` + RPCTLSCACert string `toml:",omitempty"` + RPCTLSCert string `toml:",omitempty"` + RPCTLSKey string `toml:",omitempty"` + ServerNode string `toml:",omitempty"` + ServerNodeRPC string `toml:",omitempty"` +} + +func (q *QuorumLightClient) Enabled() bool { + return q != nil && q.Use } func setBFTConfig(istanbulConfig *istanbul.Config, bftConfig *params.BFTConfig) { diff --git a/eth/filters/api.go b/eth/filters/api.go index 1691fd2a04..2722b43f25 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -309,7 +309,7 @@ func (api *PublicFilterAPI) NewFilter(ctx context.Context, crit FilterCriteria) crit.PSI = psm.ID logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), logs) if err != nil { - return rpc.ID(""), err + return rpc.ID(""), err // Quorum } api.filtersMu.Lock() diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go index 42c1245117..b1948a1727 100644 --- a/eth/filters/bench_test.go +++ b/eth/filters/bench_test.go @@ -65,7 +65,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { benchDataDir := node.DefaultDataDir() + "/geth/chaindata" b.Log("Running bloombits benchmark section size:", sectionSize) - db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", benchDataDir, err) } @@ -126,7 +126,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { for i := 0; i < benchFilterCnt; i++ { if i%20 == 0 { db.Close() - db, _ =
rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "") + db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) backend = &testBackend{db: db, sections: cnt} } var addr common.Address @@ -157,7 +157,7 @@ func clearBloomBits(db ethdb.Database) { func BenchmarkNoBloomBits(b *testing.B) { benchDataDir := node.DefaultDataDir() + "/geth/chaindata" b.Log("Running benchmark without bloombits") - db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", benchDataDir, err) } diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 434ae46887..0248be0c55 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -646,6 +646,7 @@ func TestPendingTxFilterDeadlock(t *testing.T) { backend = &testBackend{db: db} api = NewPublicFilterAPI(backend, false, timeout) done = make(chan struct{}) + ctx = context.TODO() ) go func() { @@ -672,7 +673,7 @@ func TestPendingTxFilterDeadlock(t *testing.T) { fids[i] = fid // Wait for at least one tx to arrive in filter for { - hashes, err := api.GetFilterChanges(nil, fid) + hashes, err := api.GetFilterChanges(ctx, fid) if err != nil { t.Fatalf("Filter should exist: %v\n", err) } @@ -692,7 +693,7 @@ func TestPendingTxFilterDeadlock(t *testing.T) { case done <- struct{}{}: // Check that all filters have been uninstalled for _, fid := range fids { - if _, err := api.GetFilterChanges(nil, fid); err == nil { + if _, err := api.GetFilterChanges(ctx, fid); err == nil { t.Errorf("Filter %s should have been uninstalled\n", fid) } } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index f1ea7b472e..33a2a81663 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -50,7 +50,7 @@ func BenchmarkFilters(b *testing.B) { defer os.RemoveAll(dir) var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") + db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) backend = &testBackend{db: db} key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -104,7 +104,7 @@ func TestFilters(t *testing.T) { defer os.RemoveAll(dir) var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") + db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) backend = &testBackend{db: db} key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) @@ -269,7 +269,7 @@ func TestMPSFilters(t *testing.T) { defer os.RemoveAll(dir) var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") + db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) backend = &testBackend{db: db} key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) diff --git a/eth/handler.go b/eth/handler.go index 9106b772d8..fedc8f152d 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -51,13 +51,17 @@ import ( const ( // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. 
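// A small sketch (not part of this patch) of the widened database-open
// signature the tests above now call. The trailing boolean is assumed to be
// the upstream read-only flag; openBenchDB is an invented helper.
package dbsketch

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// openBenchDB opens a LevelDB-backed chain database in read-write mode:
// 128MB cache, 1024 file handles, no metrics namespace, readonly=false.
func openBenchDB(dir string) (ethdb.Database, error) {
	return rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
}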
- txChanSize = 4096 + txChanSize = 4096 + + // Quorum protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message ) var ( syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge - errMsgTooLarge = errors.New("message too long") + + // Quorum + errMsgTooLarge = errors.New("message too long") ) // txPool defines the methods needed from a transaction pool implementation to @@ -86,17 +90,18 @@ type txPool interface { // handlerConfig is the collection of initialization parameters to create a full // node network handler. type handlerConfig struct { - Database ethdb.Database // Database for direct sync insertions - Chain *core.BlockChain // Blockchain to serve data from - TxPool txPool // Transaction pool to propagate from - Network uint64 // Network identifier to adfvertise - Sync downloader.SyncMode // Whether to fast or full sync - BloomCache uint64 // Megabytes to alloc for fast sync bloom - EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` - Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges - AuthorizationList map[uint64]common.Hash // Hard coded authorizationList for sync challenged + Database ethdb.Database // Database for direct sync insertions + Chain *core.BlockChain // Blockchain to serve data from + TxPool txPool // Transaction pool to propagate from + Network uint64 // Network identifier to advertise + Sync downloader.SyncMode // Whether to fast or full sync + BloomCache uint64 // Megabytes to alloc for fast sync bloom + EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` + Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges // Quorum + AuthorizationList map[uint64]common.Hash // Hard coded authorization list for sync challenges + Engine consensus.Engine RaftMode bool @@ -171,16 +176,17 @@ func newHandler(config *handlerConfig) (*handler, error) { config.EventMux = new(event.TypeMux) // Nicety initialization for tests } h := &handler{ - networkID: config.Network, - forkFilter: forkid.NewFilter(config.Chain), - eventMux: config.EventMux, - database: config.Database, - txpool: config.TxPool, - chain: config.Chain, - peers: newPeerSet(), + networkID: config.Network, + forkFilter: forkid.NewFilter(config.Chain), + eventMux: config.EventMux, + database: config.Database, + txpool: config.TxPool, + chain: config.Chain, + peers: newPeerSet(), + txsyncCh: make(chan *txsync), + quitSync: make(chan struct{}), + // Quorum authorizationList: config.AuthorizationList, - txsyncCh: make(chan *txsync), - quitSync: make(chan struct{}), raftMode: config.RaftMode, engine: config.Engine, tokenHolder: config.tokenHolder, @@ -226,7 +232,11 @@ func newHandler(config *handlerConfig) (*handler, error) { // Construct the downloader (long sync) and its backing state bloom if fast // sync is requested. The downloader is responsible for deallocating the state // bloom when it's done. - if atomic.LoadUint32(&h.fastSync) == 1 { + // Note: we don't enable it if snap-sync is performed, since it's very heavy + // and the heal-portion of the snap sync is much lighter than fast.
What we particularly + // want to avoid is a 90%-finished (but restarted) snap-sync to begin + // indexing the entire trie + if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 { h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database) } h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer) @@ -340,7 +350,6 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { // When the Register() returns an error, the Run method corresponding to `eth` protocol returns with the error, causing the peer to drop, signal subprotocol as well to exit the `Run` method peer.EthPeerDisconnected <- struct{}{} // End Quorum - return err } defer h.removePeer(peer.ID()) @@ -789,7 +798,6 @@ func (h *handler) handleConsensusMsg(p *eth.Peer, msg p2p.Msg) (bool, error) { pubKey := p.Node().Pubkey() addr := crypto.PubkeyToAddress(*pubKey) handled, err := handler.HandleMsg(addr, msg) - return handled, err } return false, nil diff --git a/eth/peerset.go b/eth/peerset.go index 392587c878..165ae6c9ae 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -157,28 +157,6 @@ func (ps *peerSet) registerPeer(peer *eth.Peer, ext *snap.Peer) error { return nil } -// registerPeer injects a new `eth` peer into the working set, or returns an error -// if the peer is already known. -func (ps *peerSet) registerQPeer(peer *qlight.Peer) error { - // Start tracking the new peer - ps.lock.Lock() - defer ps.lock.Unlock() - - if ps.closed { - return errPeerSetClosed - } - id := peer.ID() - if _, ok := ps.peers[id]; ok { - return errPeerAlreadyRegistered - } - eth := &ethPeer{ - Peer: peer.EthPeer, - qlight: peer, - } - ps.peers[id] = eth - return nil -} - // unregisterPeer removes a remote peer from the active set, disabling any further // actions to/from that particular entity. func (ps *peerSet) unregisterPeer(id string) error { @@ -282,6 +260,7 @@ func (ps *peerSet) close() { } // Quorum + func (ps *peerSet) UpdateTokenForRunningQPeers(token string) error { ps.lock.Lock() defer ps.lock.Unlock() @@ -296,3 +275,25 @@ func (ps *peerSet) UpdateTokenForRunningQPeers(token string) error { } return nil } + +// registerQPeer injects a new `eth` peer into the working set, or returns an error +// if the peer is already known.
+func (ps *peerSet) registerQPeer(peer *qlight.Peer) error { + // Start tracking the new peer + ps.lock.Lock() + defer ps.lock.Unlock() + + if ps.closed { + return errPeerSetClosed + } + id := peer.ID() + if _, ok := ps.peers[id]; ok { + return errPeerAlreadyRegistered + } + eth := &ethPeer{ + Peer: peer.EthPeer, + qlight: peer, + } + ps.peers[id] = eth + return nil +} diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 64648ed419..0dc3de9898 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" @@ -241,7 +242,18 @@ func handleMessage(backend Backend, peer *Peer) error { } else if peer.Version() >= ETH66 { handlers = eth66 } - + // Track the amount of time it takes to serve the request and run the handler + if metrics.Enabled { + h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) + defer func(start time.Time) { + sampler := func() metrics.Sample { + return metrics.ResettingSample( + metrics.NewExpDecaySample(1028, 0.015), + ) + } + metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds()) + }(time.Now()) + } if handler := handlers[msg.Code]; handler != nil { return handler(backend, msg, peer) } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index d92f3ea837..7910c9b735 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -178,7 +178,7 @@ func TestEth66Messages(t *testing.T) { // init the receipts { receipts = []*types.Receipt{ - &types.Receipt{ + { Status: types.ReceiptStatusFailed, CumulativeGasUsed: 1, Logs: []*types.Log{ diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 24c8599552..f7939964f3 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -19,12 +19,14 @@ package snap import ( "bytes" "fmt" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" @@ -48,6 +50,11 @@ const ( // maxTrieNodeLookups is the maximum number of state trie nodes to serve. This // number is there to limit the number of disk lookups. maxTrieNodeLookups = 1024 + + // maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes. + // If we spend too much time, then there's a fairly high chance of timing out + // at the remote side, which means all the work is in vain. + maxTrieNodeTimeSpent = 5 * time.Second ) // Handler is a callback to invoke from an outside runner after the boilerplate @@ -115,7 +122,7 @@ func handle(backend Backend, peer *Peer) error { } // handleMessage is invoked whenever an inbound message is received from a -// remote peer on the `spap` protocol. The remote connection is torn down upon +// remote peer on the `snap` protocol. The remote connection is torn down upon // returning any error.
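// A sketch (not part of this patch) of the request-timing pattern added to
// handleMessage above: a lazily registered histogram backed by a resetting
// exponential-decay sample. timeRequest and the metric name are invented.
package metricsketch

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// timeRequest runs handle and, when metrics are enabled, records the elapsed
// serve time in microseconds under the given histogram name.
func timeRequest(name string, handle func()) {
	if !metrics.Enabled {
		handle()
		return
	}
	sampler := func() metrics.Sample {
		// Same sampler as the handler hunks above: a 1028-entry
		// exp-decay reservoir that resets on each read.
		return metrics.ResettingSample(metrics.NewExpDecaySample(1028, 0.015))
	}
	start := time.Now()
	handle()
	metrics.GetOrRegisterHistogramLazy(name, nil, sampler).Update(time.Since(start).Microseconds())
}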
func handleMessage(backend Backend, peer *Peer) error { // Read the next message from the remote peer, and ensure it's fully consumed @@ -127,7 +134,19 @@ func handleMessage(backend Backend, peer *Peer) error { return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize) } defer msg.Discard() - + start := time.Now() + // Track the amount of time it takes to serve the request and run the handler + if metrics.Enabled { + h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) + defer func(start time.Time) { + sampler := func() metrics.Sample { + return metrics.ResettingSample( + metrics.NewExpDecaySample(1028, 0.015), + ) + } + metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds()) + }(start) + } // Handle the message depending on its contents switch { case msg.Code == GetAccountRangeMsg: @@ -256,8 +275,13 @@ func handleMessage(backend Backend, peer *Peer) error { var ( storage []*StorageData last common.Hash + abort bool ) - for it.Next() && size < hardLimit { + for it.Next() { + if size >= hardLimit { + abort = true + break + } hash, slot := it.Hash(), common.CopyBytes(it.Slot()) // Track the returned interval for the Merkle proofs @@ -280,7 +304,7 @@ func handleMessage(backend Backend, peer *Peer) error { // Generate the Merkle proofs for the first and last storage slot, but // only if the response was capped. If the entire storage trie is included // in the response, no need for any proofs. - if origin != (common.Hash{}) || size >= hardLimit { + if origin != (common.Hash{}) || abort { // Request started at a non-zero hash or was capped prematurely, add // the endpoint Merkle proofs accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) @@ -451,13 +475,13 @@ func handleMessage(backend Backend, peer *Peer) error { bytes += uint64(len(blob)) // Sanity check limits to avoid DoS on the store trie loads - if bytes > req.Bytes || loads > maxTrieNodeLookups { + if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { break } } } // Abort request processing if we've exceeded our limits - if bytes > req.Bytes || loads > maxTrieNodeLookups { + if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { break } } diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 1cfdef15bd..2924fa0802 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -85,7 +85,7 @@ const ( var ( // requestTimeout is the maximum time a peer is allowed to spend on serving // a single network request. - requestTimeout = 10 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync? + requestTimeout = 15 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync?
) // ErrCancelled is returned from snap syncing if the operation was prematurely @@ -376,8 +376,7 @@ type SyncPeer interface { // - The peer delivers a stale response after a previous timeout // - The peer delivers a refusal to serve the requested state type Syncer struct { - db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) - bloom *trie.SyncBloom // Bloom filter to deduplicate nodes for state fixup + db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) root common.Hash // Current state trie root being synced tasks []*accountTask // Current account task set being synced @@ -446,10 +445,9 @@ type Syncer struct { // NewSyncer creates a new snapshot syncer to download the Ethereum state over the // snap protocol. -func NewSyncer(db ethdb.KeyValueStore, bloom *trie.SyncBloom) *Syncer { +func NewSyncer(db ethdb.KeyValueStore) *Syncer { return &Syncer{ - db: db, - bloom: bloom, + db: db, peers: make(map[string]SyncPeer), peerJoin: new(event.Feed), @@ -546,7 +544,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { s.lock.Lock() s.root = root s.healer = &healTask{ - scheduler: state.NewStateSync(root, s.db, s.bloom), + scheduler: state.NewStateSync(root, s.db, nil), trieTasks: make(map[common.Hash]trie.SyncPath), codeTasks: make(map[common.Hash]struct{}), } @@ -813,6 +811,8 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -836,14 +836,14 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) { task: task, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Account range request timed out") + peer.Log().Debug("Account range request timed out", "reqid", reqid) s.scheduleRevertAccountRequest(req) }) s.accountReqs[reqid] = req delete(s.accountIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer, root common.Hash) { + go func(root common.Hash) { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -851,7 +851,7 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) { peer.Log().Debug("Failed to request account range", "err", err) s.scheduleRevertAccountRequest(req) } - }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + }(s.root) // Inject the request into the task to block further assignments task.req = req @@ -893,6 +893,8 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -923,14 +925,14 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) { task: task, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Bytecode request timed out") + peer.Log().Debug("Bytecode request timed out", "reqid", reqid) s.scheduleRevertBytecodeRequest(req) }) s.bytecodeReqs[reqid] = req delete(s.bytecodeIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer) { + go func() { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -938,7 +940,7 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) { log.Debug("Failed to request bytecodes", "err", err) s.scheduleRevertBytecodeRequest(req) } - }(s.peers[idle]) // We're in the lock, peers[id] surely exists + }() } } @@ -978,6 +980,8 @@ func (s *Syncer) assignStorageTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // 
Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -1047,14 +1051,14 @@ req.limit = subtask.Last } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Storage request timed out") + peer.Log().Debug("Storage request timed out", "reqid", reqid) s.scheduleRevertStorageRequest(req) }) s.storageReqs[reqid] = req delete(s.storageIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer, root common.Hash) { + go func(root common.Hash) { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -1066,7 +1070,7 @@ log.Debug("Failed to request storage", "err", err) s.scheduleRevertStorageRequest(req) } - }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + }(s.root) // Inject the request into the subtask to block further assignments if subtask != nil { @@ -1123,6 +1127,8 @@ if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -1162,14 +1168,14 @@ task: s.healer, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Trienode heal request timed out") + peer.Log().Debug("Trienode heal request timed out", "reqid", reqid) s.scheduleRevertTrienodeHealRequest(req) }) s.trienodeHealReqs[reqid] = req delete(s.trienodeHealIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer, root common.Hash) { + go func(root common.Hash) { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -1177,7 +1183,7 @@ log.Debug("Failed to request trienode healers", "err", err) s.scheduleRevertTrienodeHealRequest(req) } - }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + }(s.root) } } @@ -1229,6 +1235,8 @@ if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -1260,14 +1268,14 @@ task: s.healer, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Bytecode heal request timed out") + peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid) s.scheduleRevertBytecodeHealRequest(req) }) s.bytecodeHealReqs[reqid] = req delete(s.bytecodeHealIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer) { + go func() { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -1275,7 +1283,7 @@ log.Debug("Failed to request bytecode healers", "err", err) s.scheduleRevertBytecodeHealRequest(req) } - }(s.peers[idle]) // We're in the lock, peers[id] surely exists + }() } } @@ -1553,7 +1561,14 @@ func (s *Syncer) processAccountResponse(res *accountResponse) { // Ensure that the response doesn't overflow into the subsequent task last := res.task.Last.Big() for i, hash := range res.hashes { - if hash.Big().Cmp(last) > 0 { + // Mark the range complete if the last is already included. + // Keep iterating to delete the extra states if they exist.
+ cmp := hash.Big().Cmp(last) + if cmp == 0 { + res.cont = false + continue + } + if cmp > 0 { // Chunk overflown, cut off excess, but also update the boundary nodes for j := i; j < len(res.hashes); j++ { if err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil { @@ -1660,7 +1675,6 @@ func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) { bytes += common.StorageSize(len(code)) rawdb.WriteCode(batch, hash, code) - s.bloom.Add(hash[:]) } if err := batch.Write(); err != nil { log.Crit("Failed to persist bytecodes", "err", err) @@ -1761,7 +1775,14 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { // Ensure the response doesn't overflow into the subsequent task last := res.subTask.Last.Big() for k, hash := range res.hashes[i] { - if hash.Big().Cmp(last) > 0 { + // Mark the range complete if the last is already included. + // Keep iterating to delete the extra states if they exist. + cmp := hash.Big().Cmp(last) + if cmp == 0 { + res.cont = false + continue + } + if cmp > 0 { // Chunk overflown, cut off excess, but also update the boundary for l := k; l < len(res.hashes[i]); l++ { if err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil { @@ -1788,15 +1809,18 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { it := res.nodes[i].NewIterator(nil, nil) for it.Next() { // Boundary nodes are not written for the last result, since they are incomplete - if i == len(res.hashes)-1 { + if i == len(res.hashes)-1 && res.subTask != nil { if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok { skipped++ continue } + if _, err := res.overflow.Get(it.Key()); err == nil { + skipped++ + continue + } } // Node is not a boundary, persist to disk batch.Put(it.Key(), it.Value()) - s.bloom.Add(it.Key()) bytes += common.StorageSize(common.HashLength + len(it.Value())) nodes++ @@ -1953,7 +1977,6 @@ func (s *Syncer) forwardAccountTask(task *accountTask) { } // Node is neither a boundary, nor an incomplete account, persist to disk batch.Put(it.Key(), it.Value()) - s.bloom.Add(it.Key()) bytes += common.StorageSize(common.HashLength + len(it.Value())) nodes++ diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 0b048786e8..3e9778dbc7 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -23,6 +23,7 @@ import ( "fmt" "math/big" "sort" + "sync" "testing" "time" @@ -30,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -111,10 +113,12 @@ func BenchmarkHashing(b *testing.B) { }) } -type storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error -type accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error -type trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error -type codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error +type ( + accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error + storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error +
trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error + codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error +) type testPeer struct { id string @@ -130,10 +134,10 @@ type testPeer struct { storageRequestHandler storageHandlerFunc trieRequestHandler trieHandlerFunc codeRequestHandler codeHandlerFunc - cancelCh chan struct{} + term func() } -func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { +func newTestPeer(id string, t *testing.T, term func()) *testPeer { peer := &testPeer{ id: id, test: t, @@ -142,12 +146,11 @@ func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { trieRequestHandler: defaultTrieRequestHandler, storageRequestHandler: defaultStorageRequestHandler, codeRequestHandler: defaultCodeRequestHandler, - cancelCh: cancelCh, + term: term, } //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) //peer.logger.SetHandler(stderrHandler) return peer - } func (t *testPeer) ID() string { return t.id } @@ -155,7 +158,7 @@ func (t *testPeer) Log() log.Logger { return t.logger } func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) - go t.accountRequestHandler(t, id, root, origin, bytes) + go t.accountRequestHandler(t, id, root, origin, limit, bytes) return nil } @@ -211,20 +214,21 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, } // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests -func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, cap uint64) error { - keys, vals, proofs := createAccountRequestResponse(t, root, origin, cap) +func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - t.remote.Unregister(t.id) - close(t.cancelCh) + t.term() return err } return nil } -func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { +func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { var size uint64 + if limit == (common.Hash{}) { + limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } for _, entry := range t.accountValues { if size > cap { break @@ -234,20 +238,22 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) } + // If we've exceeded the request threshold, abort + if bytes.Compare(entry.k, limit[:]) >= 0 { + break + } } // Unless we send the entire trie, we need to supply proofs - // Actually, we need to supply proofs either way! This seems tob be an implementation + // Actually, we need to supply proofs either way! 
This seems to be an implementation // quirk in go-ethereum proof := light.NewNodeSet() if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil { - t.logger.Error("Could not prove inexistence of origin", "origin", origin, - "error", err) + t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) } if len(keys) > 0 { lastK := (keys[len(keys)-1])[:] if err := t.accountTrie.Prove(lastK, 0, proof); err != nil { - t.logger.Error("Could not prove last item", - "error", err) + t.logger.Error("Could not prove last item", "error", err) } } for _, blob := range proof.NodeList() { @@ -260,9 +266,8 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error { hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max) if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - close(t.cancelCh) + t.term() } return nil } @@ -270,58 +275,112 @@ func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { var bytecodes [][]byte for _, h := range hashes { - bytecodes = append(bytecodes, getCode(h)) + bytecodes = append(bytecodes, getCodeByHash(h)) } if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - close(t.cancelCh) + t.term() } return nil } -func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { - var ( - size uint64 - limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - ) - if len(bLimit) > 0 { - limit = common.BytesToHash(bLimit) +func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size uint64 + for _, account := range accounts { + // The first account might start from a different origin and end sooner + var originHash common.Hash + if len(origin) > 0 { + originHash = common.BytesToHash(origin) + } + var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + if len(limit) > 0 { + limitHash = common.BytesToHash(limit) + } + var ( + keys []common.Hash + vals [][]byte + abort bool + ) + for _, entry := range t.storageValues[account] { + if size >= max { + abort = true + break + } + if bytes.Compare(entry.k, originHash[:]) < 0 { + continue + } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + size += uint64(32 + len(entry.v)) + if bytes.Compare(entry.k, limitHash[:]) >= 0 { + break + } + } + hashes = append(hashes, keys) + slots = append(slots, vals) + + // Generate the Merkle proofs for the first and last storage slot, but + // only if the response was capped. If the entire storage trie is included + // in the response, no need for any proofs.
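The handler above implements the snap range-serving pattern: skip entries below the origin, accumulate until the byte cap or the limit hash is reached, and remember whether the response was truncated so that boundary proofs can be attached. A minimal runnable sketch of that pattern, using simplified stand-in types rather than the actual test-harness ones:

package main

import (
	"bytes"
	"fmt"
)

type entry struct{ k, v []byte }

// serveRange returns the entries in [origin, limit], stopping once the size
// budget is exhausted. complete == false means the server must also prove the
// first and last returned keys so the requester can verify the cut.
func serveRange(entries []entry, origin, limit []byte, max uint64) (out []entry, complete bool) {
	var size uint64
	for _, e := range entries {
		if size >= max {
			return out, false
		}
		if bytes.Compare(e.k, origin) < 0 {
			continue
		}
		out = append(out, e)
		size += uint64(32 + len(e.v))
		if bytes.Compare(e.k, limit) >= 0 {
			return out, false
		}
	}
	return out, true
}

func main() {
	es := []entry{{[]byte{1}, []byte("a")}, {[]byte{2}, []byte("b")}, {[]byte{3}, []byte("c")}}
	out, complete := serveRange(es, []byte{2}, []byte{3}, 1024)
	fmt.Println(len(out), complete) // 2 false: the limit key terminated the range
}

The proof-generation branch that follows handles exactly this capped (incomplete) case.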
+ if originHash != (common.Hash{}) || abort { + // If we're aborting, we need to prove the first and last item + // This terminates the response (and thus the loop) + proof := light.NewNodeSet() + stTrie := t.storageTries[account] + + // Here's a potential gotcha: when constructing the proof, we cannot + // use the 'origin' slice directly, but must use the full 32-byte + // hash form. + if err := stTrie.Prove(originHash[:], 0, proof); err != nil { + t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err) + } + if len(keys) > 0 { + lastK := (keys[len(keys)-1])[:] + if err := stTrie.Prove(lastK, 0, proof); err != nil { + t.logger.Error("Could not prove last item", "error", err) + } + } + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + break + } } + return hashes, slots, proofs +} + +// createStorageRequestResponseAlwaysProve tests a corner case, where it always +// supplies the proof for the last account, even if it is 'complete'. +func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size uint64 + max = max * 3 / 4 + var origin common.Hash if len(bOrigin) > 0 { origin = common.BytesToHash(bOrigin) } - - var limitExceeded bool - var incomplete bool - for _, account := range accounts { - + var exit bool + for i, account := range accounts { var keys []common.Hash var vals [][]byte for _, entry := range t.storageValues[account] { - if limitExceeded { - incomplete = true - break - } if bytes.Compare(entry.k, origin[:]) < 0 { - incomplete = true - continue + exit = true } keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) - if bytes.Compare(entry.k, limit[:]) >= 0 { - limitExceeded = true - } if size > max { - limitExceeded = true + exit = true } } + if i == len(accounts)-1 { + exit = true + } hashes = append(hashes, keys) slots = append(slots, vals) - if incomplete { + if exit { // If we're aborting, we need to prove the first and last item // This terminates the response (and thus the loop) proof := light.NewNodeSet() @@ -350,21 +409,17 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm } // emptyRequestAccountRangeFn rejects AccountRangeRequests -func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - var proofs [][]byte - var keys []common.Hash - var vals [][]byte - t.remote.OnAccounts(t, requestId, keys, vals, proofs) +func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + t.remote.OnAccounts(t, requestId, nil, nil, nil) return nil } -func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { +func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { return nil } func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { - var nodes [][]byte - t.remote.OnTrieNodes(t, requestId, nodes) + t.remote.OnTrieNodes(t, requestId, nil) return nil } @@ -373,10 +428,7 @@ func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.
} func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { - var hashes [][]common.Hash - var slots [][][]byte - var proofs [][]byte - t.remote.OnStorage(t, requestId, hashes, slots, proofs) + t.remote.OnStorage(t, requestId, nil, nil, nil) return nil } @@ -384,6 +436,15 @@ func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root comm return nil } +func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max) + if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() + } + return nil +} + //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { // var bytecodes [][]byte // t.remote.OnByteCodes(t, id, bytecodes) @@ -397,7 +458,7 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max bytecodes = append(bytecodes, h[:]) } if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) + t.logger.Info("remote error on delivery (as expected)", "error", err) // Mimic the real-life handler, which drops a peer on errors t.remote.Unregister(t.id) } @@ -407,12 +468,12 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { var bytecodes [][]byte for _, h := range hashes[:1] { - bytecodes = append(bytecodes, getCode(h)) + bytecodes = append(bytecodes, getCodeByHash(h)) } + // Missing bytecode can be retrieved again, no error expected if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) - // Mimic the real-life handler, which drops a peer on errors - t.remote.Unregister(t.id) + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() } return nil } @@ -422,16 +483,16 @@ func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Ha return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500) } -func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - return defaultAccountRequestHandler(t, requestId, root, origin, 500) +func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500) } //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { // return defaultAccountRequestHandler(t, requestId-1, root, origin, 500) //} -func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, cap) +func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if len(proofs) > 0 { proofs = proofs[1:] } @@ -473,23 +534,36 @@ func noProofStorageRequestHandler(t 
*testPeer, requestId uint64, root common.Has func TestSyncBloatedProof(t *testing.T) { t.Parallel() + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) - cancel := make(chan struct{}) - source := newTestPeer("source", t, cancel) + source := newTestPeer("source", t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems - source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - var proofs [][]byte - var keys []common.Hash - var vals [][]byte - + source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + var ( + proofs [][]byte + keys []common.Hash + vals [][]byte + ) // The values for _, entry := range t.accountValues { - if bytes.Compare(origin[:], entry.k) <= 0 { - keys = append(keys, common.BytesToHash(entry.k)) - vals = append(vals, entry.v) + if bytes.Compare(entry.k, origin[:]) < 0 { + continue + } + if bytes.Compare(entry.k, limit[:]) > 0 { + continue } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) } // The proofs proof := light.NewNodeSet() @@ -511,9 +585,9 @@ func TestSyncBloatedProof(t *testing.T) { proofs = append(proofs, blob) } if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil { - t.logger.Info("remote error on delivery", "error", err) + t.logger.Info("remote error on delivery (as expected)", "error", err) + t.term() // This is actually correct, signal to exit the test successfully - close(t.cancelCh) } return nil } @@ -525,7 +599,7 @@ func TestSyncBloatedProof(t *testing.T) { func setupSyncer(peers ...*testPeer) *Syncer { stateDb := rawdb.NewMemoryDatabase() - syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb)) + syncer := NewSyncer(stateDb) for _, peer := range peers { syncer.Register(peer) peer.remote = syncer @@ -537,20 +611,28 @@ func setupSyncer(peers ...*testPeer) *Syncer { func TestSync(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - - syncer := setupSyncer(mkSource("sourceA")) + syncer := setupSyncer(mkSource("source")) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. 
This caused a @@ -558,56 +640,79 @@ func TestSync(t *testing.T) { func TestSyncTinyTriePanic(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(1) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - - syncer := setupSyncer( - mkSource("nice-a"), - ) - done := checkStall(t, cancel) + syncer := setupSyncer(mkSource("source")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSync tests a basic sync with multiple peers func TestMultiSync(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorage tests basic sync using accounts + storage + code func TestSyncWithStorage(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -615,33 +720,43 @@ func TestSyncWithStorage(t *testing.T) { return source } syncer := setupSyncer(mkSource("sourceA")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all func TestMultiSyncManyUseless(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = 
storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = emptyRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = emptyStorageRequestHandler } - if !c { + if !noTrieNode { source.trieRequestHandler = emptyTrieRequestHandler } return source @@ -653,9 +768,12 @@ func TestMultiSyncManyUseless(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all @@ -666,24 +784,31 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { defer func(old time.Duration) { requestTimeout = old }(requestTimeout) requestTimeout = time.Millisecond - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = emptyRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = emptyStorageRequestHandler } - if !c { + if !noTrieNode { source.trieRequestHandler = emptyTrieRequestHandler } return source @@ -695,9 +820,12 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all @@ -706,24 +834,31 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { defer func(old time.Duration) { requestTimeout = old }(requestTimeout) requestTimeout = time.Millisecond - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = nonResponsiveRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = nonResponsiveStorageRequestHandler } - if !c { + if 
!noTrieNode { source.trieRequestHandler = nonResponsiveTrieRequestHandler } return source @@ -735,18 +870,21 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } -func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { +func checkStall(t *testing.T, term func()) chan struct{} { testDone := make(chan struct{}) go func() { select { case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much t.Log("Sync stalled") - close(cancel) + term() case <-testDone: return } @@ -754,17 +892,58 @@ func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { return testDone } +// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the +// account trie has a few boundary elements. +func TestSyncBoundaryAccountTrie(t *testing.T) { + t.Parallel() + + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + return source + } + syncer := setupSyncer( + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) +} + // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is // consistently returning very small results func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, slow bool) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems @@ -780,11 +959,12 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { mkSource("nice-c", false), mkSource("capped", true), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver @@ -792,12 +972,19 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.codeRequestHandler = codeFn @@ -811,22 +998,30 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { mkSource("capped", cappedCodeRequestHandler), 
mkSource("corrupt", corruptCodeRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, accFn accountHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.accountRequestHandler = accFn @@ -840,11 +1035,12 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { mkSource("capped", defaultAccountRequestHandler), mkSource("corrupt", corruptAccountRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes @@ -852,12 +1048,19 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.codeRequestHandler = codeFn @@ -872,7 +1075,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { return cappedCodeRequestHandler(t, id, hashes, max) }), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } @@ -885,6 +1088,43 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { if threshold := 100; counter > threshold { t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter) } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the +// storage trie has a few boundary elements. 
+func TestSyncBoundaryStorageTrie(t *testing.T) { + t.Parallel() + + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + return source + } + syncer := setupSyncer( + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is @@ -892,12 +1132,19 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) mkSource := func(name string, slow bool) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -913,11 +1160,12 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { mkSource("nice-a", false), mkSource("slow", true), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is @@ -925,12 +1173,19 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { func TestSyncWithStorageAndCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -945,22 +1200,30 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { mkSource("nice-c", defaultStorageRequestHandler), mkSource("corrupt", corruptStorageRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once 
sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -968,23 +1231,55 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { source.storageRequestHandler = handler return source } - syncer := setupSyncer( mkSource("nice-a", defaultStorageRequestHandler), mkSource("nice-b", defaultStorageRequestHandler), mkSource("nice-c", defaultStorageRequestHandler), mkSource("corrupt", noProofStorageRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against +// a peer who insists on delivering full storage sets _and_ proofs. This triggered +// an error, where the recipient erroneously clipped the boundary nodes, but +// did not mark the account for healing. +func TestSyncWithStorageMisbehavingProve(t *testing.T) { + t.Parallel() + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + source.storageRequestHandler = proofHappyStorageRequestHandler + return source + } + syncer := setupSyncer(mkSource("sourceA")) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } type kv struct { k, v []byte - t bool } // Some helpers for sorting @@ -1013,14 +1308,14 @@ var ( } ) -// getACodeHash returns a pseudo-random code hash -func getACodeHash(i uint64) []byte { +// getCodeHash returns a pseudo-random code hash +func getCodeHash(i uint64) []byte { h := codehashes[int(i)%len(codehashes)] return common.CopyBytes(h[:]) } -// convenience function to lookup the code from the code hash -func getCode(hash common.Hash) []byte { +// getCodeByHash is a convenience function to look up the code from the code hash +func getCodeByHash(hash common.Hash) []byte { if hash == emptyCode { return nil } @@ -1042,23 +1337,77 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { Nonce: i, Balance: big.NewInt(int64(i)), Root: emptyRoot, - CodeHash: getACodeHash(i), + CodeHash: getCodeHash(i), }) key := key32(i) - elem := &kv{key, value, false} + elem := &kv{key, value} accTrie.Update(elem.k, elem.v) entries = append(entries, elem) } sort.Sort(entries) - // Push to disk layer accTrie.Commit(nil) return accTrie, entries } -// makeAccountTrieWithStorage spits out a trie, along with the leafs -func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, - map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +// makeBoundaryAccountTrie constructs an account trie.
Instead of filling +// accounts normally, this function will fill a few accounts which have +// boundary hashes. +func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { + var ( + entries entrySlice + boundaries []common.Hash + db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + trie, _ = trie.New(common.Hash{}, db) + ) + // Initialize boundaries + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(accountConcurrency), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + boundaries = append(boundaries, last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } + // Fill boundary accounts + for i := 0; i < len(boundaries); i++ { + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: uint64(0), + Balance: big.NewInt(int64(i)), + Root: emptyRoot, + CodeHash: getCodeHash(uint64(i)), + }) + elem := &kv{boundaries[i].Bytes(), value} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + // Fill other accounts if required + for i := uint64(1); i <= uint64(n); i++ { + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: emptyRoot, + CodeHash: getCodeHash(i), + }) + elem := &kv{key32(i), value} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + trie.Commit(nil) + return trie, entries +} + +// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account +// has a unique storage set. +func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { var ( db = trie.NewDatabase(rawdb.NewMemoryDatabase()) accTrie, _ = trie.New(common.Hash{}, db) @@ -1066,16 +1415,63 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent storageTries = make(map[common.Hash]*trie.Trie) storageEntries = make(map[common.Hash]entrySlice) ) + // Create n accounts in the trie + for i := uint64(1); i <= uint64(accounts); i++ { + key := key32(i) + codehash := emptyCode[:] + if code { + codehash = getCodeHash(i) + } + // Create a storage trie + stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db) + stRoot := stTrie.Hash() + stTrie.Commit(nil) + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: stRoot, + CodeHash: codehash, + }) + elem := &kv{key, value} + accTrie.Update(elem.k, elem.v) + entries = append(entries, elem) + + storageTries[common.BytesToHash(key)] = stTrie + storageEntries[common.BytesToHash(key)] = stEntries + } + sort.Sort(entries) + + accTrie.Commit(nil) + return accTrie, entries, storageTries, storageEntries +} +// makeAccountTrieWithStorage spits out a trie, along with the leaves +func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { + var ( + db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + accTrie, _ = trie.New(common.Hash{}, db) + entries entrySlice + storageTries = make(map[common.Hash]*trie.Trie) + storageEntries = make(map[common.Hash]entrySlice) + ) // Make a storage trie which we reuse for the whole lot - stTrie, stEntries := makeStorageTrie(slots, db) + var ( + stTrie
*trie.Trie + stEntries entrySlice + ) + if boundary { + stTrie, stEntries = makeBoundaryStorageTrie(slots, db) + } else { + stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db) + } stRoot := stTrie.Hash() + // Create n accounts in the trie for i := uint64(1); i <= uint64(accounts); i++ { key := key32(i) codehash := emptyCode[:] if code { - codehash = getACodeHash(i) + codehash = getCodeHash(i) } value, _ := rlp.EncodeToBytes(state.Account{ Nonce: i, @@ -1083,7 +1479,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent Root: stRoot, CodeHash: codehash, }) - elem := &kv{key, value, false} + elem := &kv{key, value} accTrie.Update(elem.k, elem.v) entries = append(entries, elem) // we reuse the same one for all accounts @@ -1096,23 +1492,116 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent return accTrie, entries, storageTries, storageEntries } -// makeStorageTrie fills a storage trie with n items, returning the -// not-yet-committed trie and the sorted entries -func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { +// makeStorageTrieWithSeed fills a storage trie with n items, returning the +// not-yet-committed trie and the sorted entries. The seeds can be used to ensure +// that tries are unique. +func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) { trie, _ := trie.New(common.Hash{}, db) var entries entrySlice - for i := uint64(1); i <= uint64(n); i++ { - // store 'i' at slot 'i' - slotValue := key32(i) + for i := uint64(1); i <= n; i++ { + // store 'x' at slot 'x' + slotValue := key32(i + seed) rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) slotKey := key32(i) key := crypto.Keccak256Hash(slotKey[:]) - elem := &kv{key[:], rlpSlotValue, false} + elem := &kv{key[:], rlpSlotValue} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + trie.Commit(nil) + return trie, entries +} + +// makeBoundaryStorageTrie constructs a storage trie. Instead of filling +// storage slots normally, this function will fill a few slots which have +// boundary hash. 
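Before the boundary variant, note the seeding trick in makeStorageTrieWithSeed above: every trie gets identical slot keys, but each value is shifted by the seed, so each account ends up with a distinct storage root. A small sketch of that effect; key32 here is an illustrative stand-in, since the real helper's exact byte layout doesn't matter for the point:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// key32 is a stand-in for the test helper: a 32-byte encoding of i.
func key32(i uint64) common.Hash {
	var h common.Hash
	binary.BigEndian.PutUint64(h[24:], i)
	return h
}

func main() {
	for _, seed := range []uint64{0, 1} {
		slotKey := crypto.Keccak256Hash(key32(1).Bytes()) // identical for every seed
		slotVal := key32(1 + seed)                        // shifts with the seed
		fmt.Printf("seed=%d key=%x... val=...%x\n", seed, slotKey[:4], slotVal[24:])
	}
}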
+func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { + var ( + entries entrySlice + boundaries []common.Hash + trie, _ = trie.New(common.Hash{}, db) + ) + // Initialize boundaries + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(accountConcurrency), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + boundaries = append(boundaries, last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } + // Fill boundary slots + for i := 0; i < len(boundaries); i++ { + key := boundaries[i] + val := []byte{0xde, 0xad, 0xbe, 0xef} + + elem := &kv{key[:], val} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + // Fill other slots if required + for i := uint64(1); i <= uint64(n); i++ { + slotKey := key32(i) + key := crypto.Keccak256Hash(slotKey[:]) + + slotValue := key32(i) + rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) + + elem := &kv{key[:], rlpSlotValue} trie.Update(elem.k, elem.v) entries = append(entries, elem) } sort.Sort(entries) + trie.Commit(nil) return trie, entries } + +func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { + t.Helper() + triedb := trie.NewDatabase(db) + accTrie, err := trie.New(root, triedb) + if err != nil { + t.Fatal(err) + } + accounts, slots := 0, 0 + accIt := trie.NewIterator(accTrie.NodeIterator(nil)) + for accIt.Next() { + var acc struct { + Nonce uint64 + Balance *big.Int + Root common.Hash + CodeHash []byte + } + if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil { + log.Crit("Invalid account encountered during snapshot creation", "err", err) + } + accounts++ + if acc.Root != emptyRoot { + storeTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + t.Fatal(err) + } + storeIt := trie.NewIterator(storeTrie.NodeIterator(nil)) + for storeIt.Next() { + slots++ + } + if err := storeIt.Err; err != nil { + t.Fatal(err) + } + } + } + if err := accIt.Err; err != nil { + t.Fatal(err) + } + t.Logf("accounts: %d, slots: %d", accounts, slots) +} diff --git a/eth/state_accessor.go b/eth/state_accessor.go index d15bf0825c..4b29e4c62d 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -34,39 +34,78 @@ import ( ) // stateAtBlock retrieves the state database associated with a certain block. -// If no state is locally available for the given block, a number of blocks are -// attempted to be reexecuted to generate the desired state. -func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64) (statedb *state.StateDB, privateStateDB mps.PrivateStateRepository, release func(), err error) { - // If we have the state fully available, use that - statedb, privateStateRepo, err := eth.blockchain.StateAt(block.Root()) - if err == nil { - return statedb, privateStateRepo, func() {}, nil +// If no state is locally available for the given block, a number of blocks +// are attempted to be reexecuted to generate the desired state. The optional +// base layer statedb can be passed; if so, it is regarded as the statedb of the +// parent block.
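Before the implementation, a plausible caller-side view of the new signature, inferred from this diff (stateForTrace is a hypothetical helper, not part of the patch):

func stateForTrace(eth *Ethereum, b *types.Block, reexec uint64, parentState *state.StateDB) (*state.StateDB, mps.PrivateStateRepository, error) {
	if parentState == nil {
		// Cold start: consult the live database first, reexecute history if needed.
		return eth.stateAtBlock(b, reexec, nil, true)
	}
	// Chained call during a multi-block trace: reuse the in-memory parent state
	// and skip the live check, so the trace cannot rewind to a persisted block.
	return eth.stateAtBlock(b, reexec, parentState, false)
}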
+func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, privateStateDB mps.PrivateStateRepository, err error) { + var ( + current *types.Block + database state.Database + report = true + origin = block.NumberU64() + ) + // Check the live database first; if we have the state fully available, use that. + if checkLive { + statedb, privateStateDB, err = eth.blockchain.StateAt(block.Root()) + if err == nil { + return + } } - // Otherwise try to reexec blocks until we find a state or reach our limit - origin := block.NumberU64() - database := state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true}) + if base != nil { + // The optional base statedb is given; mark the start point as the parent block + statedb, database, report = base, base.Database(), false + current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) + } else { + // Otherwise try to reexec blocks until we find a state or reach our limit + current = block - for i := uint64(0); i < reexec; i++ { - if block.NumberU64() == 0 { - return nil, nil, nil, errors.New("genesis state is missing") - } - parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) - if parent == nil { - return nil, nil, nil, fmt.Errorf("missing block %v %d", block.ParentHash(), block.NumberU64()-1) + // Create an ephemeral trie.Database for isolating the live one. Otherwise + // the internal junk created by tracing will be persisted to disk. + database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) + + // If we didn't check the dirty database, do check the clean one, otherwise + // we would rewind past a persisted block (specific corner case is chain + // tracing from the genesis).
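The ephemeral database above is the heart of the rework: state regenerated for tracing is committed into an in-memory trie.Database layered over eth.chainDb and disappears once dereferenced. A condensed view of the per-block lifecycle inside this function (the helper name and parameters are illustrative, not part of the patch):

func commitEphemeral(database state.Database, statedb *state.StateDB, parent common.Hash, eip158 bool) (*state.StateDB, common.Hash, error) {
	root, err := statedb.Commit(eip158)
	if err != nil {
		return nil, common.Hash{}, err
	}
	if statedb, err = state.New(root, database, nil); err != nil {
		return nil, common.Hash{}, err
	}
	database.TrieDB().Reference(root, common.Hash{}) // pin the fresh root in memory only
	if parent != (common.Hash{}) {
		database.TrieDB().Dereference(parent) // drop the previous pin
	}
	return statedb, root, nil // at most one historical root stays pinned
}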
+ if !checkLive { + statedb, err = state.New(current.Root(), database, nil) + if err == nil { + // Quorum + _, privateStateDB, err = eth.blockchain.StateAt(current.Root()) + if err == nil { + return statedb, privateStateDB, nil + } + // End Quorum + } } - block = parent + // Database does not have the state for the given block, try to regenerate + for i := uint64(0); i < reexec; i++ { + if current.NumberU64() == 0 { + return nil, nil, errors.New("genesis state is missing") + } + parent := eth.blockchain.GetBlock(current.ParentHash(), current.NumberU64()-1) + if parent == nil { + return nil, nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) + } + current = parent - statedb, privateStateRepo, err = eth.blockchain.StateAt(block.Root()) - if err == nil { - break + statedb, err = state.New(current.Root(), database, nil) + if err == nil { + // Quorum + _, privateStateDB, err = eth.blockchain.StateAt(current.Root()) + if err == nil { + break + } + // End Quorum + } } - } - if err != nil { - switch err.(type) { - case *trie.MissingNodeError: - return nil, nil, nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) - default: - return nil, nil, nil, err + if err != nil { + switch err.(type) { + case *trie.MissingNodeError: + return nil, nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) + default: + return nil, nil, err + } } } // State was available at historical point, regenerate @@ -80,38 +119,39 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64) (statedb *s database.TrieDB().Dereference(parent) } }() - for block.NumberU64() < origin { + for current.NumberU64() < origin { // Print progress logs if long enough time elapsed - if time.Since(logged) > 8*time.Second { - log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start)) + if time.Since(logged) > 8*time.Second && report { + log.Info("Regenerating historical state", "block", current.NumberU64()+1, "target", origin, "remaining", origin-current.NumberU64()-1, "elapsed", time.Since(start)) logged = time.Now() } // Retrieve the next block to regenerate and process it - if block = eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil { - return nil, nil, nil, fmt.Errorf("block #%d not found", block.NumberU64()+1) + next := current.NumberU64() + 1 + if current = eth.blockchain.GetBlockByNumber(next); current == nil { + return nil, nil, fmt.Errorf("block #%d not found", next) } - _, _, _, _, err := eth.blockchain.Processor().Process(block, statedb, privateStateRepo, vm.Config{}) + _, _, _, _, err = eth.blockchain.Processor().Process(current, statedb, privateStateDB, vm.Config{}) if err != nil { - return nil, nil, nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err) + return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } + var root common.Hash // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(block.Number())) + root, err = statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number())) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - err = statedb.Reset(root) - // statedb, err = state.New(root, database, nil) // Quorum + statedb, err = state.New(root, database, nil) if err != nil { - return nil, nil, nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), 
err) + return nil, nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) } // Quorum - err = privateStateRepo.Commit(eth.blockchain.Config().IsEIP158(block.Number()), block) + err = privateStateDB.Commit(eth.blockchain.Config().IsEIP158(block.Number()), block) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - if err := privateStateRepo.Reset(); err != nil { - return nil, nil, nil, fmt.Errorf("private state reset after block %d failed: %v", block.NumberU64(), err) + if err := privateStateDB.Reset(); err != nil { + return nil, nil, fmt.Errorf("private state reset after block %d failed: %v", block.NumberU64(), err) } // End Quorum @@ -121,128 +161,54 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64) (statedb *s } parent = root } - nodes, imgs := database.TrieDB().Size() - log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) - return statedb, privateStateRepo, func() { database.TrieDB().Dereference(parent) }, nil -} - -// statesInRange retrieves a batch of state databases associated with the specific -// block ranges. If no state is locally available for the given range, a number of -// blocks are attempted to be reexecuted to generate the ancestor state. -func (eth *Ethereum) statesInRange(fromBlock, toBlock *types.Block, reexec uint64) (states []*state.StateDB, privateStateRepos []mps.PrivateStateRepository, release func(), err error) { - statedb, privateStateRepo, err := eth.blockchain.StateAt(fromBlock.Root()) - if err != nil { - statedb, privateStateRepo, _, err = eth.stateAtBlock(fromBlock, reexec) + if report { + nodes, imgs := database.TrieDB().Size() + log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) } - if err != nil { - return nil, nil, nil, err - } - states = append(states, statedb.Copy()) - privateStateRepos = append(privateStateRepos, privateStateRepo) - - var ( - logged time.Time - parent common.Hash - start = time.Now() - refs = []common.Hash{fromBlock.Root()} - database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true}) - ) - // Release all resources(including the states referenced by `stateAtBlock`) - // if error is returned. 
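statesInRange, removed here, pre-materialized a StateDB copy per block in the range, which is what made it worth deleting: the rewritten traceChain threads a single state forward instead. The replacement loop reduced to its essentials (illustrative; it reuses the same Process/Commit calls seen in this file):

for n := from + 1; n <= to; n++ {
	block := eth.blockchain.GetBlockByNumber(n)
	if block == nil {
		return fmt.Errorf("block #%d not found", n)
	}
	// Reexecute on the single threaded-through state...
	if _, _, _, _, err := eth.blockchain.Processor().Process(block, statedb, privateStateDB, vm.Config{}); err != nil {
		return fmt.Errorf("processing block %d failed: %v", n, err)
	}
	// ...and roll it forward rather than keeping a copy per block.
	root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(block.Number()))
	if err != nil {
		return err
	}
	if statedb, err = state.New(root, database, nil); err != nil {
		return err
	}
}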
- defer func() { - if err != nil { - for _, ref := range refs { - database.TrieDB().Dereference(ref) - } - } - }() - for i := fromBlock.NumberU64() + 1; i <= toBlock.NumberU64(); i++ { - // Print progress logs if long enough time elapsed - if time.Since(logged) > 8*time.Second { - logged = time.Now() - log.Info("Regenerating historical state", "block", i, "target", fromBlock.NumberU64(), "remaining", toBlock.NumberU64()-i, "elapsed", time.Since(start)) - } - // Retrieve the next block to regenerate and process it - block := eth.blockchain.GetBlockByNumber(i) - if block == nil { - return nil, nil, nil, fmt.Errorf("block #%d not found", i) - } - _, _, _, _, err := eth.blockchain.Processor().Process(block, statedb, privateStateRepo, vm.Config{}) - if err != nil { - return nil, nil, nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err) - } - // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(block.Number())) - if err != nil { - return nil, nil, nil, err - } - statedb, privateStateRepo, err := eth.blockchain.StateAt(root) - if err != nil { - return nil, nil, nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err) - } - states = append(states, statedb.Copy()) - privateStateRepos = append(privateStateRepos, privateStateRepo.Copy()) - - // Reference the trie twice, once for us, once for the tracer - database.TrieDB().Reference(root, common.Hash{}) - database.TrieDB().Reference(root, common.Hash{}) - refs = append(refs, root) - - // Dereference all past tries we ourselves are done working with - if parent != (common.Hash{}) { - database.TrieDB().Dereference(parent) - } - parent = root - } - // release is handler to release all states referenced, including - // the one referenced in `stateAtBlock`. - release = func() { - for _, ref := range refs { - database.TrieDB().Dereference(ref) - } - } - return states, privateStateRepos, release, nil + return statedb, privateStateDB, nil } // stateAtTransaction returns the execution environment of a certain transaction. -func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, func(), error) { +func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, error) { // Short circuit if it's genesis block. 
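stateAtTransaction loses its release callback in this hunk; with the ephemeral database there is nothing left to release. For callers the migration is mechanical (illustrative, based on the two signatures in this diff):

// before:
//   msg, vmctx, statedb, privDb, repo, release, err := eth.stateAtTransaction(ctx, block, idx, reexec)
//   defer release()
// after:
//   msg, vmctx, statedb, privDb, repo, err := eth.stateAtTransaction(ctx, block, idx, reexec)
// The reexecuted state now lives in an in-memory database that is collected
// together with the returned StateDB, so no explicit cleanup hook remains.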
if block.NumberU64() == 0 { - return nil, vm.BlockContext{}, nil, nil, nil, nil, errors.New("no transaction in genesis") + return nil, vm.BlockContext{}, nil, nil, nil, errors.New("no transaction in genesis") } // Create the parent state database parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + return nil, vm.BlockContext{}, nil, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) } - statedb, privateStateRepo, release, err := eth.stateAtBlock(parent, reexec) + // Quorum + statedb, privateStateRepo, err := eth.stateAtBlock(parent, reexec, nil, true) if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, err + return nil, vm.BlockContext{}, nil, nil, nil, err } psm, err := eth.blockchain.PrivateStateManager().ResolveForUserContext(ctx) if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, err + return nil, vm.BlockContext{}, nil, nil, nil, err } privateStateDb, err := privateStateRepo.StatePSI(psm.ID) if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, err + return nil, vm.BlockContext{}, nil, nil, nil, err } + // End Quorum if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, privateStateDb, privateStateRepo, release, nil + return nil, vm.BlockContext{}, statedb, privateStateDb, privateStateRepo, nil } // Recompute transactions up to the target index. signer := types.MakeSigner(eth.blockchain.Config(), block.Number()) for idx, tx := range block.Transactions() { // Quorum privateStateDbToUse := core.PrivateStateDBForTxn(eth.blockchain.Config().IsQuorum, tx, statedb, privateStateDb) - // /Quorum + // End Quorum // Assemble the transaction call message and return if the requested offset msg, _ := tx.AsMessage(signer) - msg = eth.clearMessageDataIfNonParty(msg, psm) + msg = eth.clearMessageDataIfNonParty(msg, psm) // Quorum txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) if idx == txIndex { - return msg, context, statedb, privateStateDb, privateStateRepo, release, nil + return msg, context, statedb, privateStateDb, privateStateRepo, nil } // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(context, txContext, statedb, privateStateDbToUse, eth.blockchain.Config(), vm.Config{}) @@ -251,17 +217,17 @@ func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block, return applyInnerTransaction(eth.blockchain, statedb, privateStateDbToUse, block.Header(), tx, vm.Config{}, privateStateRepo.IsMPS(), privateStateRepo, vmenv, innerTx, idx) } if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - release() - return nil, vm.BlockContext{}, nil, nil, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - release() - return nil, vm.BlockContext{}, nil, nil, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) 
} +// Quorum + func (eth *Ethereum) GetBlockchain() *core.BlockChain { return eth.BlockChain() } diff --git a/eth/sync.go b/eth/sync.go index 0551528b23..60f77593fd 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -296,8 +296,8 @@ func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { } } // Nope, we're really full syncing - head := cs.handler.chain.CurrentHeader() - td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64()) + head := cs.handler.chain.CurrentBlock() + td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64()) return downloader.FullSync, td } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 56c657e5bd..ba8d6d034c 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -69,9 +69,8 @@ type Backend interface { ChainConfig() *params.ChainConfig Engine() consensus.Engine ChainDb() ethdb.Database - StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, mps.PrivateStateRepository, func(), error) - StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, func(), error) - StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, []mps.PrivateStateRepository, func(), error) + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, mps.PrivateStateRepository, error) + StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, error) // Quorum GetBlockchain() *core.BlockChain @@ -92,24 +91,7 @@ type chainContext struct { ctx context.Context } -// Quorum - -func (context *chainContext) Config() *params.ChainConfig { - return context.api.backend.ChainConfig() -} - -func (context *chainContext) QuorumConfig() *core.QuorumChainConfig { - return &core.QuorumChainConfig{} -} - -func (context *chainContext) PrivateStateManager() mps.PrivateStateManager { - return context.api.backend.GetBlockchain().PrivateStateManager() -} - -func (context *chainContext) CheckAndSetPrivateState(txLogs []*types.Log, privateState *state.StateDB, psi types.PrivateStateIdentifier) { -} - -// End Quorum +var _ core.ChainContext = &chainContext{} func (context *chainContext) Engine() consensus.Engine { return context.api.backend.Engine() @@ -130,6 +112,25 @@ func (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.H return header } +// Quorum + +func (context *chainContext) Config() *params.ChainConfig { + return context.api.backend.ChainConfig() +} + +func (context *chainContext) QuorumConfig() *core.QuorumChainConfig { + return &core.QuorumChainConfig{} +} + +func (context *chainContext) PrivateStateManager() mps.PrivateStateManager { + return context.api.backend.GetBlockchain().PrivateStateManager() +} + +func (context *chainContext) CheckAndSetPrivateState(txLogs []*types.Log, privateState *state.StateDB, psi types.PrivateStateIdentifier) { +} + +// End Quorum + // chainContext constructs the context reader which is used by the evm for reading // the necessary chain context. func (api *API) chainContext(ctx context.Context) core.ChainContext { @@ -193,6 +194,15 @@ type StdTraceConfig struct { TxHash common.Hash } +// txTraceContext is the contextual info about a transaction before it gets run.
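The struct defined next travels with every traced transaction. The reworked traceChain later in this diff populates it as follows (shape excerpted from that code, comments added):

txctx := &txTraceContext{
	index: i,                 // position of the tx inside its block
	hash:  tx.Hash(),         // tx hash, for logs and tracer output
	block: task.block.Hash(), // enclosing block hash
	tx:    tx,                // Quorum: the tx itself, needed for private-state handling
}
res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, task.privateStateDb, task.privateStateRepo, config)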
+// txTraceContext is the contextual info about a transaction before it gets run. +type txTraceContext struct { + index int // Index of the transaction within the block + hash common.Hash // Hash of the transaction + block common.Hash // Hash of the block containing the transaction + // Quorum + tx *types.Transaction // Transaction is needed to resolve public vs private state +} + // txTraceResult is the result of a single transaction trace. type txTraceResult struct { Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer @@ -204,9 +214,11 @@ type txTraceResult struct { type blockTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing block *types.Block // Block to trace the transactions from + rootref common.Hash // Trie root reference held for this task results []*txTraceResult // Trace results produced by the task // Quorum - privateStateDb *state.StateDB + privateStateDb *state.StateDB + privateStateRepo mps.PrivateStateRepository } // blockTraceResult represents the results of tracing a single block when an entire @@ -223,7 +235,8 @@ type txTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing index int // Transaction offset in the block // Quorum - privateStateDb *state.StateDB + privateStateDb *state.StateDB + privateStateRepo mps.PrivateStateRepository } // TraceChain returns the structured logs created during the execution of EVM @@ -254,37 +267,28 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config } sub := notifier.CreateSubscription() - // Shift the border to a block ahead in order to get the states - // before these blocks. - endBlock, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(end.NumberU64()-1), end.ParentHash()) - if err != nil { - return nil, err - } // Prepare all the states for tracing. Note this procedure can take a very // long time. Timeout mechanism is necessary. reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec } + // Quorum psm, err := api.chainContext(ctx).PrivateStateManager().ResolveForUserContext(ctx) if err != nil { return nil, err } - states, privateStateRepos, release, err := api.backend.StatesInRange(ctx, start, endBlock, reexec) - if err != nil { - return nil, err - } - defer release() // Release all the resources in the last step.
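In place of the removed bulk StatesInRange call, traceChain now regenerates each parent state on the fly and pins it in the trie database (see the feeder loop later in this hunk). A reduced sketch of that pinning pattern, assuming `statedb`, `block` and `parent` as in the loop:

```go
// Reduced sketch of the reference-counting pattern used below: hold a
// reference on the current block's root so its state survives garbage
// collection while a tracing task still needs it, then drop the previous
// block's reference once the child is pinned.
if trieDb := statedb.Database().TrieDB(); trieDb != nil {
	trieDb.Reference(block.Root(), common.Hash{}) // pin the current state
	if parent != (common.Hash{}) {
		trieDb.Dereference(parent) // release the previous block's state
	}
}
parent = block.Root()
```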
- + // End Quorum blocks := int(end.NumberU64() - start.NumberU64()) threads := runtime.NumCPU() if threads > blocks { threads = blocks } var ( - pend = new(sync.WaitGroup) - tasks = make(chan *blockTraceTask, threads) - results = make(chan *blockTraceTask, threads) + pend = new(sync.WaitGroup) + tasks = make(chan *blockTraceTask, threads) + results = make(chan *blockTraceTask, threads) + localctx = context.Background() ) for th := 0; th < threads; th++ { pend.Add(1) @@ -294,20 +298,27 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config // Fetch and execute the next block trace tasks for task := range tasks { signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number()) - blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil) + blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(localctx), nil) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { msg, _ := tx.AsMessage(signer) msg = api.clearMessageDataIfNonParty(msg, psm) - res, err := api.traceTx(ctx, msg, tx, blockCtx, task.statedb, task.privateStateDb, config, i, task.block.Header(), privateStateRepos[0]) + txctx := &txTraceContext{ + index: i, + hash: tx.Hash(), + block: task.block.Hash(), + tx: tx, + } + res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, task.privateStateDb, task.privateStateRepo, config) if err != nil { task.results[i] = &txTraceResult{Error: err.Error()} log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err) break } // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number())) - task.privateStateDb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number())) + eip158 := api.backend.ChainConfig().IsEIP158(task.block.Number()) + task.statedb.Finalise(eip158) + task.privateStateDb.Finalise(eip158) task.results[i] = &txTraceResult{Result: res} } // Stream the result back to the user or abort on teardown @@ -324,10 +335,15 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config go func() { var ( - logged time.Time - number uint64 - traced uint64 - failed error + logged time.Time + number uint64 + traced uint64 + failed error + parent common.Hash + statedb *state.StateDB + // Quorum + privateStateRepo mps.PrivateStateRepository + privateState *state.StateDB ) // Ensure everything is properly cleaned up on any exit path defer func() { @@ -345,7 +361,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config close(results) }() // Feed all the blocks both into the tracer, as well as fast process concurrently - for number = start.NumberU64() + 1; number <= end.NumberU64(); number++ { + for number = start.NumberU64(); number < end.NumberU64(); number++ { // Stop tracing if interruption was requested select { case <-notifier.Closed(): @@ -357,24 +373,53 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config logged = time.Now() log.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin)) } - // Retrieve the next block to trace - block, err := api.blockByNumber(ctx, rpc.BlockNumber(number)) + // Retrieve the parent state to trace on top + block, err := api.blockByNumber(localctx, rpc.BlockNumber(number)) if err != nil { failed = err break } - // Send the block 
over to the concurrent tracers (if not in the fast-forward phase) - if number <= start.NumberU64() { // Quorum - continue + // Prepare the statedb for tracing. Don't use the live database for + // tracing to avoid persisting state junk into the database. + statedb, privateStateRepo, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false) + if err != nil { + failed = err + break } - txs := block.Transactions() - privateState, err := privateStateRepos[0].StatePSI(psm.ID) + if statedb.Database().TrieDB() != nil { + // Hold the reference for tracer, will be released at the final stage + statedb.Database().TrieDB().Reference(block.Root(), common.Hash{}) + + // Release the parent state because it's already held by the tracer + if parent != (common.Hash{}) { + statedb.Database().TrieDB().Dereference(parent) + } + } + parent = block.Root() + + privateState, err = privateStateRepo.StatePSI(psm.ID) if err != nil { failed = err break } + + next, err := api.blockByNumber(localctx, rpc.BlockNumber(number+1)) + if err != nil { + failed = err + break + } + // Send the block over to the concurrent tracers (if not in the fast-forward phase) + txs := next.Transactions() select { - case tasks <- &blockTraceTask{statedb: states[int(number-start.NumberU64()-1)], privateStateDb: privateState.Copy(), block: block, results: make([]*txTraceResult, len(txs))}: + case tasks <- &blockTraceTask{ + statedb: statedb.Copy(), + block: next, + rootref: block.Root(), + results: make([]*txTraceResult, len(txs)), + // Quorum + privateStateDb: privateState.Copy(), + privateStateRepo: privateStateRepo, + }: case <-notifier.Closed(): return } @@ -397,6 +442,13 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config } done[uint64(result.Block)] = result + // Dereference any parent tries held in memory by this task + if res.statedb.Database().TrieDB() != nil { + res.statedb.Database().TrieDB().Dereference(res.rootref) + } + if res.privateStateDb != nil && res.privateStateDb != res.statedb && res.privateStateDb.Database().TrieDB() != nil { + res.privateStateDb.Database().TrieDB().Dereference(res.rootref) + } // Stream completed traces to the user, aborting on the first error for result, ok := done[next]; ok; result, ok = done[next] { if len(result.Traces) > 0 || next == end.NumberU64() { @@ -500,10 +552,12 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, privateStateRepo, release, err := api.backend.StateAtBlock(ctx, parent, reexec) + statedb, privateStateRepo, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true) if err != nil { return nil, err } + + // Quorum psm, err := api.chainContext(ctx).PrivateStateManager().ResolveForUserContext(ctx) if err != nil { return nil, err @@ -512,7 +566,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac if err != nil { return nil, err } - defer release() + // End Quorum // Execute all the transactions contained within the block concurrently var ( @@ -528,6 +582,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac threads = len(txs) } blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + blockHash := block.Hash() for th := 0; th < threads; th++ { pend.Add(1) go func() { @@ -537,7 +592,13 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac tx := txs[task.index] msg, _ := tx.AsMessage(signer) msg = 
api.clearMessageDataIfNonParty(msg, psm) - res, err := api.traceTx(ctx, msg, tx, blockCtx, task.statedb, task.privateStateDb, config, task.index, block.Header(), privateStateRepo) + txctx := &txTraceContext{ + index: task.index, + hash: tx.Hash(), + block: blockHash, + tx: tx, + } + res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, task.privateStateDb, task.privateStateRepo, config) if err != nil { results[task.index] = &txTraceResult{Error: err.Error()} continue @@ -551,34 +612,32 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac for i, tx := range txs { // Send the trace task over for execution jobs <- &txTraceTask{ - statedb: statedb.Copy(), - privateStateDb: privateStateDb.Copy(), - index: i, + statedb: statedb.Copy(), + index: i, + // Quorum + privateStateDb: privateStateDb.Copy(), + privateStateRepo: privateStateRepo, } // Generate the next state snapshot fast without tracing msg, _ := tx.AsMessage(signer) - txContext := core.NewEVMTxContext(msg) - - // Quorum privateStateDbToUse := core.PrivateStateDBForTxn(api.chainContext(ctx).Config().IsQuorum, tx, statedb, privateStateDb) - vmenv := vm.NewEVM(blockCtx, txContext, statedb, privateStateDbToUse, api.chainContext(ctx).Config(), vm.Config{}) + statedb.Prepare(tx.Hash(), block.Hash(), i) + privateStateDbToUse.Prepare(tx.Hash(), block.Hash(), i) + vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, privateStateDbToUse, api.backend.ChainConfig(), vm.Config{}) vmenv.SetCurrentTX(tx) vmenv.InnerApply = func(innerTx *types.Transaction) error { return applyInnerTransaction(api.backend.GetBlockchain(), statedb, privateStateDbToUse, block.Header(), tx, vm.Config{}, privateStateRepo.IsMPS(), privateStateRepo, vmenv, innerTx, i) } - msg = api.clearMessageDataIfNonParty(msg, psm) - // /Quorum - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil { failed = err break } // Finalize the state so any modifications are written to the trie // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) - privateStateDb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) - + eip158 := vmenv.ChainConfig().IsEIP158(block.Number()) + statedb.Finalise(eip158) + privateStateDb.Finalise(eip158) } close(jobs) pend.Wait() @@ -611,7 +670,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, privateStateRepo, release, err := api.backend.StateAtBlock(ctx, parent, reexec) + statedb, privateStateRepo, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true) if err != nil { return nil, err } @@ -623,8 +682,6 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block if err != nil { return nil, err } - defer release() - // Retrieve the tracing configurations, or use default values var ( logConfig vm.LogConfig @@ -693,16 +750,22 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block } } // Execute the transaction and flush any traces to disk + // Quorum privateStateDbToUse := core.PrivateStateDBForTxn(chainConfig.IsQuorum, tx, statedb, privateStateDb) vmConf.ApplyOnPartyOverride = &psm.ID + vmenv := vm.NewEVM(vmctx, txContext, statedb, privateStateDbToUse, chainConfig, vmConf) + + // Quorum vmenv.SetCurrentTX(tx) vmenv.InnerApply = func(innerTx *types.Transaction) error { return 
applyInnerTransaction(api.backend.GetBlockchain(), statedb, privateStateDbToUse, block.Header(), tx, vmConf, privateStateRepo.IsMPS(), privateStateRepo, vmenv, innerTx, i) } msg = api.clearMessageDataIfNonParty(msg, psm) - // /Quorum + + statedb.Prepare(tx.Hash(), block.Hash(), i) + privateStateDbToUse.Prepare(tx.Hash(), block.Hash(), i) _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) if writer != nil { @@ -717,7 +780,9 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block } // Finalize the state so any modifications are written to the trie // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) + eip158 := vmenv.ChainConfig().IsEIP158(block.Number()) + statedb.Finalise(eip158) + privateStateDbToUse.Finalise(eip158) // If we've traced the transaction we were looking for, abort if tx.Hash() == txHash { @@ -741,7 +806,10 @@ func containsTx(block *types.Block, hash common.Hash) bool { // TraceTransaction returns the structured logs created during the execution of EVM // and returns them as a JSON object. func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { - tx, blockHash, blockNumber, index, _ := api.backend.GetTransaction(ctx, hash) + tx, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) + if err != nil { + return nil, err + } if tx == nil { return nil, fmt.Errorf("transaction %#x not found", hash) } @@ -757,12 +825,17 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config * if err != nil { return nil, err } - msg, vmctx, statedb, privateState, privateStateRepo, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) + msg, vmctx, statedb, privateState, privateStateRepo, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) if err != nil { return nil, err } - defer release() - return api.traceTx(ctx, msg, tx, vmctx, statedb, privateState, config, int(index), block.Header(), privateStateRepo) + txctx := &txTraceContext{ + index: int(index), + hash: hash, + block: blockHash, + tx: tx, + } + return api.traceTx(ctx, msg, txctx, vmctx, statedb, privateState, privateStateRepo, config) } // TraceCall lets you trace a given eth_call. 
It collects the structured logs @@ -788,20 +861,15 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHa if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, privateStateRepo, release, err := api.backend.StateAtBlock(ctx, block, reexec) + statedb, privateStateRepo, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true) if err != nil { return nil, err } - defer release() privateStateDb, err := privateStateRepo.DefaultState() if err != nil { return nil, err } - // Quorum - header := block.Header() - // End Quorum - // Execute the trace msg := args.ToMessage(api.backend.RPCGasCap()) vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) @@ -816,23 +884,24 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHa Timeout: config.Timeout, } } - res, err := api.traceTx(ctx, msg, nil, vmctx, statedb, privateStateDb, noTracerConfig, 0, header, privateStateRepo) // test private with no config - if exeRes, ok := res.(*ethapi.ExecutionResult); ok && err == nil && len(exeRes.StructLogs) > 0 { // check there is a result + res, err := api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, privateStateDb, privateStateRepo, noTracerConfig) // test private with no config + if exeRes, ok := res.(*ethapi.ExecutionResult); ok && err == nil && len(exeRes.StructLogs) > 0 { // check there is a result if config != nil && config.Tracer != nil { // re-run the private call with the custom JS tracer - return api.traceTx(ctx, msg, nil, vmctx, statedb, privateStateDb, config, 0, header, privateStateRepo) // re-run with trace + return api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, privateStateDb, privateStateRepo, config) // re-run with trace } return res, err // return private result with no tracer } else if err == nil && !ok { return nil, fmt.Errorf("can not cast traceTx result to *ethapi.ExecutionResult: %#v, %#v", res, err) // better error formatting than "method handler failed" } - // / Quorum - return api.traceTx(ctx, msg, nil, vmctx, statedb, privateStateDb, config, 0, header, privateStateRepo) // public / standard run + // End Quorum + + return api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, privateStateDb, privateStateRepo, config) } // traceTx configures a new tracer according to the provided configuration, and // executes the given message in the provided environment. The return value will // be tracer dependent. 
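As the signature change just below shows, traceTx now receives its per-transaction metadata through a single *txTraceContext instead of the previous tx/txIndex/header triple. A representative call-site sketch (the `i`, `tx`, `blockHash`, `msg` and state identifiers are assumed to be in scope, as in the worker loops above):

```go
// Representative call-site shape after the refactor; each call site in this
// change populates the same four fields before invoking traceTx.
txctx := &txTraceContext{
	index: i,         // transaction offset inside the block
	hash:  tx.Hash(), // transaction hash
	block: blockHash, // hash of the containing block
	tx:    tx,        // Quorum: used to choose public vs private state
}
res, err := api.traceTx(ctx, msg, txctx, blockCtx, statedb, privateStateDb, privateStateRepo, config)
```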
-func (api *API) traceTx(ctx context.Context, message core.Message, tx *types.Transaction, vmctx vm.BlockContext, statedb, privateStateDb *state.StateDB, config *TraceConfig, txIndex int, header *types.Header, privateStateRepo mps.PrivateStateRepository) (interface{}, error) { +func (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTraceContext, vmctx vm.BlockContext, statedb, privateStateDb *state.StateDB, privateStateRepo mps.PrivateStateRepository, config *TraceConfig) (interface{}, error) { // Assemble the structured logger or the JavaScript tracer var ( tracer vm.Tracer @@ -856,7 +925,9 @@ func (api *API) traceTx(ctx context.Context, message core.Message, tx *types.Tra deadlineCtx, cancel := context.WithTimeout(ctx, timeout) go func() { <-deadlineCtx.Done() - tracer.(*Tracer).Stop(errors.New("execution timeout")) + if deadlineCtx.Err() == context.DeadlineExceeded { + tracer.(*Tracer).Stop(errors.New("execution timeout")) + } }() defer cancel() @@ -869,7 +940,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, tx *types.Tra // Quorum // Set the private state to public state if it is not a private tx - privateStateDbToUse := core.PrivateStateDBForTxn(api.backend.ChainConfig().IsQuorum, tx, statedb, privateStateDb) + privateStateDbToUse := core.PrivateStateDBForTxn(api.backend.ChainConfig().IsQuorum, txctx.tx, statedb, privateStateDb) psm, err := api.chainContext(ctx).PrivateStateManager().ResolveForUserContext(ctx) if err != nil { @@ -877,13 +948,21 @@ func (api *API) traceTx(ctx context.Context, message core.Message, tx *types.Tra } // Run the transaction with tracing enabled. - vmconf := &vm.Config{Debug: true, Tracer: tracer, ApplyOnPartyOverride: &psm.ID} - vmenv := vm.NewEVM(vmctx, txContext, statedb, privateStateDbToUse, api.chainContext(ctx).Config(), *vmconf) - vmenv.SetCurrentTX(tx) + vmconf := vm.Config{Debug: true, Tracer: tracer, ApplyOnPartyOverride: &psm.ID} + vmenv := vm.NewEVM(vmctx, txContext, statedb, privateStateDbToUse, api.backend.ChainConfig(), vmconf) + vmenv.SetCurrentTX(txctx.tx) vmenv.InnerApply = func(innerTx *types.Transaction) error { - return applyInnerTransaction(api.backend.GetBlockchain(), statedb, privateStateDbToUse, header, tx, *vmconf, privateStateRepo.IsMPS(), privateStateRepo, vmenv, innerTx, txIndex) + header, err := api.backend.HeaderByHash(ctx, txctx.hash) + if err != nil { + return err + } + return applyInnerTransaction(api.backend.GetBlockchain(), statedb, privateStateDbToUse, header, txctx.tx, vmconf, privateStateRepo.IsMPS(), privateStateRepo, vmenv, innerTx, txctx.index) } - // /Quorum + // End Quorum + + // Call Prepare to clear out the statedb access list + statedb.Prepare(txctx.hash, txctx.block, txctx.index) + privateStateDbToUse.Prepare(txctx.hash, txctx.block, txctx.index) result, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas())) if err != nil { @@ -913,6 +992,21 @@ func (api *API) traceTx(ctx context.Context, message core.Message, tx *types.Tra } } +// APIs return the collection of RPC services the tracer package offers. +func APIs(backend Backend) []rpc.API { + // Append all the local APIs and return + return []rpc.API{ + { + Namespace: "debug", + Version: "1.0", + Service: NewAPI(backend), + Public: false, + }, + } +} + +// Quorum + // clearMessageDataIfNonParty sets the message data to empty hash in case the private state is not party to the // transaction. 
The effect is that when the private tx payload is resolved using the privacy manager the private part of // the transaction is not retrieved and the transaction is being executed as if the node/private state is not party to @@ -936,16 +1030,3 @@ func applyInnerTransaction(bc *core.BlockChain, stateDB *state.StateDB, privateS ) return core.ApplyInnerTransaction(bc, author, gp, stateDB, privateStateDB, header, outerTx, &usedGas, evmConf, forceNonParty, privateStateRepo, vmenv, innerTx, txIndex) } - -// APIs return the collection of RPC services the tracer package offers. -func APIs(backend Backend) []rpc.API { - // Append all the local APIs and return - return []rpc.API{ - { - Namespace: "debug", - Version: "1.0", - Service: NewAPI(backend), - Public: false, - }, - } -} diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 0003a83a73..a84a339437 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -58,6 +58,8 @@ type testBackend struct { chain *core.BlockChain } +var _ Backend = &testBackend{} + func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { backend := &testBackend{ chainConfig: params.TestChainConfig, @@ -138,33 +140,33 @@ func (b *testBackend) ChainDb() ethdb.Database { return b.chaindb } -func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, mps.PrivateStateRepository, func(), error) { +func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, mps.PrivateStateRepository, error) { statedb, privateStateRepo, err := b.chain.StateAt(block.Root()) if err != nil { - return nil, nil, nil, errStateNotFound + return nil, nil, errStateNotFound } - return statedb, privateStateRepo, func() {}, nil + return statedb, privateStateRepo, nil } -func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, func(), error) { +func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, error) { parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, errBlockNotFound + return nil, vm.BlockContext{}, nil, nil, nil, errBlockNotFound } statedb, privateStateRepo, err := b.chain.StateAt(parent.Root()) if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, errStateNotFound + return nil, vm.BlockContext{}, nil, nil, nil, errStateNotFound } psm, err := b.chain.PrivateStateManager().ResolveForUserContext(ctx) if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, err + return nil, vm.BlockContext{}, nil, nil, nil, err } privateState, err := privateStateRepo.StatePSI(psm.ID) if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, errStateNotFound + return nil, vm.BlockContext{}, nil, nil, nil, errStateNotFound } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, privateState, privateStateRepo, func() {}, nil + return nil, vm.BlockContext{}, statedb, privateState, privateStateRepo, nil } // Recompute transactions up to the target index. 
signer := types.MakeSigner(b.chainConfig, block.Number()) @@ -173,33 +175,15 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), b.chain, nil) if idx == txIndex { - return msg, context, statedb, privateState, privateStateRepo, func() {}, nil + return msg, context, statedb, privateState, privateStateRepo, nil } vmenv := vm.NewEVM(context, txContext, statedb, privateState, b.chainConfig, vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, nil, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) -} - -func (b *testBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, []mps.PrivateStateRepository, func(), error) { - var result []*state.StateDB - var privateResult []mps.PrivateStateRepository - for number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number += 1 { - block := b.chain.GetBlockByNumber(number) - if block == nil { - return nil, nil, nil, errBlockNotFound - } - statedb, privateStateRepo, err := b.chain.StateAt(block.Root()) - if err != nil { - return nil, nil, nil, errStateNotFound - } - result = append(result, statedb) - privateResult = append(privateResult, privateStateRepo) - } - return result, privateResult, func() {}, nil + return nil, vm.BlockContext{}, nil, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } func (b *testBackend) GetBlockchain() *core.BlockChain { diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index 4c398edf34..008075faa6 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -287,8 +287,6 @@ func (cw *contractWrapper) pushObject(vm *duktape.Context) { // Tracer provides an implementation of Tracer that evaluates a Javascript // function for each VM execution step. type Tracer struct { - inited bool // Flag whether the context was already inited from the EVM - vm *duktape.Context // Javascript VM instance tracerObject int // Stack index of the tracer JavaScript object @@ -314,6 +312,8 @@ type Tracer struct { reason error // Textual reason for the interruption } +var _ vm.Tracer = &Tracer{} + // New instantiates a new tracer instance. code specifies a Javascript snippet, // which must evaluate to an expression returning an object with 'step', 'fault' // and 'result' functions. @@ -529,7 +529,7 @@ func wrapError(context string, err error) error { } // CaptureStart implements the Tracer interface to initialize the tracing operation. 
-func (jst *Tracer) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { +func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { jst.ctx["type"] = "CALL" if create { jst.ctx["type"] = "CREATE" @@ -540,77 +540,67 @@ func (jst *Tracer) CaptureStart(from common.Address, to common.Address, create b jst.ctx["gas"] = gas jst.ctx["value"] = value - return nil + // Initialize the context + jst.ctx["block"] = env.Context.BlockNumber.Uint64() + jst.dbWrapper.db = env.StateDB + // Compute intrinsic gas + isHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber) + isIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber) + intrinsicGas, err := core.IntrinsicGas(input, nil, jst.ctx["type"] == "CREATE", isHomestead, isIstanbul) + if err != nil { + return + } + jst.ctx["intrinsicGas"] = intrinsicGas } // CaptureState implements the Tracer interface to trace a single step of VM execution. -func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, rdata []byte, contract *vm.Contract, depth int, err error) error { - if jst.err == nil { - // Initialize the context if it wasn't done yet - if !jst.inited { - jst.ctx["block"] = env.Context.BlockNumber.Uint64() - // Compute intrinsic gas - isHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber) - isIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber) - var input []byte - if data, ok := jst.ctx["input"].([]byte); ok { - input = data - } - intrinsicGas, err := core.IntrinsicGas(input, nil, jst.ctx["type"] == "CREATE", isHomestead, isIstanbul) - if err != nil { - return err - } - jst.ctx["intrinsicGas"] = intrinsicGas - jst.inited = true - } - // If tracing was interrupted, set the error and stop - if atomic.LoadUint32(&jst.interrupt) > 0 { - jst.err = jst.reason - return nil - } - jst.opWrapper.op = op - jst.stackWrapper.stack = stack - jst.memoryWrapper.memory = memory - jst.contractWrapper.contract = contract - jst.dbWrapper.db = env.StateDB - - *jst.pcValue = uint(pc) - *jst.gasValue = uint(gas) - *jst.costValue = uint(cost) - *jst.depthValue = uint(depth) - *jst.refundValue = uint(env.StateDB.GetRefund()) - - jst.errorValue = nil - if err != nil { - jst.errorValue = new(string) - *jst.errorValue = err.Error() - } - _, err := jst.call("step", "log", "db") - if err != nil { - jst.err = wrapError("step", err) - } +func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { + if jst.err != nil { + return + } + // If tracing was interrupted, set the error and stop + if atomic.LoadUint32(&jst.interrupt) > 0 { + jst.err = jst.reason + return + } + jst.opWrapper.op = op + jst.stackWrapper.stack = scope.Stack + jst.memoryWrapper.memory = scope.Memory + jst.contractWrapper.contract = scope.Contract + + *jst.pcValue = uint(pc) + *jst.gasValue = uint(gas) + *jst.costValue = uint(cost) + *jst.depthValue = uint(depth) + *jst.refundValue = uint(env.StateDB.GetRefund()) + + jst.errorValue = nil + if err != nil { + jst.errorValue = new(string) + *jst.errorValue = err.Error() + } + + if _, err := jst.call("step", "log", "db"); err != nil { + jst.err = wrapError("step", err) } - return nil } // CaptureFault implements the Tracer interface to trace an execution fault -// while running an opcode. 
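Taken together, the CaptureStart/CaptureState changes above and the CaptureFault/CaptureEnd changes below define the reworked vm.Tracer surface: memory, stack and contract now arrive bundled in a *vm.ScopeContext, CaptureStart receives the EVM directly, and none of the hooks return an error any more. A minimal conforming tracer, shown purely as a sketch (the noopTracer type is illustrative, not part of this change):

```go
// Illustrative no-op tracer matching the reworked hook signatures.
type noopTracer struct{}

func (noopTracer) CaptureStart(env *vm.EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
func (noopTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
}
func (noopTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
}
func (noopTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}
```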
-func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error { - if jst.err == nil { - // Apart from the error, everything matches the previous invocation - jst.errorValue = new(string) - *jst.errorValue = err.Error() +func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { + if jst.err != nil { + return + } + // Apart from the error, everything matches the previous invocation + jst.errorValue = new(string) + *jst.errorValue = err.Error() - _, err := jst.call("fault", "log", "db") - if err != nil { - jst.err = wrapError("fault", err) - } + if _, err := jst.call("fault", "log", "db"); err != nil { + jst.err = wrapError("fault", err) } - return nil } // CaptureEnd is called after the call finishes to finalize the tracing. -func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error { +func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { jst.ctx["output"] = output jst.ctx["time"] = t.String() jst.ctx["gasUsed"] = gasUsed @@ -618,7 +608,6 @@ func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, er if err != nil { jst.ctx["error"] = err.Error() } - return nil } // GetResult calls the Javascript 'result' function and returns its value, or any accumulated error diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go index 7d20193949..85a09047c8 100644 --- a/eth/tracers/tracer_test.go +++ b/eth/tracers/tracer_test.go @@ -47,7 +47,8 @@ type dummyStatedb struct { state.StateDB } -func (*dummyStatedb) GetRefund() uint64 { return 1337 } +func (*dummyStatedb) GetRefund() uint64 { return 1337 } +func (*dummyStatedb) GetBalance(addr common.Address) *big.Int { return new(big.Int) } type vmContext struct { blockCtx vm.BlockContext @@ -59,8 +60,7 @@ func testCtx() *vmContext { } func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { - db := &dummyStatedb{} - env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, db, db, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) var ( startGas uint64 = 10000 value = big.NewInt(0) @@ -68,7 +68,7 @@ func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { contract := vm.NewContract(account{}, account{}, value, startGas) contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0} - tracer.CaptureStart(contract.Caller(), contract.Address(), false, []byte{}, startGas, value) + tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, value) ret, err := env.Interpreter().Run(contract, []byte{}, false) tracer.CaptureEnd(ret, startGas-contract.Gas, 1, err) if err != nil { @@ -150,16 +150,56 @@ func TestHaltBetweenSteps(t *testing.T) { if err != nil { t.Fatal(err) } - db := &dummyStatedb{} - env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{}, db, db, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) - contract := vm.NewContract(&account{}, &account{}, big.NewInt(0), 0) + env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{}, &dummyStatedb{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + scope := &vm.ScopeContext{ + Contract: vm.NewContract(&account{}, 
&account{}, big.NewInt(0), 0), + } - tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, contract, 0, nil) + tracer.CaptureState(env, 0, 0, 0, 0, scope, nil, 0, nil) timeout := errors.New("stahp") tracer.Stop(timeout) - tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, contract, 0, nil) + tracer.CaptureState(env, 0, 0, 0, 0, scope, nil, 0, nil) if _, err := tracer.GetResult(); err.Error() != timeout.Error() { t.Errorf("Expected timeout error, got %v", err) } } + +// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb +// in 'result' +func TestNoStepExec(t *testing.T) { + runEmptyTrace := func(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { + env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + startGas := uint64(10000) + contract := vm.NewContract(account{}, account{}, big.NewInt(0), startGas) + tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, big.NewInt(0)) + tracer.CaptureEnd(nil, startGas-contract.Gas, 1, nil) + return tracer.GetResult() + } + execTracer := func(code string) []byte { + t.Helper() + ctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} + tracer, err := New(code, ctx.txCtx) + if err != nil { + t.Fatal(err) + } + ret, err := runEmptyTrace(tracer, ctx) + if err != nil { + t.Fatal(err) + } + return ret + } + for i, tt := range []struct { + code string + want string + }{ + { // tests that we don't panic on accessing the db methods + code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx, db){ return db.getBalance(ctx.to)} }", + want: `"0"`, + }, + } { + if have := execTracer(tt.code); tt.want != string(have) { + t.Errorf("testcase %d: expected return value to be %s got %s\n\tcode: %v", i, tt.want, string(have), tt.code) + } + } +} diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index ece3225550..87993ccb52 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -26,9 +26,8 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" @@ -291,8 +290,9 @@ func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) { want: chain[1].Header(), }, "future_block": { - block: big.NewInt(1000000000), - want: nil, + block: big.NewInt(1000000000), + want: nil, + wantErr: ethereum.NotFound, }, } for name, tt := range tests { @@ -302,10 +302,10 @@ func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) { defer cancel() got, err := ec.HeaderByNumber(ctx, tt.block) - if tt.wantErr != nil && (err == nil || err.Error() != tt.wantErr.Error()) { + if !errors.Is(err, tt.wantErr) { t.Fatalf("HeaderByNumber(%v) error = %q, want %q", tt.block, err, tt.wantErr) } - if got != nil && got.Number.Sign() == 0 { + if got != nil && got.Number != nil && got.Number.Sign() == 0 { got.Number = big.NewInt(0) // hack to make DeepEqual work } if !reflect.DeepEqual(got, tt.want) { @@ -576,6 +576,8 @@ func sendTransaction(ec *Client) error { return ec.SendTransaction(context.Background(), signedTx, bind.PrivateTxArgs{PrivateFor: nil}) } +// Quorum + func TestClient_PreparePrivateTransaction_whenTypical(t *testing.T) { testObject := 
NewClient(nil) diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index 70ac7a91ac..5d19cc3577 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -83,7 +83,7 @@ type Database struct { // New returns a wrapped LevelDB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. -func New(file string, cache int, handles int, namespace string) (*Database, error) { +func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) { return NewCustom(file, namespace, func(options *opt.Options) { // Ensure we have some minimal caching and file guarantees if cache < minCache { @@ -96,6 +96,9 @@ func New(file string, cache int, handles int, namespace string) (*Database, erro options.OpenFilesCacheCapacity = handles options.BlockCacheCapacity = cache / 2 * opt.MiB options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally + if readonly { + options.ReadOnly = true + } }) } @@ -458,7 +461,7 @@ func (b *batch) Put(key, value []byte) error { // Delete inserts a key removal into the batch for later committing. func (b *batch) Delete(key []byte) error { b.b.Delete(key) - b.size++ + b.size += len(key) return nil } diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index 4c5e1a84de..fedc9e326c 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -211,7 +211,7 @@ func (b *batch) Put(key, value []byte) error { // Delete inserts a key removal into the batch for later committing. func (b *batch) Delete(key []byte) error { b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true}) - b.size += 1 + b.size += len(key) return nil } diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index d9292923b4..b48661b86d 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -95,6 +95,8 @@ type Service struct { pongCh chan struct{} // Pong notifications are fed into this channel histCh chan []uint64 // History request block numbers are fed into this channel + headSub event.Subscription + txSub event.Subscription } // connWrapper is a wrapper to prevent concurrent-write or concurrent-read on the @@ -167,7 +169,12 @@ func New(node *node.Node, backend backend, engine consensus.Engine, url string) // Start implements node.Lifecycle, starting up the monitoring and reporting daemon. func (s *Service) Start() error { - go s.loop() + // Subscribe to chain events to execute updates on + chainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize) + s.headSub = s.backend.SubscribeChainHeadEvent(chainHeadCh) + txEventCh := make(chan core.NewTxsEvent, txChanSize) + s.txSub = s.backend.SubscribeNewTxsEvent(txEventCh) + go s.loop(chainHeadCh, txEventCh) log.Info("Stats daemon started") return nil @@ -175,22 +182,15 @@ func (s *Service) Start() error { // Stop implements node.Lifecycle, terminating the monitoring and reporting daemon. func (s *Service) Stop() error { + s.headSub.Unsubscribe() + s.txSub.Unsubscribe() log.Info("Stats daemon stopped") return nil } // loop keeps trying to connect to the netstats server, reporting chain events // until termination.
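Before the loop rewrite below, note what the ethstats lifecycle change above achieves: subscribing in Start and unsubscribing in Stop ties the subscriptions to the service rather than to the loop goroutine, and Unsubscribe fires the subscription's Err() channel, which loop watches as its exit signal. A condensed sketch of the pattern (error handling and the tx subscription elided, types as in the diff):

```go
// Condensed sketch of the subscribe-in-Start / unsubscribe-in-Stop pattern.
func (s *Service) Start() error {
	headCh := make(chan core.ChainHeadEvent, chainHeadChanSize)
	s.headSub = s.backend.SubscribeChainHeadEvent(headCh)
	go s.loop(headCh, nil) // nil: tx event channel elided in this sketch
	return nil
}

func (s *Service) Stop() error {
	s.headSub.Unsubscribe() // fires s.headSub.Err(), letting loop exit
	return nil
}
```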
-func (s *Service) loop() { - // Subscribe to chain events to execute updates on - chainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize) - headSub := s.backend.SubscribeChainHeadEvent(chainHeadCh) - defer headSub.Unsubscribe() - - txEventCh := make(chan core.NewTxsEvent, txChanSize) - txSub := s.backend.SubscribeNewTxsEvent(txEventCh) - defer txSub.Unsubscribe() - +func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core.NewTxsEvent) { // Start a goroutine that exhausts the subscriptions to avoid events piling up var ( quitCh = make(chan struct{}) @@ -223,9 +223,9 @@ func (s *Service) loop() { } // node stopped - case <-txSub.Err(): + case <-s.txSub.Err(): break HandleLoop - case <-headSub.Err(): + case <-s.headSub.Err(): break HandleLoop } } @@ -756,7 +756,7 @@ func (s *Service) reportStats(conn *connWrapper) error { fullBackend, ok := s.backend.(fullNodeBackend) if ok { mining = fullBackend.Miner().Mining() - hashrate = int(fullBackend.Miner().HashRate()) + hashrate = int(fullBackend.Miner().Hashrate()) sync := fullBackend.Downloader().Progress() syncing = fullBackend.CurrentHeader().Number.Uint64() >= sync.HighestBlock diff --git a/go.mod b/go.mod index ae1f330468..c70cca3ab7 100644 --- a/go.mod +++ b/go.mod @@ -12,10 +12,13 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/ConsenSys/quorum-qlight-token-manager-plugin-sdk-go v0.0.0-20220427130631-ecd75caa6e73 github.com/VictoriaMetrics/fastcache v1.5.7 - github.com/aws/aws-sdk-go v1.25.48 + github.com/aws/aws-sdk-go-v2 v1.2.0 + github.com/aws/aws-sdk-go-v2/config v1.1.1 + github.com/aws/aws-sdk-go-v2/credentials v1.1.1 + github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 github.com/btcsuite/btcd v0.20.1-beta github.com/cespare/cp v0.1.0 - github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 + github.com/cloudflare/cloudflare-go v0.14.0 github.com/consensys/gurvy v0.3.8 github.com/coreos/etcd v3.3.20+incompatible github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect @@ -42,7 +45,7 @@ require ( github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.1.1 - github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031 + github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88 github.com/influxdata/influxdb v1.8.3 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e @@ -54,7 +57,7 @@ require ( github.com/mattn/go-colorable v0.1.4 github.com/mattn/go-isatty v0.0.14 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 - github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c + github.com/olekukonko/tablewriter v0.0.5 github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 @@ -65,15 +68,15 @@ require ( github.com/shirou/gopsutil v2.20.5+incompatible github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 github.com/stretchr/testify v1.7.0 - github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca + github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 golang.org/x/sys 
v0.0.0-20220422013727-9388b58f7150 golang.org/x/text v0.3.7 - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 + golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 google.golang.org/grpc v1.46.0 - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 diff --git a/go.sum b/go.sum index 97e4b85ce9..29c3bfc74f 100644 --- a/go.sum +++ b/go.sum @@ -66,8 +66,31 @@ github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1: github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v1.2.0 h1:BS+UYpbsElC82gB+2E2jiCBg36i8HlubTB/dO/moQ9c= +github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= +github.com/aws/aws-sdk-go-v2/config v1.1.1 h1:ZAoq32boMzcaTW9bcUacBswAmHTbvlvDJICgHFZuECo= +github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1 h1:NbvWIM1Mx6sNPTxowHgS2ewXCRp+NGTzUYb/96FZJbY= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 h1:EtEU7WRaWliitZh2nmuxEXrN0Cb8EgPUFGIoTMeqbzI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2 h1:4AH9fFjUlVktQMznF+YN33aWNXaR4VgDXyP28qokJC0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 h1:cKr6St+CtC3/dl/rEBJvlk7A/IN5D5F02GNkGzfbtVU= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 h1:37QubsarExl5ZuCBlnRP+7l1tNwZPBSTqpTBrPH98RU= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 h1:TJoIfnIFubCX0ACVeJ0w46HEH5MwjwYN4iFhuYIhfIY= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= 
+github.com/aws/smithy-go v1.1.0 h1:D6CSsM3gdxaGaqXnPgOBCeL6Mophqzu7KJOu7zW78sU= +github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -97,8 +120,17 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 h1:J82+/8rub3qSy0HxEnoYD8cs+HDlHWYrqYXe2Vqxluk= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= +github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA= +github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -228,7 +260,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -292,8 +326,8 @@ github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031 h1:HarGZ5h9HD9LgEg1yRVMXyfiw4wlXiLiYM2oMjeA/SE= -github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= +github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88 h1:bcAj8KroPf552TScjFPIakjH2/tdIrIH8F+cc4v4SRo= +github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -314,8 +348,19 @@ github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1C github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpmorganchase/quorum-account-plugin-sdk-go v0.0.0-20200714175524-662195b38a5e h1:aE+TcHdEop381e8gMBWw/7Nw5aOdXdVmVrVP7ZrKrq4= github.com/jpmorganchase/quorum-account-plugin-sdk-go v0.0.0-20200714175524-662195b38a5e/go.mod h1:clocsx5vZHANnLM+SmcJJDKY6VVxxcdRUCKe5Y+roQ0= @@ -348,8 +393,9 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH6 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 
h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -375,8 +421,9 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -406,8 +453,9 @@ github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -510,8 +558,17 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs= +github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= @@ -519,6 +576,7 @@ github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9 github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -544,8 +602,9 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -601,6 +660,7 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
+golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -666,12 +726,14 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -779,8 +841,8 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= diff --git a/graphql/graphql.go b/graphql/graphql.go index 7162374876..d42098aa61 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -150,6 +150,20 @@ func (l *Log) Data(ctx context.Context) hexutil.Bytes { return l.log.Data } +// AccessTuple represents EIP-2930 +type AccessTuple struct { + address common.Address + storageKeys *[]common.Hash +} + +func (at *AccessTuple) Address(ctx context.Context) common.Address { + return at.address +} + +func (at *AccessTuple) StorageKeys(ctx context.Context) 
*[]common.Hash { + return at.storageKeys +} + // Transaction represents an Ethereum transaction. // backend and hash are mandatory; all others will be fetched when required. type Transaction struct { @@ -394,74 +408,31 @@ func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) { return &ret, nil } -// Quorum - -// (Quorum) PrivateTransaction returns the internal private transaction for privacy marker transactions -func (t *Transaction) PrivateTransaction(ctx context.Context) (*Transaction, error) { +func (t *Transaction) Type(ctx context.Context) (*int32, error) { tx, err := t.resolve(ctx) - if err != nil || tx == nil { - return nil, err - } - - if !tx.IsPrivacyMarker() { - // tx will not have a private tx so return early - no error to keep in line with other graphql behaviour (see PrivateInputData) - return nil, nil - } - - pvtTx, _, _, err := private.FetchPrivateTransaction(tx.Data()) if err != nil { return nil, err } - - if pvtTx == nil { - return nil, nil - } - - return &Transaction{ - backend: t.backend, - hash: t.hash, - tx: pvtTx, - block: t.block, - index: t.index, - receiptGetter: &privateTransactionReceiptGetter{pmt: t}, - }, nil -} - -func (t *Transaction) IsPrivate(ctx context.Context) (*bool, error) { - ret := false - tx, err := t.resolve(ctx) - if err != nil || tx == nil { - return &ret, err - } - ret = tx.IsPrivate() - return &ret, nil + txType := int32(tx.Type()) + return &txType, nil } -func (t *Transaction) PrivateInputData(ctx context.Context) (*hexutil.Bytes, error) { +func (t *Transaction) AccessList(ctx context.Context) (*[]*AccessTuple, error) { tx, err := t.resolve(ctx) if err != nil || tx == nil { - return &hexutil.Bytes{}, err + return nil, err } - if tx.IsPrivate() { - psm, err := t.backend.PSMR().ResolveForUserContext(ctx) - if err != nil { - return &hexutil.Bytes{}, err - } - _, managedParties, privateInputData, _, err := private.P.Receive(common.BytesToEncryptedPayloadHash(tx.Data())) - if err != nil || tx == nil { - return &hexutil.Bytes{}, err - } - if t.backend.PSMR().NotIncludeAny(psm, managedParties...) 
{ - return &hexutil.Bytes{}, nil - } - ret := hexutil.Bytes(privateInputData) - return &ret, nil + accessList := tx.AccessList() + ret := make([]*AccessTuple, 0, len(accessList)) + for _, al := range accessList { + ret = append(ret, &AccessTuple{ + address: al.Address, + storageKeys: &al.StorageKeys, + }) } - return &hexutil.Bytes{}, nil + return &ret, nil } -// END QUORUM - func (t *Transaction) R(ctx context.Context) (hexutil.Big, error) { tx, err := t.resolve(ctx) if err != nil || tx == nil { @@ -1252,3 +1223,69 @@ func (r *Resolver) Syncing() (*SyncState, error) { // Otherwise gather the block sync stats return &SyncState{progress}, nil } + +// Quorum + +// PrivateTransaction returns the internal private transaction for privacy marker transactions +func (t *Transaction) PrivateTransaction(ctx context.Context) (*Transaction, error) { + tx, err := t.resolve(ctx) + if err != nil || tx == nil { + return nil, err + } + + if !tx.IsPrivacyMarker() { + // tx will not have a private tx so return early - no error to keep in line with other graphql behaviour (see PrivateInputData) + return nil, nil + } + + pvtTx, _, _, err := private.FetchPrivateTransaction(tx.Data()) + if err != nil { + return nil, err + } + + if pvtTx == nil { + return nil, nil + } + + return &Transaction{ + backend: t.backend, + hash: t.hash, + tx: pvtTx, + block: t.block, + index: t.index, + receiptGetter: &privateTransactionReceiptGetter{pmt: t}, + }, nil +} + +func (t *Transaction) IsPrivate(ctx context.Context) (*bool, error) { + ret := false + tx, err := t.resolve(ctx) + if err != nil || tx == nil { + return &ret, err + } + ret = tx.IsPrivate() + return &ret, nil +} + +func (t *Transaction) PrivateInputData(ctx context.Context) (*hexutil.Bytes, error) { + tx, err := t.resolve(ctx) + if err != nil || tx == nil { + return &hexutil.Bytes{}, err + } + if tx.IsPrivate() { + psm, err := t.backend.PSMR().ResolveForUserContext(ctx) + if err != nil { + return &hexutil.Bytes{}, err + } + _, managedParties, privateInputData, _, err := private.P.Receive(common.BytesToEncryptedPayloadHash(tx.Data())) + if err != nil || tx == nil { + return &hexutil.Bytes{}, err + } + if t.backend.PSMR().NotIncludeAny(psm, managedParties...) 
{ + return &hexutil.Bytes{}, nil + } + ret := hexutil.Bytes(privateInputData) + return &ret, nil + } + return &hexutil.Bytes{}, nil +} diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index a4349810f4..5ed3cc7139 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -36,11 +36,13 @@ import ( "github.com/ethereum/go-ethereum/core/mps" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/multitenancy" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" @@ -73,7 +75,7 @@ func TestBuildSchema(t *testing.T) { // Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint func TestGraphQLBlockSerialization(t *testing.T) { - stack := createNode(t, true) + stack := createNode(t, true, false) defer stack.Close() // start node if err := stack.Start(); err != nil { @@ -175,48 +177,59 @@ func TestGraphQLBlockSerialization(t *testing.T) { } } -// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint -func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) { - stack := createNode(t, false) +func TestGraphQLBlockSerializationEIP2718(t *testing.T) { + stack := createNode(t, true, true) defer stack.Close() + // start node if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) } - body := strings.NewReader(`{"query": "{block{number}}","variables": null}`) - resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", body) - if err != nil { - t.Fatalf("could not post: %v", err) + + for i, tt := range []struct { + body string + want string + code int + }{ + { + body: `{"query": "{block {number transactions { from { address } to { address } value hash type accessList { address storageKeys } index}}}"}`, + want: `{"data":{"block":{"number":1,"transactions":[{"from":{"address":"0x71562b71999873db5b286df957af199ec94617f7"},"to":{"address":"0x0000000000000000000000000000000000000dad"},"value":"0x64","hash":"0x4f7b8d718145233dcf7f29e34a969c63dd4de8715c054ea2af022b66c4f4633e","type":0,"accessList":[],"index":0},{"from":{"address":"0x71562b71999873db5b286df957af199ec94617f7"},"to":{"address":"0x0000000000000000000000000000000000000dad"},"value":"0x32","hash":"0x9c6c2c045b618fe87add0e49ba3ca00659076ecae00fd51de3ba5d4ccf9dbf40","type":1,"accessList":[{"address":"0x0000000000000000000000000000000000000dad","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000000"]}],"index":1}]}}}`, + code: 200, + }, + } { + resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", strings.NewReader(tt.body)) + if err != nil { + t.Fatalf("could not post: %v", err) + } + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("could not read from response body: %v", err) + } + if have := string(bodyBytes); have != tt.want { + t.Errorf("testcase %d %s,\nhave:\n%v\nwant:\n%v", i, tt.body, have, tt.want) + } + if tt.code != resp.StatusCode { + t.Errorf("testcase %d %s,\nwrong statuscode, have: %v, want: %v", i, tt.body, resp.StatusCode, tt.code) + } } - // make sure the 
request is not handled successfully - assert.Equal(t, http.StatusNotFound, resp.StatusCode) } -// Tests that 400 is returned when an invalid RPC request is made. -func TestGraphQL_BadRequest(t *testing.T) { - stack := createNode(t, true) +// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint +func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) { + stack := createNode(t, false, false) defer stack.Close() - // start node if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) } - // create http request - body := strings.NewReader("{\"query\": \"{bleh{number}}\",\"variables\": null}") - gqlReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), body) + body := strings.NewReader(`{"query": "{block{number}}","variables": null}`) + resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", body) if err != nil { t.Fatalf("could not post: %v", err) } - // read from response - resp := doHTTPRequest(t, gqlReq) - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("could not read from response body: %v", err) - } - expected := "{\"errors\":[{\"message\":\"Cannot query field \\\"bleh\\\" on type \\\"Query\\\".\",\"locations\":[{\"line\":1,\"column\":2}]}]}" - assert.Equal(t, expected, string(bodyBytes)) - assert.Equal(t, 400, resp.StatusCode) + // make sure the request is not handled successfully + assert.Equal(t, http.StatusNotFound, resp.StatusCode) } -func createNode(t *testing.T, gqlEnabled bool) *node.Node { +func createNode(t *testing.T, gqlEnabled bool, txEnabled bool) *node.Node { stack, err := node.New(&node.Config{ HTTPHost: "127.0.0.1", HTTPPort: 0, @@ -229,7 +242,11 @@ func createNode(t *testing.T, gqlEnabled bool) *node.Node { if !gqlEnabled { return stack } - createGQLService(t, stack) + if !txEnabled { + createGQLService(t, stack) + } else { + createGQLServiceWithTransactions(t, stack) + } return stack } @@ -270,12 +287,121 @@ func createGQLService(t *testing.T, stack *node.Node) { } } +func createGQLServiceWithTransactions(t *testing.T, stack *node.Node) { + // create backend + key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address := crypto.PubkeyToAddress(key.PublicKey) + funds := big.NewInt(1000000000) + dad := common.HexToAddress("0x0000000000000000000000000000000000000dad") + + ethConf := ðconfig.Config{ + Genesis: &core.Genesis{ + Config: params.AllEthashProtocolChanges, + GasLimit: 11500000, + Difficulty: big.NewInt(1048576), + Alloc: core.GenesisAlloc{ + address: {Balance: funds}, + // The address 0xdad sloads 0x00 and 0x01 + dad: { + Code: []byte{ + byte(vm.PC), + byte(vm.PC), + byte(vm.SLOAD), + byte(vm.SLOAD), + }, + Nonce: 0, + Balance: big.NewInt(0), + }, + }, + }, + Ethash: ethash.Config{ + PowMode: ethash.ModeFake, + }, + NetworkId: 1337, + TrieCleanCache: 5, + TrieCleanCacheJournal: "triecache", + TrieCleanCacheRejournal: 60 * time.Minute, + TrieDirtyCache: 5, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 5, + } + + ethBackend, err := eth.New(stack, ethConf) + if err != nil { + t.Fatalf("could not create eth backend: %v", err) + } + signer := types.LatestSigner(ethConf.Genesis.Config) + + legacyTx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: uint64(0), + To: &dad, + Value: big.NewInt(100), + Gas: 50000, + GasPrice: big.NewInt(1), + }) + envelopTx, _ := types.SignNewTx(key, signer, &types.AccessListTx{ + ChainID: 
ethConf.Genesis.Config.ChainID, + Nonce: uint64(1), + To: &dad, + Gas: 30000, + GasPrice: big.NewInt(1), + Value: big.NewInt(50), + AccessList: types.AccessList{{ + Address: dad, + StorageKeys: []common.Hash{{0}}, + }}, + }) + + // Create some blocks and import them + chain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(), + ethash.NewFaker(), ethBackend.ChainDb(), 1, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + b.AddTx(legacyTx) + b.AddTx(envelopTx) + }) + + _, err = ethBackend.BlockChain().InsertChain(chain) + if err != nil { + t.Fatalf("could not create import blocks: %v", err) + } + // create gql service + err = New(stack, ethBackend.APIBackend, []string{}, []string{}) + if err != nil { + t.Fatalf("could not create graphql service: %v", err) + } +} + +// Quorum + +// Tests that 400 is returned when an invalid RPC request is made. +func TestGraphQL_BadRequest(t *testing.T) { + stack := createNode(t, false, true) + defer stack.Close() + // start node + if err := stack.Start(); err != nil { + t.Fatalf("could not start node: %v", err) + } + // create http request + body := strings.NewReader("{\"query\": \"{bleh{number}}\",\"variables\": null}") + gqlReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), body) + if err != nil { + t.Fatalf("could not post: %v", err) + } + // read from response + resp := doHTTPRequest(t, gqlReq) + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("could not read from response body: %v", err) + } + assert.Equal(t, "", string(bodyBytes)) // TODO: geth1.10.2: check changes + assert.Equal(t, 404, resp.StatusCode) +} + func doHTTPRequest(t *testing.T, req *http.Request) *http.Response { client := &http.Client{} resp, err := client.Do(req) if err != nil { t.Fatal("could not issue a GET request to the given endpoint", err) - } return resp } @@ -491,6 +617,8 @@ func (spm *stubPrivateTransactionManager) ReceiveRaw(hash common.EncryptedPayloa type StubBackend struct{} +var _ ethapi.Backend = &StubBackend{} + func (sb *StubBackend) CurrentHeader() *types.Header { panic("implement me") } @@ -511,7 +639,7 @@ func (sb *StubBackend) IsAuthorized(authToken *proto.PreAuthenticatedAuthenticat panic("implement me") } -func (sb *StubBackend) GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header) (*vm.EVM, func() error, error) { +func (sb *StubBackend) GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header, vmconfig *vm.Config) (*vm.EVM, func() error, error) { panic("implement me") } diff --git a/graphql/schema.go b/graphql/schema.go index 1a2cb14aae..5f0e480673 100644 --- a/graphql/schema.go +++ b/graphql/schema.go @@ -69,6 +69,12 @@ const schema string = ` transaction: Transaction! } + #EIP-2718 + type AccessTuple{ + address: Address! + storageKeys : [Bytes32!] + } + # Transaction is an Ethereum transaction. type Transaction { # Hash is the hash of this transaction. @@ -125,6 +131,9 @@ const schema string = ` r: BigInt! s: BigInt! v: BigInt! + #Envelope transaction support + type: Int + accessList: [AccessTuple!] 
} # BlockFilterCriteria encapsulates log filter criteria for a filter applied diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 2c92b19de6..7e6bf8d865 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -41,22 +41,22 @@ var ( Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail", Value: 3, } - logjsonFlag = cli.BoolFlag{ - Name: "log.json", - Usage: "Format logs with JSON", - } vmoduleFlag = cli.StringFlag{ Name: "vmodule", Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)", Value: "", } + logjsonFlag = cli.BoolFlag{ + Name: "log.json", + Usage: "Format logs with JSON", + } backtraceAtFlag = cli.StringFlag{ - Name: "backtrace", + Name: "log.backtrace", Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")", Value: "", } debugFlag = cli.BoolFlag{ - Name: "debug", + Name: "log.debug", Usage: "Prepends log messages with call-site location (file and line number)", } pprofFlag = cli.BoolFlag{ @@ -90,18 +90,69 @@ var ( Name: "trace", Usage: "Write execution trace to the given file", } + // (Deprecated April 2020) + legacyPprofPortFlag = cli.IntFlag{ + Name: "pprofport", + Usage: "pprof HTTP server listening port (deprecated, use --pprof.port)", + Value: 6060, + } + legacyPprofAddrFlag = cli.StringFlag{ + Name: "pprofaddr", + Usage: "pprof HTTP server listening interface (deprecated, use --pprof.addr)", + Value: "127.0.0.1", + } + legacyMemprofilerateFlag = cli.IntFlag{ + Name: "memprofilerate", + Usage: "Turn on memory profiling with the given rate (deprecated, use --pprof.memprofilerate)", + Value: runtime.MemProfileRate, + } + legacyBlockprofilerateFlag = cli.IntFlag{ + Name: "blockprofilerate", + Usage: "Turn on block profiling with the given rate (deprecated, use --pprof.blockprofilerate)", + } + legacyCpuprofileFlag = cli.StringFlag{ + Name: "cpuprofile", + Usage: "Write CPU profile to the given file (deprecated, use --pprof.cpuprofile)", + } + legacyBacktraceAtFlag = cli.StringFlag{ + Name: "backtrace", + Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\") (deprecated, use --log.backtrace)", + Value: "", + } + legacyDebugFlag = cli.BoolFlag{ + Name: "debug", + Usage: "Prepends log messages with call-site location (file and line number) (deprecated, use --log.debug)", + } ) // Flags holds all command-line flags required for debugging. var Flags = []cli.Flag{ - verbosityFlag, logjsonFlag, vmoduleFlag, backtraceAtFlag, debugFlag, - pprofFlag, pprofAddrFlag, pprofPortFlag, memprofilerateFlag, - blockprofilerateFlag, cpuprofileFlag, traceFlag, + verbosityFlag, + vmoduleFlag, + logjsonFlag, + backtraceAtFlag, + debugFlag, + pprofFlag, + pprofAddrFlag, + pprofPortFlag, + memprofilerateFlag, + blockprofilerateFlag, + cpuprofileFlag, + traceFlag, } -var ( - glogger *log.GlogHandler -) +// This is the list of deprecated debugging flags. 
+var DeprecatedFlags = []cli.Flag{ + legacyPprofPortFlag, + legacyPprofAddrFlag, + legacyMemprofilerateFlag, + legacyBlockprofilerateFlag, + legacyCpuprofileFlag, + legacyBacktraceAtFlag, + legacyDebugFlag, +} + +var glogger *log.GlogHandler func init() { glogger = log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) @@ -125,16 +176,52 @@ func Setup(ctx *cli.Context) error { } glogger.SetHandler(ostream) // logging - log.PrintOrigins(ctx.GlobalBool(debugFlag.Name)) - glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name))) - glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name)) - glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name)) + verbosity := ctx.GlobalInt(verbosityFlag.Name) + glogger.Verbosity(log.Lvl(verbosity)) + vmodule := ctx.GlobalString(vmoduleFlag.Name) + glogger.Vmodule(vmodule) + + debug := ctx.GlobalBool(debugFlag.Name) + if ctx.GlobalIsSet(legacyDebugFlag.Name) { + debug = ctx.GlobalBool(legacyDebugFlag.Name) + log.Warn("The flag --debug is deprecated and will be removed in the future, please use --log.debug") + } + if ctx.GlobalIsSet(debugFlag.Name) { + debug = ctx.GlobalBool(debugFlag.Name) + } + log.PrintOrigins(debug) + + backtrace := ctx.GlobalString(backtraceAtFlag.Name) + if b := ctx.GlobalString(legacyBacktraceAtFlag.Name); b != "" { + backtrace = b + log.Warn("The flag --backtrace is deprecated and will be removed in the future, please use --log.backtrace") + } + if b := ctx.GlobalString(backtraceAtFlag.Name); b != "" { + backtrace = b + } + glogger.BacktraceAt(backtrace) + log.Root().SetHandler(glogger) // profiling, tracing - runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) + runtime.MemProfileRate = memprofilerateFlag.Value + if ctx.GlobalIsSet(legacyMemprofilerateFlag.Name) { + runtime.MemProfileRate = ctx.GlobalInt(legacyMemprofilerateFlag.Name) + log.Warn("The flag --memprofilerate is deprecated and will be removed in the future, please use --pprof.memprofilerate") + } + if ctx.GlobalIsSet(memprofilerateFlag.Name) { + runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) + } - Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name)) + blockProfileRate := blockprofilerateFlag.Value + if ctx.GlobalIsSet(legacyBlockprofilerateFlag.Name) { + blockProfileRate = ctx.GlobalInt(legacyBlockprofilerateFlag.Name) + log.Warn("The flag --blockprofilerate is deprecated and will be removed in the future, please use --pprof.blockprofilerate") + } + if ctx.GlobalIsSet(blockprofilerateFlag.Name) { + blockProfileRate = ctx.GlobalInt(blockprofilerateFlag.Name) + } + Handler.SetBlockProfileRate(blockProfileRate) if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" { if err := Handler.StartGoTrace(traceFile); err != nil { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 52df5a3430..9231f796ae 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -642,9 +642,13 @@ func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI { return &PublicBlockChainAPI{b} } -// ChainId returns the chainID value for transaction replay protection. -func (s *PublicBlockChainAPI) ChainId() *hexutil.Big { - return (*hexutil.Big)(s.b.ChainConfig().ChainID) +// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config. 
+func (api *PublicBlockChainAPI) ChainId() (*hexutil.Big, error) {
+	// if current block is at or past the EIP-155 replay-protection fork block, return chainID from config
+	if config := api.b.ChainConfig(); config.IsEIP155(api.b.CurrentBlock().Number()) {
+		return (*hexutil.Big)(config.ChainID), nil
+	}
+	return nil, fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block")
 }
 
 // GetPSI - returns the PSI that was resolved based on the client request
@@ -986,7 +990,7 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
 	msg := args.ToMessage(globalGasCap)
 
 	// Get a new instance of the EVM.
-	evm, vmError, err := b.GetEVM(ctx, msg, state, header)
+	evm, vmError, err := b.GetEVM(ctx, msg, state, header, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1473,6 +1477,106 @@ func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransa
 	return nil
 }
 
+// accessListResult returns an optional access list.
+// It's the result of the `eth_createAccessList` RPC call.
+// It contains an error if the transaction itself failed.
+type accessListResult struct {
+	Accesslist *types.AccessList `json:"accessList"`
+	Error      string            `json:"error,omitempty"`
+	GasUsed    hexutil.Uint64    `json:"gasUsed"`
+}
+
+// CreateAccessList creates an EIP-2930 type AccessList for the given transaction.
+// Reexec and BlockNrOrHash can be specified to create the accessList on top of a certain state.
+func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args SendTxArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) {
+	bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
+	if blockNrOrHash != nil {
+		bNrOrHash = *blockNrOrHash
+	}
+	acl, gasUsed, vmerr, err := AccessList(ctx, s.b, bNrOrHash, args)
+	if err != nil {
+		return nil, err
+	}
+	result := &accessListResult{Accesslist: &acl, GasUsed: hexutil.Uint64(gasUsed)}
+	if vmerr != nil {
+		result.Error = vmerr.Error()
+	}
+	return result, nil
+}
+
+// AccessList creates an access list for the given transaction.
+// If the access list creation fails, an error is returned.
+// If the transaction itself fails, a vmErr is returned.
+func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrHash, args SendTxArgs) (acl types.AccessList, gasUsed uint64, vmErr error, err error) {
+	// Retrieve the execution context
+	db, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+	if db == nil || err != nil {
+		return nil, 0, nil, err
+	}
+	// If the gas amount is not set, extract this as it will depend on access
+	// lists and we'll need to reestimate every time
+	nogas := args.Gas == nil
+
+	// Ensure any missing fields are filled, extract the recipient and input data
+	if err := args.setDefaults(ctx, b); err != nil {
+		return nil, 0, nil, err
+	}
+	var to common.Address
+	if args.To != nil {
+		to = *args.To
+	} else {
+		to = crypto.CreateAddress(args.From, uint64(*args.Nonce))
+	}
+	var input []byte
+	if args.Input != nil {
+		input = *args.Input
+	} else if args.Data != nil {
+		input = *args.Data
+	}
+	// Retrieve the precompiles since they don't need to be added to the access list
+	precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number))
+
+	// Create an initial tracer
+	prevTracer := vm.NewAccessListTracer(nil, args.From, to, precompiles)
+	if args.AccessList != nil {
+		prevTracer = vm.NewAccessListTracer(*args.AccessList, args.From, to, precompiles)
+	}
+	for {
+		// Retrieve the current access list to expand
+		accessList := prevTracer.AccessList()
+		log.Trace("Creating access list", "input", accessList)
+
+		// If no gas amount was specified, each unique access list needs its own
+		// gas calculation. This is quite expensive, but we need to be accurate
+		// and it's covered by the sender only anyway.
+		if nogas {
+			args.Gas = nil
+			if err := args.setDefaults(ctx, b); err != nil {
+				return nil, 0, nil, err // shouldn't happen, just in case
+			}
+		}
+		// Copy the original db so we don't modify it
+		statedb := db.(*state.StateDB)
+		msg := types.NewMessage(args.From, args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), input, accessList, false)
+
+		// Apply the transaction with the access list tracer
+		tracer := vm.NewAccessListTracer(accessList, args.From, to, precompiles)
+		config := vm.Config{Tracer: tracer, Debug: true}
+		vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config)
+		if err != nil {
+			return nil, 0, nil, err
+		}
+		res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+		if err != nil {
+			return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.toTransaction().Hash(), err)
+		}
+		if tracer.Equal(prevTracer) {
+			return accessList, res.UsedGas, res.Err, nil
+		}
+		prevTracer = tracer
+	}
+}
+
 // PublicTransactionPoolAPI exposes methods for the RPC interface
 type PublicTransactionPoolAPI struct {
 	b Backend
@@ -1930,12 +2034,12 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error {
 	if args.ChainID == nil {
 		args.ChainID = b.ChainConfig().ChainID
 	}
-	//Quorum
+	// Quorum
 	if args.PrivateTxType == "" {
 		args.PrivateTxType = "restricted"
 	}
 	return args.SetDefaultPrivateFrom(ctx, b)
-	//End-Quorum
+	// End Quorum
 }
 
 // toTransaction converts the arguments to a transaction.
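Aside for reviewers: the loop in AccessList converges on a fixed point, re-executing the transaction with an access-list tracer until the traced list stops changing. The sketch below is illustrative only and not part of the patch; it shows one way to exercise the resulting eth_createAccessList endpoint from Go over JSON-RPC. The endpoint URL is an assumption, the from/to addresses are borrowed from the tests in this changeset, and the client-side accessListResult mirror of the server struct is hypothetical.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// accessListResult mirrors the JSON shape of the server-side struct added above.
type accessListResult struct {
	AccessList *types.AccessList `json:"accessList"`
	Error      string            `json:"error,omitempty"`
	GasUsed    hexutil.Uint64    `json:"gasUsed"`
}

func main() {
	// Assumed node endpoint; adjust to the HTTP-RPC address of a running node.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		log.Fatal(err)
	}
	// Minimal transaction args; the addresses reuse the ones from graphql_test.go.
	args := map[string]interface{}{
		"from": "0x71562b71999873db5b286df957af199ec94617f7",
		"to":   "0x0000000000000000000000000000000000000dad",
	}
	var result accessListResult
	// The second parameter selects the state to build the list on, as in CreateAccessList.
	if err := client.CallContext(context.Background(), &result, "eth_createAccessList", args, "latest"); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("gasUsed=%d accessList=%+v vmError=%q\n", result.GasUsed, result.AccessList, result.Error)
}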
@@ -1947,7 +2051,6 @@ func (args *SendTxArgs) toTransaction() *types.Transaction { } else if args.Data != nil { input = *args.Data } - var data types.TxData if args.AccessList == nil { data = &types.LegacyTx{ @@ -2030,6 +2133,10 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction, pr } } } + if !b.UnprotectedAllowed() && !tx.Protected() { + // Ensure only eip155 signed transactions are submitted if EIP155Required is set. + return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC") + } if err := b.SendTx(ctx, tx); err != nil { return common.Hash{}, err } @@ -2079,7 +2186,7 @@ func runSimulation(ctx context.Context, b Backend, from common.Address, tx *type if stateAtBlock == nil || err != nil { return nil, nil, err } - evm, _, err := b.GetEVM(ctx, msg, stateAtBlock, header) + evm, _, err := b.GetEVM(ctx, msg, stateAtBlock, header, &vm.Config{}) if err != nil { return nil, nil, err } @@ -2150,9 +2257,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen if args.IsPrivate() { tx.SetPrivate() } - // /Quorum - // Quorum var chainID *big.Int if config := s.b.ChainConfig(); config.IsEIP155(s.b.CurrentBlock().Number()) && !tx.IsPrivate() { chainID = config.ChainID @@ -2183,7 +2288,6 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen return common.Hash{}, err } } - // /Quorum return SubmitTransaction(ctx, s.b, signed, args.PrivateFrom, false) } @@ -2239,7 +2343,6 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, input func (s *PublicTransactionPoolAPI) SendRawPrivateTransaction(ctx context.Context, encodedTx hexutil.Bytes, args SendRawTxArgs) (common.Hash, error) { tx := new(types.Transaction) - // if err := tx.UnmarshalBinary(encodedTx); err != nil { // Quorum if err := rlp.DecodeBytes(encodedTx, tx); err != nil { return common.Hash{}, err } diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index e6b3742820..c010334dc8 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -912,6 +912,8 @@ type StubBackend struct { CurrentHeadNumber *big.Int } +var _ Backend = &StubBackend{} + func (sb *StubBackend) UnprotectedAllowed() bool { return sb.allowUnprotectedTxs } @@ -936,7 +938,7 @@ func (sb *StubBackend) IsAuthorized(authToken *proto.PreAuthenticatedAuthenticat panic("implement me") } -func (sb *StubBackend) GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header) (*vm.EVM, func() error, error) { +func (sb *StubBackend) GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header, vmconfig *vm.Config) (*vm.EVM, func() error, error) { sb.getEVMCalled = true vmCtx := core.NewEVMBlockContext(&types.Header{ Coinbase: arbitraryFrom, diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index a9e52e91fa..4ccd543dfa 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -65,7 +65,7 @@ type Backend interface { StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (vm.MinimalApiState, *types.Header, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) GetTd(ctx context.Context, hash common.Hash) *big.Int - GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header) (*vm.EVM, func() error, error) + GetEVM(ctx context.Context, msg core.Message, state vm.MinimalApiState, header *types.Header, vmConfig *vm.Config) 
(*vm.EVM, func() error, error) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 2ce1a37118..b32c2b266d 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -146,8 +146,8 @@ web3._extend({ params: 3, }), new web3._extend.Method({ - name: 'submitHashRate', - call: 'ethash_submitHashRate', + name: 'submitHashrate', + call: 'ethash_submitHashrate', params: 2, }), ] @@ -199,12 +199,24 @@ web3._extend({ call: 'admin_sleepBlocks', params: 2 }), + new web3._extend.Method({ + name: 'startHTTP', + call: 'admin_startHTTP', + params: 5, + inputFormatter: [null, null, null, null, null] + }), + new web3._extend.Method({ + name: 'stopHTTP', + call: 'admin_stopHTTP' + }), + // This method is deprecated. new web3._extend.Method({ name: 'startRPC', call: 'admin_startRPC', - params: 4, - inputFormatter: [null, null, null, null] + params: 5, + inputFormatter: [null, null, null, null, null] }), + // This method is deprecated. new web3._extend.Method({ name: 'stopRPC', call: 'admin_stopRPC' @@ -624,6 +636,12 @@ web3._extend({ params: 3, inputFormatter: [web3._extend.formatters.inputAddressFormatter, null, web3._extend.formatters.inputBlockNumberFormatter] }), + new web3._extend.Method({ + name: 'createAccessList', + call: 'eth_createAccessList', + params: 2, + inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter], + }), new web3._extend.Method({ name: 'storageRoot', call: 'eth_storageRoot', @@ -1015,7 +1033,7 @@ web3._extend({ call: 'quorumPermission_changeAccountRole', params: 4, inputFormatter: [web3._extend.formatters.inputAddressFormatter,null,null,web3._extend.formatters.inputTransactionFormatter] - }), + }), new web3._extend.Method({ name: 'updateAccountStatus', call: 'quorumPermission_updateAccountStatus', @@ -1071,11 +1089,11 @@ web3._extend({ new web3._extend.Property({ name: 'orgList', getter: 'quorumPermission_orgList' - }), + }), new web3._extend.Property({ name: 'nodeList', getter: 'quorumPermission_nodeList' - }), + }), new web3._extend.Property({ name: 'roleList', getter: 'quorumPermission_roleList' @@ -1083,7 +1101,7 @@ web3._extend({ new web3._extend.Property({ name: 'acctList', getter: 'quorumPermission_acctList' - }), + }), ] }) ` diff --git a/les/api.go b/les/api.go index 6491c4dcc4..782bb31ef2 100644 --- a/les/api.go +++ b/les/api.go @@ -31,7 +31,6 @@ var ( errNoCheckpoint = errors.New("no local checkpoint provided") errNotActivated = errors.New("checkpoint registrar is not activated") errUnknownBenchmarkType = errors.New("unknown benchmark type") - errNoPriority = errors.New("priority too low to raise capacity") ) // PrivateLightServerAPI provides an API to access the LES light server. 
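Aside for reviewers: the hunks below rework the LES admin API to accept node identifiers as strings rather than enode.ID values; the new parseNode helper (next hunk) tries a raw hex node ID first and falls back to a full enode:// URL. As an illustrative sketch, not part of the patch, this standalone program shows the two identifier forms resolving to the same enode.ID, using only public go-ethereum packages:

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Build a throwaway node record so both identifier forms are available.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	n := enode.NewV4(&key.PublicKey, net.ParseIP("127.0.0.1"), 30303, 30303)

	// Form 1: a full enode:// URL, handled by enode.Parse.
	fromURL, err := enode.Parse(enode.ValidSchemes, n.URLv4())
	if err != nil {
		log.Fatal(err)
	}

	// Form 2: a raw hex node ID, handled by enode.ParseID.
	fromHex, err := enode.ParseID(n.ID().String())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("identifiers agree:", fromURL.ID() == fromHex)
}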
@@ -44,8 +43,20 @@ type PrivateLightServerAPI struct {
 func NewPrivateLightServerAPI(server *LesServer) *PrivateLightServerAPI {
 	return &PrivateLightServerAPI{
 		server:            server,
-		defaultPosFactors: server.clientPool.defaultPosFactors,
-		defaultNegFactors: server.clientPool.defaultNegFactors,
+		defaultPosFactors: defaultPosFactors,
+		defaultNegFactors: defaultNegFactors,
+	}
+}
+
+// parseNode parses either an enode address or a raw hex node id
+func parseNode(node string) (enode.ID, error) {
+	if id, err := enode.ParseID(node); err == nil {
+		return id, nil
+	}
+	if node, err := enode.Parse(enode.ValidSchemes, node); err == nil {
+		return node.ID(), nil
+	} else {
+		return enode.ID{}, err
+	}
 }
@@ -54,16 +65,34 @@ func (api *PrivateLightServerAPI) ServerInfo() map[string]interface{} {
 	res := make(map[string]interface{})
 	res["minimumCapacity"] = api.server.minCapacity
 	res["maximumCapacity"] = api.server.maxCapacity
-	res["totalCapacity"], res["totalConnectedCapacity"], res["priorityConnectedCapacity"] = api.server.clientPool.capacityInfo()
+	_, res["totalCapacity"] = api.server.clientPool.Limits()
+	_, res["totalConnectedCapacity"] = api.server.clientPool.Active()
+	res["priorityConnectedCapacity"] = 0 //TODO connect when token sale module is added
 	return res
 }
 
 // ClientInfo returns information about clients listed in the ids list or matching the given tags
-func (api *PrivateLightServerAPI) ClientInfo(ids []enode.ID) map[enode.ID]map[string]interface{} {
+func (api *PrivateLightServerAPI) ClientInfo(nodes []string) map[enode.ID]map[string]interface{} {
+	var ids []enode.ID
+	for _, node := range nodes {
+		if id, err := parseNode(node); err == nil {
+			ids = append(ids, id)
+		}
+	}
+
 	res := make(map[enode.ID]map[string]interface{})
-	api.server.clientPool.forClients(ids, func(client *clientInfo) {
-		res[client.node.ID()] = api.clientInfo(client)
-	})
+	if len(ids) == 0 {
+		ids = api.server.peers.ids()
+	}
+	for _, id := range ids {
+		if peer := api.server.peers.peer(id); peer != nil {
+			res[id] = api.clientInfo(peer, peer.balance)
+		} else {
+			api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) {
+				res[id] = api.clientInfo(nil, balance)
+			})
+		}
+	}
 	return res
 }
@@ -75,31 +104,35 @@ func (api *PrivateLightServerAPI) ClientInfo(ids []enode.ID) map[enode.ID]map[st
 // assigned to it.
func (api *PrivateLightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCount int) map[enode.ID]map[string]interface{} { res := make(map[enode.ID]map[string]interface{}) - ids := api.server.clientPool.bt.GetPosBalanceIDs(start, stop, maxCount+1) + ids := api.server.clientPool.GetPosBalanceIDs(start, stop, maxCount+1) if len(ids) > maxCount { res[ids[maxCount]] = make(map[string]interface{}) ids = ids[:maxCount] } - if len(ids) != 0 { - api.server.clientPool.forClients(ids, func(client *clientInfo) { - res[client.node.ID()] = api.clientInfo(client) - }) + for _, id := range ids { + if peer := api.server.peers.peer(id); peer != nil { + res[id] = api.clientInfo(peer, peer.balance) + } else { + api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) { + res[id] = api.clientInfo(nil, balance) + }) + } } return res } // clientInfo creates a client info data structure -func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface{} { +func (api *PrivateLightServerAPI) clientInfo(peer *clientPeer, balance vfs.ReadOnlyBalance) map[string]interface{} { info := make(map[string]interface{}) - pb, nb := c.balance.GetBalance() - info["isConnected"] = c.connected + pb, nb := balance.GetBalance() + info["isConnected"] = peer != nil info["pricing/balance"] = pb info["priority"] = pb != 0 // cb := api.server.clientPool.ndb.getCurrencyBalance(id) // info["pricing/currency"] = cb.amount - if c.connected { - info["connectionTime"] = float64(mclock.Now()-c.connectedAt) / float64(time.Second) - info["capacity"], _ = api.server.clientPool.ns.GetField(c.node, priorityPoolSetup.CapacityField).(uint64) + if peer != nil { + info["connectionTime"] = float64(mclock.Now()-peer.connectedAt) / float64(time.Second) + info["capacity"] = peer.getCapacity() info["pricing/negBalance"] = nb } return info @@ -107,7 +140,7 @@ func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface // setParams either sets the given parameters for a single connected client (if specified) // or the default parameters applicable to clients connected in the future -func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) { +func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientPeer, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) { defParams := client == nil for name, value := range params { errValue := func() error { @@ -137,9 +170,8 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien setFactor(&negFactors.RequestFactor) case !defParams && name == "capacity": if capacity, ok := value.(float64); ok && uint64(capacity) >= api.server.minCapacity { - _, err = api.server.clientPool.setCapacity(client.node, client.address, uint64(capacity), 0, true) - // Don't have to call factor update explicitly. It's already done - // in setCapacity function. 
+ _, err = api.server.clientPool.SetCapacity(client.Node(), uint64(capacity), 0, false) + // time factor recalculation is performed automatically by the balance tracker } else { err = errValue() } @@ -159,22 +191,26 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien // SetClientParams sets client parameters for all clients listed in the ids list // or all connected clients if the list is empty -func (api *PrivateLightServerAPI) SetClientParams(ids []enode.ID, params map[string]interface{}) error { +func (api *PrivateLightServerAPI) SetClientParams(nodes []string, params map[string]interface{}) error { var err error - api.server.clientPool.forClients(ids, func(client *clientInfo) { - if client.connected { - posFactors, negFactors := client.balance.GetPriceFactors() - update, e := api.setParams(params, client, &posFactors, &negFactors) + for _, node := range nodes { + var id enode.ID + if id, err = parseNode(node); err != nil { + return err + } + if peer := api.server.peers.peer(id); peer != nil { + posFactors, negFactors := peer.balance.GetPriceFactors() + update, e := api.setParams(params, peer, &posFactors, &negFactors) if update { - client.balance.SetPriceFactors(posFactors, negFactors) + peer.balance.SetPriceFactors(posFactors, negFactors) } if e != nil { err = e } } else { - err = fmt.Errorf("client %064x is not connected", client.node.ID()) + err = fmt.Errorf("client %064x is not connected", id) } - }) + } return err } @@ -182,7 +218,7 @@ func (api *PrivateLightServerAPI) SetClientParams(ids []enode.ID, params map[str func (api *PrivateLightServerAPI) SetDefaultParams(params map[string]interface{}) error { update, err := api.setParams(params, nil, &api.defaultPosFactors, &api.defaultNegFactors) if update { - api.server.clientPool.setDefaultFactors(api.defaultPosFactors, api.defaultNegFactors) + api.server.clientPool.SetDefaultFactors(api.defaultPosFactors, api.defaultNegFactors) } return err } @@ -195,15 +231,19 @@ func (api *PrivateLightServerAPI) SetConnectedBias(bias time.Duration) error { if bias < time.Duration(0) { return fmt.Errorf("bias illegal: %v less than 0", bias) } - api.server.clientPool.setConnectedBias(bias) + api.server.clientPool.SetConnectedBias(bias) return nil } // AddBalance adds the given amount to the balance of a client if possible and returns // the balance before and after the operation -func (api *PrivateLightServerAPI) AddBalance(id enode.ID, amount int64) (balance [2]uint64, err error) { - api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) { - balance[0], balance[1], err = c.balance.AddBalance(amount) +func (api *PrivateLightServerAPI) AddBalance(node string, amount int64) (balance [2]uint64, err error) { + var id enode.ID + if id, err = parseNode(node); err != nil { + return + } + api.server.clientPool.BalanceOperation(id, "", func(nb vfs.AtomicBalanceOperator) { + balance[0], balance[1], err = nb.AddBalance(amount) }) return } @@ -297,16 +337,20 @@ func NewPrivateDebugAPI(server *LesServer) *PrivateDebugAPI { } // FreezeClient forces a temporary client freeze which normally happens when the server is overloaded -func (api *PrivateDebugAPI) FreezeClient(id enode.ID) error { - var err error - api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) { - if c.connected { - c.peer.freeze() - } else { - err = fmt.Errorf("client %064x is not connected", id[:]) - } - }) - return err +func (api *PrivateDebugAPI) FreezeClient(node string) error { + var ( + id enode.ID + err error + ) + if id, err = 
parseNode(node); err != nil { + return err + } + if peer := api.server.peers.peer(id); peer != nil { + peer.freeze() + return nil + } else { + return fmt.Errorf("client %064x is not connected", id[:]) + } } // PrivateLightAPI provides an API to access the LES light server or light client. diff --git a/les/api_backend.go b/les/api_backend.go index d67f65ce55..9e01056a78 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -34,8 +34,10 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" @@ -49,6 +51,9 @@ type LesApiBackend struct { gpo *gasprice.Oracle } +var _ ethapi.Backend = &LesApiBackend{} +var _ tracers.Backend = &LesApiBackend{} + func (b *LesApiBackend) ChainConfig() *params.ChainConfig { return b.eth.chainConfig } @@ -178,11 +183,14 @@ func (b *LesApiBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { return nil } -func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, apiState vm.MinimalApiState, header *types.Header) (*vm.EVM, func() error, error) { +func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, apiState vm.MinimalApiState, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { statedb := apiState.(*state.StateDB) + if vmConfig == nil { + vmConfig = new(vm.Config) + } txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(header, b.eth.blockchain, nil) - return vm.NewEVM(context, txContext, statedb, statedb, b.eth.chainConfig, vm.Config{}), statedb.Error, nil + return vm.NewEVM(context, txContext, statedb, statedb, b.eth.chainConfig, *vmConfig), statedb.Error, nil } func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { @@ -276,13 +284,6 @@ func (b *LesApiBackend) UnprotectedAllowed() bool { return b.allowUnprotectedTxs } -// Quorum -func (b *LesApiBackend) CallTimeOut() time.Duration { - return b.eth.config.EVMCallTimeOut -} - -// End Quorum - func (b *LesApiBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } @@ -313,15 +314,11 @@ func (b *LesApiBackend) CurrentHeader() *types.Header { return b.eth.blockchain.CurrentHeader() } -func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, mps.PrivateStateRepository, func(), error) { +func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, mps.PrivateStateRepository, error) { return b.eth.stateAtBlock(ctx, block, reexec) } -func (b *LesApiBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, []mps.PrivateStateRepository, func(), error) { - return b.eth.statesInRange(ctx, fromBlock, toBlock, reexec) -} - -func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, func(), error) { +func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, 
mps.PrivateStateRepository, error) { return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) } @@ -348,4 +345,8 @@ func (b *LesApiBackend) IsPrivacyMarkerTransactionCreationEnabled() bool { return b.eth.config.QuorumChainConfig.PrivacyMarkerEnabled() } +func (b *LesApiBackend) CallTimeOut() time.Duration { + return b.eth.config.EVMCallTimeOut +} + // End Quorum diff --git a/les/client.go b/les/client.go index 33fbf46ee8..d4ec8eafb2 100644 --- a/les/client.go +++ b/les/client.go @@ -73,17 +73,18 @@ type LightEthereum struct { accountManager *accounts.Manager netRPCService *ethapi.PublicNetAPI - p2pServer *p2p.Server - p2pConfig *p2p.Config + udpEnabled bool + p2pServer *p2p.Server + p2pConfig *p2p.Config } // New creates an instance of the light client. func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { - chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/") + chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/", false) if err != nil { return nil, err } - lesDb, err := stack.OpenDatabase("les.client", 0, 0, "eth/db/les.client") + lesDb, err := stack.OpenDatabase("les.client", 0, 0, "eth/db/lesclient/", false) if err != nil { return nil, err } @@ -113,10 +114,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations), p2pServer: stack.Server(), p2pConfig: &stack.Config().P2P, + udpEnabled: stack.Config().P2P.DiscoveryV5, } var prenegQuery vfc.QueryFunc - if leth.p2pServer.DiscV5 != nil { + if leth.udpEnabled { prenegQuery = leth.prenegQuery } leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, config.UltraLightServers, requestList) @@ -202,7 +204,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { // VfluxRequest sends a batch of requests to the given node through discv5 UDP TalkRequest and returns the responses func (s *LightEthereum) VfluxRequest(n *enode.Node, reqs vflux.Requests) vflux.Replies { - if s.p2pServer.DiscV5 == nil { + if !s.udpEnabled { return nil } reqsEnc, _ := rlp.EncodeToBytes(&reqs) @@ -219,7 +221,7 @@ func (s *LightEthereum) VfluxRequest(n *enode.Node, reqs vflux.Requests) vflux.R func (s *LightEthereum) vfxVersion(n *enode.Node) uint { if n.Seq() == 0 { var err error - if s.p2pServer.DiscV5 == nil { + if !s.udpEnabled { return 0 } if n, err = s.p2pServer.DiscV5.RequestENR(n); n != nil && err == nil && n.Seq() != 0 { @@ -350,7 +352,11 @@ func (s *LightEthereum) Protocols() []p2p.Protocol { func (s *LightEthereum) Start() error { log.Warn("Light client mode is an experimental feature") - discovery, err := s.setupDiscovery(s.p2pConfig) + if s.udpEnabled && s.p2pServer.DiscV5 == nil { + s.udpEnabled = false + log.Error("Discovery v5 is not initialized") + } + discovery, err := s.setupDiscovery() if err != nil { return err } diff --git a/les/clientpool.go b/les/clientpool.go deleted file mode 100644 index 1aa63a281e..0000000000 --- a/les/clientpool.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "fmt" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/les/utils" - "github.com/ethereum/go-ethereum/les/vflux" - vfs "github.com/ethereum/go-ethereum/les/vflux/server" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/nodestate" - "github.com/ethereum/go-ethereum/rlp" -) - -const ( - defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance - - // defaultConnectedBias is applied to already connected clients So that - // already connected client won't be kicked out very soon and we - // can ensure all connected clients can have enough time to request - // or sync some data. - // - // todo(rjl493456442) make it configurable. It can be the option of - // free trial time! - defaultConnectedBias = time.Minute * 3 - inactiveTimeout = time.Second * 10 -) - -// clientPool implements a client database that assigns a priority to each client -// based on a positive and negative balance. Positive balance is externally assigned -// to prioritized clients and is decreased with connection time and processed -// requests (unless the price factors are zero). If the positive balance is zero -// then negative balance is accumulated. -// -// Balance tracking and priority calculation for connected clients is done by -// balanceTracker. activeQueue ensures that clients with the lowest positive or -// highest negative balance get evicted when the total capacity allowance is full -// and new clients with a better balance want to connect. -// -// Already connected nodes receive a small bias in their favor in order to avoid -// accepting and instantly kicking out clients. In theory, we try to ensure that -// each client can have several minutes of connection time. -// -// Balances of disconnected clients are stored in nodeDB including positive balance -// and negative banalce. Boeth positive balance and negative balance will decrease -// exponentially. If the balance is low enough, then the record will be dropped. -type clientPool struct { - vfs.BalanceTrackerSetup - vfs.PriorityPoolSetup - lock sync.Mutex - clock mclock.Clock - closed bool - removePeer func(enode.ID) - ns *nodestate.NodeStateMachine - pp *vfs.PriorityPool - bt *vfs.BalanceTracker - - defaultPosFactors, defaultNegFactors vfs.PriceFactors - posExpTC, negExpTC uint64 - minCap uint64 // The minimal capacity value allowed for any client - connectedBias time.Duration - capLimit uint64 -} - -// clientPoolPeer represents a client peer in the pool. -// Positive balances are assigned to node key while negative balances are assigned -// to freeClientId. 
Currently network IP address without port is used because -// clients have a limited access to IP addresses while new node keys can be easily -// generated so it would be useless to assign a negative value to them. -type clientPoolPeer interface { - Node() *enode.Node - freeClientId() string - updateCapacity(uint64) - freeze() - allowInactive() bool -} - -// clientInfo defines all information required by clientpool. -type clientInfo struct { - node *enode.Node - address string - peer clientPoolPeer - connected, priority bool - connectedAt mclock.AbsTime - balance *vfs.NodeBalance -} - -// newClientPool creates a new client pool -func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool { - pool := &clientPool{ - ns: ns, - BalanceTrackerSetup: balanceTrackerSetup, - PriorityPoolSetup: priorityPoolSetup, - clock: clock, - minCap: minCap, - connectedBias: connectedBias, - removePeer: removePeer, - } - pool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lesDb, clock, &utils.Expirer{}, &utils.Expirer{}) - pool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4) - - // set default expiration constants used by tests - // Note: server overwrites this if token sale is active - pool.bt.SetExpirationTCs(0, defaultNegExpTC) - - ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(pool.InactiveFlag) { - ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout) - } - if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) { - ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout - } - }) - - ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - return - } - c.priority = newState.HasAll(pool.PriorityFlag) - if newState.Equals(pool.ActiveFlag) { - cap, _ := ns.GetField(node, pool.CapacityField).(uint64) - if cap > minCap { - pool.pp.RequestCapacity(node, minCap, 0, true) - } - } - }) - - ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if oldState.IsEmpty() { - clientConnectedMeter.Mark(1) - log.Debug("Client connected", "id", node.ID()) - } - if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) { - clientActivatedMeter.Mark(1) - log.Debug("Client activated", "id", node.ID()) - } - if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) { - clientDeactivatedMeter.Mark(1) - log.Debug("Client deactivated", "id", node.ID()) - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil || !c.peer.allowInactive() { - pool.removePeer(node.ID()) - } - } - if newState.IsEmpty() { - clientDisconnectedMeter.Mark(1) - log.Debug("Client disconnected", "id", node.ID()) - pool.removePeer(node.ID()) - } - }) - - var totalConnected uint64 - ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - oldCap, _ := oldValue.(uint64) - newCap, _ := newValue.(uint64) - totalConnected += newCap - oldCap - totalConnectedGauge.Update(int64(totalConnected)) - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - c.peer.updateCapacity(newCap) - } - }) - return pool -} - -// stop shuts the 
client pool down -func (f *clientPool) stop() { - f.lock.Lock() - f.closed = true - f.lock.Unlock() - f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - // enforces saving all balances in BalanceTracker - f.disconnectNode(node) - }) - f.bt.Stop() -} - -// connect should be called after a successful handshake. If the connection was -// rejected, there is no need to call disconnect. -func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) { - f.lock.Lock() - defer f.lock.Unlock() - - // Short circuit if clientPool is already closed. - if f.closed { - return 0, fmt.Errorf("Client pool is already closed") - } - // Dedup connected peers. - node, freeID := peer.Node(), peer.freeClientId() - if f.ns.GetField(node, clientInfoField) != nil { - log.Debug("Client already connected", "address", freeID, "id", node.ID().String()) - return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String()) - } - now := f.clock.Now() - c := &clientInfo{ - node: node, - address: freeID, - peer: peer, - connected: true, - connectedAt: now, - } - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - f.disconnect(peer) - return 0, nil - } - c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors) - - f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0) - var allowed bool - f.ns.Operation(func() { - _, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true) - }) - if allowed { - return f.minCap, nil - } - if !peer.allowInactive() { - f.disconnect(peer) - } - return 0, nil -} - -// setConnectedBias sets the connection bias, which is applied to already connected clients -// So that already connected client won't be kicked out very soon and we can ensure all -// connected clients can have enough time to request or sync some data. -func (f *clientPool) setConnectedBias(bias time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - - f.connectedBias = bias - f.pp.SetActiveBias(bias) -} - -// disconnect should be called when a connection is terminated. If the disconnection -// was initiated by the pool itself using disconnectFn then calling disconnect is -// not necessary but permitted. -func (f *clientPool) disconnect(p clientPoolPeer) { - f.disconnectNode(p.Node()) -} - -// disconnectNode removes node fields and flags related to connected status -func (f *clientPool) disconnectNode(node *enode.Node) { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) -} - -// setDefaultFactors sets the default price factors applied to subsequently connected clients -func (f *clientPool) setDefaultFactors(posFactors, negFactors vfs.PriceFactors) { - f.lock.Lock() - defer f.lock.Unlock() - - f.defaultPosFactors = posFactors - f.defaultNegFactors = negFactors -} - -// capacityInfo returns the total capacity allowance, the total capacity of connected -// clients and the total capacity of connected and prioritized clients -func (f *clientPool) capacityInfo() (uint64, uint64, uint64) { - f.lock.Lock() - defer f.lock.Unlock() - - // total priority active cap will be supported when the token issuer module is added - _, activeCap := f.pp.Active() - return f.capLimit, activeCap, 0 -} - -// setLimits sets the maximum number and total capacity of connected clients, -// dropping some of them if necessary. 
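A minimal stand-alone sketch of the idea behind the connection bias used above (all names here are illustrative, not this patch's code): already connected peers are credited with extra time when ranking eviction candidates, so a freshly accepted client is not kicked out immediately.

package main

import (
	"fmt"
	"time"
)

type candidate struct {
	id        string
	connected time.Duration // how long the peer has been connected
	active    bool          // already connected peers receive the bias
}

// effectiveAge returns the connection time used for eviction ranking,
// crediting active peers with the configured bias (three minutes by
// default in this file, see defaultConnectedBias above).
func effectiveAge(c candidate, bias time.Duration) time.Duration {
	if c.active {
		return c.connected + bias
	}
	return c.connected
}

func main() {
	bias := 3 * time.Minute
	a := candidate{id: "existing", connected: 30 * time.Second, active: true}
	b := candidate{id: "newcomer", connected: 2 * time.Minute, active: false}
	// The existing peer outranks the newcomer despite less raw connection time.
	fmt.Println(effectiveAge(a, bias) > effectiveAge(b, bias)) // true
}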
-func (f *clientPool) setLimits(totalConn int, totalCap uint64) { - f.lock.Lock() - defer f.lock.Unlock() - - f.capLimit = totalCap - f.pp.SetLimits(uint64(totalConn), totalCap) -} - -// setCapacity sets the assigned capacity of a connected client -func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) { - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - if setCap { - return 0, fmt.Errorf("client %064x is not connected", node.ID()) - } - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - log.Error("BalanceField is missing", "node", node.ID()) - return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID()) - } - defer func() { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - }() - } - var ( - minPriority int64 - allowed bool - ) - f.ns.Operation(func() { - if !setCap || c.priority { - // check clientInfo.priority inside Operation to ensure thread safety - minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap) - } - }) - if allowed { - return 0, nil - } - missing := c.balance.PosBalanceMissing(minPriority, capacity, bias) - if missing < 1 { - // ensure that we never return 0 missing and insufficient priority error - missing = 1 - } - return missing, errNoPriority -} - -// setCapacityLocked is the equivalent of setCapacity used when f.lock is already locked -func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) { - f.lock.Lock() - defer f.lock.Unlock() - - return f.setCapacity(node, freeID, capacity, minConnTime, setCap) -} - -// forClients calls the supplied callback for either the listed node IDs or all connected -// nodes. It passes a valid clientInfo to the callback and ensures that the necessary -// fields and flags are set in order for BalanceTracker and PriorityPool to work even if -// the node is not connected. -func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) { - f.lock.Lock() - defer f.lock.Unlock() - - if len(ids) == 0 { - f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - cb(c) - } - }) - } else { - for _, id := range ids { - node := f.ns.GetNode(id) - if node == nil { - node = enode.SignNull(&enr.Record{}, id) - } - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - cb(c) - } else { - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, "") - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance != nil { - cb(c) - } else { - log.Error("BalanceField is missing") - } - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - } - } - } -} - -// serveCapQuery serves a vflux capacity query. It receives multiple token amount values -// and a bias time value. For each given token amount it calculates the maximum achievable -// capacity in case the amount is added to the balance. 
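The capacity query described above travels as RLP. Below is a simplified round-trip with stand-in request shapes (the real types live in les/vflux, and serveCapQuery additionally enforces an upper bound, CapacityQueryMaxLen, on the token list); only the rlp calls mirror the code that follows.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// capacityQueryReq is a simplified stand-in for vflux.CapacityQueryReq.
type capacityQueryReq struct {
	Bias      uint64   // bias time in seconds
	AddTokens []uint64 // token amounts to test against the capacity curve
}

func main() {
	req := capacityQueryReq{Bias: 5, AddTokens: []uint64{0, 1000, 1000000}}
	enc, err := rlp.EncodeToBytes(&req)
	if err != nil {
		fmt.Println("encode failed:", err)
		return
	}
	var dec capacityQueryReq
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// serveCapQuery rejects empty (or oversized) AddTokens lists before answering.
	fmt.Println(len(dec.AddTokens) > 0)
}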
-func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte { - var req vflux.CapacityQueryReq - if rlp.DecodeBytes(data, &req) != nil { - return nil - } - if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { - return nil - } - node := f.ns.GetNode(id) - if node == nil { - node = enode.SignNull(&enr.Record{}, id) - } - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - defer func() { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - }() - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - log.Error("BalanceField is missing", "node", node.ID()) - return nil - } - } - // use vfs.CapacityCurve to answer request for multiple newly bought token amounts - curve := f.pp.GetCapacityCurve().Exclude(id) - result := make(vflux.CapacityQueryReply, len(req.AddTokens)) - bias := time.Second * time.Duration(req.Bias) - if f.connectedBias > bias { - bias = f.connectedBias - } - pb, _ := c.balance.GetBalance() - for i, addTokens := range req.AddTokens { - add := addTokens.Int64() - result[i] = curve.MaxCapacity(func(capacity uint64) int64 { - return c.balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity) - }) - if add <= 0 && uint64(-add) >= pb && result[i] > f.minCap { - result[i] = f.minCap - } - if result[i] < f.minCap { - result[i] = 0 - } - } - reply, _ := rlp.EncodeToBytes(&result) - return reply -} diff --git a/les/enr_entry.go b/les/enr_entry.go index 8be4a7a00e..307313fb10 100644 --- a/les/enr_entry.go +++ b/les/enr_entry.go @@ -18,7 +18,6 @@ package les import ( "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" @@ -36,13 +35,13 @@ func (lesEntry) ENRKey() string { return "les" } // ethEntry is the "eth" ENR entry. This is redeclared here to avoid depending on package eth. type ethEntry struct { ForkID forkid.ID - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } func (ethEntry) ENRKey() string { return "eth" } // setupDiscovery creates the node discovery source for the eth protocol. -func (eth *LightEthereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) { +func (eth *LightEthereum) setupDiscovery() (enode.Iterator, error) { it := enode.NewFairMix(0) // Enable DNS discovery. @@ -56,7 +55,7 @@ func (eth *LightEthereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error } // Enable DHT. 
- if cfg.DiscoveryV5 && eth.p2pServer.DiscV5 != nil { + if eth.udpEnabled { it.AddSource(eth.p2pServer.DiscV5.RandomNodes()) } diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go index d6d0b1adde..c9e681c144 100644 --- a/les/flowcontrol/manager.go +++ b/les/flowcontrol/manager.go @@ -108,7 +108,7 @@ type ClientManager struct { func NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager { cm := &ClientManager{ clock: clock, - rcQueue: prque.New(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }), + rcQueue: prque.NewWrapAround(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }), capLastUpdate: clock.Now(), stop: make(chan chan struct{}), } diff --git a/les/metrics.go b/les/metrics.go index 9a79fd1bbd..d356326b76 100644 --- a/les/metrics.go +++ b/les/metrics.go @@ -75,7 +75,6 @@ var ( totalCapacityGauge = metrics.NewRegisteredGauge("les/server/totalCapacity", nil) totalRechargeGauge = metrics.NewRegisteredGauge("les/server/totalRecharge", nil) - totalConnectedGauge = metrics.NewRegisteredGauge("les/server/totalConnected", nil) blockProcessingTimer = metrics.NewRegisteredTimer("les/server/blockProcessingTime", nil) requestServedMeter = metrics.NewRegisteredMeter("les/server/req/avgServedTime", nil) @@ -98,12 +97,8 @@ var ( sqServedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/served", nil) sqQueuedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/queued", nil) - clientConnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/connected", nil) - clientActivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/activated", nil) - clientDeactivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/deactivated", nil) - clientDisconnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/disconnected", nil) - clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil) - clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil) + clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil) + clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil) requestRTT = metrics.NewRegisteredTimer("les/client/req/rtt", nil) requestSendDelay = metrics.NewRegisteredTimer("les/client/req/sendDelay", nil) diff --git a/les/peer.go b/les/peer.go index 78019b1d87..f6cc94dfad 100644 --- a/les/peer.go +++ b/les/peer.go @@ -17,6 +17,7 @@ package les import ( + "crypto/ecdsa" "errors" "fmt" "math/big" @@ -37,6 +38,7 @@ import ( vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -762,15 +764,22 @@ type clientPeer struct { responseLock sync.Mutex responseCount uint64 // Counter to generate a unique id for request processing. - balance *vfs.NodeBalance + balance vfs.ConnectedBalance // invalidLock is used for protecting invalidCount. invalidLock sync.RWMutex invalidCount utils.LinearExpiredValue // Counts the invalid requests the client peer has made. - server bool - errCh chan error - fcClient *flowcontrol.ClientNode // Server side mirror token bucket. + capacity uint64 + // lastAnnounce is the last broadcast created by the server; may be newer than the last head + // sent to the specific client (stored in headInfo) if capacity is zero.
In this case the + // latest head is sent when the client gains non-zero capacity. + lastAnnounce announceData + + connectedAt mclock.AbsTime + server bool + errCh chan error + fcClient *flowcontrol.ClientNode // Server side mirror token bucket. } func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer { @@ -789,9 +798,9 @@ func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWrite } } -// freeClientId returns a string identifier for the peer. Multiple peers with +// FreeClientId returns a string identifier for the peer. Multiple peers with // the same identifier can not be connected in free mode simultaneously. -func (p *clientPeer) freeClientId() string { +func (p *clientPeer) FreeClientId() string { if addr, ok := p.RemoteAddr().(*net.TCPAddr); ok { if addr.IP.IsLoopback() { // using peer id instead of loopback ip address allows multiple free @@ -921,25 +930,69 @@ func (p *clientPeer) sendAnnounce(request announceData) error { return p2p.Send(p.rw, AnnounceMsg, request) } -// allowInactive implements clientPoolPeer -func (p *clientPeer) allowInactive() bool { - return false +// InactiveAllowance implements vfs.clientPeer +func (p *clientPeer) InactiveAllowance() time.Duration { + return 0 // will return more than zero for les/5 clients +} + +// getCapacity returns the current capacity of the peer +func (p *clientPeer) getCapacity() uint64 { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.capacity } -// updateCapacity updates the request serving capacity assigned to a given client -// and also sends an announcement about the updated flow control parameters -func (p *clientPeer) updateCapacity(cap uint64) { +// UpdateCapacity updates the request serving capacity assigned to a given client +// and also sends an announcement about the updated flow control parameters. +// Note: UpdateCapacity implements vfs.clientPeer and should not block. The requested +// parameter is true if the callback was initiated by ClientPool.SetCapacity on the given peer. +func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) { p.lock.Lock() defer p.lock.Unlock() - if cap != p.fcParams.MinRecharge { - p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio} + if newCap != p.fcParams.MinRecharge { + p.fcParams = flowcontrol.ServerParams{MinRecharge: newCap, BufLimit: newCap * bufLimitRatio} p.fcClient.UpdateParams(p.fcParams) var kvList keyValueList - kvList = kvList.add("flowControl/MRR", cap) - kvList = kvList.add("flowControl/BL", cap*bufLimitRatio) + kvList = kvList.add("flowControl/MRR", newCap) + kvList = kvList.add("flowControl/BL", newCap*bufLimitRatio) p.queueSend(func() { p.sendAnnounce(announceData{Update: kvList}) }) } + + if p.capacity == 0 && newCap != 0 { + p.sendLastAnnounce() + } + p.capacity = newCap +} + +// announceOrStore sends the given head announcement to the client if the client is +// active (capacity != 0) and the same announcement hasn't been sent before. If the +// client is inactive the announcement is stored and sent later if the client is +// activated again. 
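The buffering contract described above (deliver immediately when active, otherwise store the newest head and flush on re-activation) can be shown self-contained with toy types in place of clientPeer and announceData:

package main

import "fmt"

type gate struct {
	capacity uint64
	pending  string // last undelivered announcement
}

// announceOrStore delivers immediately to an active peer and buffers otherwise.
func (g *gate) announceOrStore(a string) {
	g.pending = a
	if g.capacity != 0 {
		g.flush()
	}
}

// setCapacity flushes the stored head when the peer transitions to active.
func (g *gate) setCapacity(c uint64) {
	if g.capacity == 0 && c != 0 {
		g.flush() // peer re-activated: deliver the newest head it missed
	}
	g.capacity = c
}

func (g *gate) flush() {
	if g.pending != "" {
		fmt.Println("sent:", g.pending)
		g.pending = ""
	}
}

func main() {
	g := &gate{}
	g.announceOrStore("block #1") // stored: capacity is zero
	g.announceOrStore("block #2") // replaces #1; only the newest head matters
	g.setCapacity(100)            // prints "sent: block #2"
}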
+func (p *clientPeer) announceOrStore(announce announceData) { + p.lock.Lock() + defer p.lock.Unlock() + + p.lastAnnounce = announce + if p.capacity != 0 { + p.sendLastAnnounce() + } +} + +// sendLastAnnounce sends the stored head announcement to the client if it hasn't been sent before +func (p *clientPeer) sendLastAnnounce() { + if p.lastAnnounce.Td == nil { + return + } + if p.headInfo.Td == nil || p.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 { + if !p.queueSend(func() { p.sendAnnounce(p.lastAnnounce) }) { + p.Log().Debug("Dropped announcement because queue is full", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash) + } else { + p.Log().Debug("Sent announcement", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash) + } + p.headInfo = blockInfo{Hash: p.lastAnnounce.Hash, Number: p.lastAnnounce.Number, Td: p.lastAnnounce.Td} + } } // freezeClient temporarily puts the client in a frozen state which means all @@ -1064,6 +1117,11 @@ func (p *clientPeer) getInvalid() uint64 { return p.invalidCount.Value(mclock.Now()) } +// Disconnect implements vfs.clientPeer +func (p *clientPeer) Disconnect() { + p.Peer.Disconnect(p2p.DiscRequested) +} + // serverPeerSubscriber is an interface to notify services about added or // removed server peers type serverPeerSubscriber interface { @@ -1221,3 +1279,181 @@ func (ps *serverPeerSet) close() { } ps.closed = true } + +// clientPeerSet represents the set of active client peers currently +// participating in the Light Ethereum sub-protocol. +type clientPeerSet struct { + peers map[enode.ID]*clientPeer + lock sync.RWMutex + closed bool + + privateKey *ecdsa.PrivateKey + lastAnnounce, signedAnnounce announceData +} + +// newClientPeerSet creates a new peer set to track the client peers. +func newClientPeerSet() *clientPeerSet { + return &clientPeerSet{peers: make(map[enode.ID]*clientPeer)} +} + +// register adds a new peer into the peer set, or returns an error if the +// peer is already known. +func (ps *clientPeerSet) register(peer *clientPeer) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + if ps.closed { + return errClosed + } + if _, exist := ps.peers[peer.ID()]; exist { + return errAlreadyRegistered + } + ps.peers[peer.ID()] = peer + ps.announceOrStore(peer) + return nil +} + +// unregister removes a remote peer from the peer set, disabling any further +// actions to/from that particular entity. It also initiates disconnection +// at the networking layer. +func (ps *clientPeerSet) unregister(id enode.ID) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + p, ok := ps.peers[id] + if !ok { + return errNotRegistered + } + delete(ps.peers, id) + p.Peer.Disconnect(p2p.DiscRequested) + return nil +} + +// ids returns a list of all registered peer IDs +func (ps *clientPeerSet) ids() []enode.ID { + ps.lock.RLock() + defer ps.lock.RUnlock() + + var ids []enode.ID + for id := range ps.peers { + ids = append(ids, id) + } + return ids } + +// peer retrieves the registered peer with the given id. +func (ps *clientPeerSet) peer(id enode.ID) *clientPeer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + return ps.peers[id] +} + +// len returns the current number of peers in the set. +func (ps *clientPeerSet) len() int { + ps.lock.RLock() + defer ps.lock.RUnlock() + + return len(ps.peers) +} + +// setSignerKey sets the signer key for signed announcements. Should be called before +// starting the protocol handler.
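The broadcast path below re-signs an announcement only when the head hash changes, so signing cost is paid once per new head regardless of peer count. A toy version of that sign-once cache (the sign function and types are stand-ins, not this patch's code):

package main

import "fmt"

type announce struct {
	Hash string
	Sig  string
}

type signer struct {
	last   announce // last broadcast head
	signed announce // cached signed copy of last
}

// signedCopy returns the cached signed announcement, recomputing the
// signature only when the head hash has changed since the last call.
func (s *signer) signedCopy(sign func(string) string) announce {
	if s.signed.Hash != s.last.Hash {
		s.signed = s.last
		s.signed.Sig = sign(s.last.Hash)
		fmt.Println("signed new head", s.last.Hash)
	}
	return s.signed
}

func main() {
	s := &signer{last: announce{Hash: "0xabc"}}
	sign := func(h string) string { return "sig(" + h + ")" }
	s.signedCopy(sign) // signs
	s.signedCopy(sign) // cache hit, no second signature
}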
+func (ps *clientPeerSet) setSignerKey(privateKey *ecdsa.PrivateKey) { + ps.privateKey = privateKey +} + +// broadcast sends the given announcement to all active peers +func (ps *clientPeerSet) broadcast(announce announceData) { + ps.lock.Lock() + defer ps.lock.Unlock() + + ps.lastAnnounce = announce + for _, peer := range ps.peers { + ps.announceOrStore(peer) + } +} + +// announceOrStore sends the requested type of announcement to the given peer or stores +// it for later if the peer is inactive (capacity == 0). +func (ps *clientPeerSet) announceOrStore(p *clientPeer) { + if ps.lastAnnounce.Td == nil { + return + } + switch p.announceType { + case announceTypeSimple: + p.announceOrStore(ps.lastAnnounce) + case announceTypeSigned: + if ps.signedAnnounce.Hash != ps.lastAnnounce.Hash { + ps.signedAnnounce = ps.lastAnnounce + ps.signedAnnounce.sign(ps.privateKey) + } + p.announceOrStore(ps.signedAnnounce) + } +} + +// close disconnects all peers. No new peers can be registered +// after close has returned. +func (ps *clientPeerSet) close() { + ps.lock.Lock() + defer ps.lock.Unlock() + + for _, p := range ps.peers { + p.Peer.Disconnect(p2p.DiscQuitting) + } + ps.closed = true +} + +// serverSet is a special set which contains all connected les servers. +// Les servers will also be discovered by the discovery protocol because they +// run the LES protocol as well. We can't drop them: although they are useless +// to us (as a server), they may still be useful to other protocols (e.g. eth) +// running on top of devp2p. +type serverSet struct { + lock sync.Mutex + set map[string]*clientPeer + closed bool +} + +func newServerSet() *serverSet { + return &serverSet{set: make(map[string]*clientPeer)} +} + +func (s *serverSet) register(peer *clientPeer) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.closed { + return errClosed + } + if _, exist := s.set[peer.id]; exist { + return errAlreadyRegistered + } + s.set[peer.id] = peer + return nil +} + +func (s *serverSet) unregister(peer *clientPeer) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.closed { + return errClosed + } + if _, exist := s.set[peer.id]; !exist { + return errNotRegistered + } + delete(s.set, peer.id) + peer.Peer.Disconnect(p2p.DiscQuitting) + return nil +} + +func (s *serverSet) close() { + s.lock.Lock() + defer s.lock.Unlock() + + for _, p := range s.set { + p.Peer.Disconnect(p2p.DiscQuitting) + } + s.closed = true +} diff --git a/les/server.go b/les/server.go index 63feaf892c..d44b1b57d4 100644 --- a/les/server.go +++ b/les/server.go @@ -18,7 +18,6 @@ package les import ( "crypto/ecdsa" - "reflect" "time", "github.com/ethereum/go-ethereum/common/mclock" @@ -26,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/flowcontrol" - "github.com/ethereum/go-ethereum/les/vflux" vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" @@ -34,24 +32,16 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) var ( - serverSetup = &nodestate.Setup{} - clientPeerField = serverSetup.NewField("clientPeer", reflect.TypeOf(&clientPeer{})) - clientInfoField = serverSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{})) - connAddressField = serverSetup.NewField("connAddr",
reflect.TypeOf("")) - balanceTrackerSetup = vfs.NewBalanceTrackerSetup(serverSetup) - priorityPoolSetup = vfs.NewPriorityPoolSetup(serverSetup) + defaultPosFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1} + defaultNegFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1} ) -func init() { - balanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField) - priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority -} +const defaultConnectedBias = time.Minute * 3 type ethBackend interface { ArchiveMode() bool @@ -65,10 +55,10 @@ type ethBackend interface { type LesServer struct { lesCommons - ns *nodestate.NodeStateMachine archiveMode bool // Flag whether the ethereum node runs in archive mode. handler *serverHandler - broadcaster *broadcaster + peers *clientPeerSet + serverset *serverSet vfluxServer *vfs.Server privateKey *ecdsa.PrivateKey @@ -77,7 +67,7 @@ type LesServer struct { costTracker *costTracker defParams flowcontrol.ServerParams servingQueue *servingQueue - clientPool *clientPool + clientPool *vfs.ClientPool minCapacity, maxCapacity uint64 threadsIdle int // Request serving threads count when system is idle. @@ -87,11 +77,10 @@ type LesServer struct { } func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*LesServer, error) { - lesDb, err := node.OpenDatabase("les.server", 0, 0, "eth/db/les.server") + lesDb, err := node.OpenDatabase("les.server", 0, 0, "eth/db/lesserver/", false) if err != nil { return nil, err } - ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) // Calculate the number of threads used to service the light client // requests based on the user-specified value. 
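A quick worked example of the thread formula that follows (four serving threads per 100% of LightServ, with integer division truncating):

package main

import "fmt"

// servingThreads mirrors the arithmetic below: LightServ is the percentage
// of time the server may spend serving light clients.
func servingThreads(lightServ int) int {
	return lightServ * 4 / 100 // e.g. 50% -> 2 threads, 100% -> 4 threads
}

func main() {
	fmt.Println(servingThreads(50), servingThreads(100)) // 2 4
}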
threads := config.LightServ * 4 / 100 @@ -111,9 +100,9 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency, true), closeCh: make(chan struct{}), }, - ns: ns, archiveMode: e.ArchiveMode(), - broadcaster: newBroadcaster(ns), + peers: newClientPeerSet(), + serverset: newServerSet(), vfluxServer: vfs.NewServer(time.Millisecond * 10), fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}), servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100), @@ -121,7 +110,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les threadsIdle: threads, p2pSrv: node.Server(), } - srv.vfluxServer.Register(srv) issync := e.Synced if config.LightNoSyncServe { issync = func() bool { return true } @@ -149,8 +137,10 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les srv.maxCapacity = totalRecharge } srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2) - srv.clientPool = newClientPool(ns, lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient) - srv.clientPool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}) + srv.clientPool = vfs.NewClientPool(lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, issync) + srv.clientPool.Start() + srv.clientPool.SetDefaultFactors(defaultPosFactors, defaultNegFactors) + srv.vfluxServer.Register(srv.clientPool, "les", "Ethereum light client service") checkpoint := srv.latestLocalCheckpoint() if !checkpoint.Empty() { @@ -162,14 +152,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les node.RegisterProtocols(srv.Protocols()) node.RegisterAPIs(srv.APIs()) node.RegisterLifecycle(srv) - - // disconnect all peers at nsm shutdown - ns.SubscribeField(clientPeerField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if state.Equals(serverSetup.OfflineFlag()) && oldValue != nil { - oldValue.(*clientPeer).Peer.Disconnect(p2p.DiscRequested) - } - }) - ns.Start() return srv, nil } @@ -198,7 +180,7 @@ func (s *LesServer) APIs() []rpc.API { func (s *LesServer) Protocols() []p2p.Protocol { ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} { - if p := s.getClient(id); p != nil { + if p := s.peers.peer(id); p != nil { return p.Info() } return nil @@ -215,7 +197,7 @@ func (s *LesServer) Protocols() []p2p.Protocol { // Start starts the LES server func (s *LesServer) Start() error { s.privateKey = s.p2pSrv.PrivateKey - s.broadcaster.setSignerKey(s.privateKey) + s.peers.setSignerKey(s.privateKey) s.handler.start() s.wg.Add(1) go s.capacityManagement() @@ -229,8 +211,9 @@ func (s *LesServer) Start() error { func (s *LesServer) Stop() error { close(s.closeCh) - s.clientPool.stop() - s.ns.Stop() + s.clientPool.Stop() + s.serverset.close() + s.peers.close() s.fcManager.Stop() s.costTracker.stop() s.handler.stop() @@ -261,7 +244,7 @@ func (s *LesServer) capacityManagement() { totalCapacityCh := make(chan uint64, 100) totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh) - s.clientPool.setLimits(s.config.LightPeers, totalCapacity) + s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity) var ( busy bool @@ -298,39 +281,9 @@ func (s *LesServer) capacityManagement() { 
log.Warn("Reduced free peer connections", "from", freePeers, "to", newFreePeers) } freePeers = newFreePeers - s.clientPool.setLimits(s.config.LightPeers, totalCapacity) + s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity) case <-s.closeCh: return } } } - -func (s *LesServer) getClient(id enode.ID) *clientPeer { - if node := s.ns.GetNode(id); node != nil { - if p, ok := s.ns.GetField(node, clientPeerField).(*clientPeer); ok { - return p - } - } - return nil -} - -func (s *LesServer) dropClient(id enode.ID) { - if p := s.getClient(id); p != nil { - p.Peer.Disconnect(p2p.DiscRequested) - } -} - -// ServiceInfo implements vfs.Service -func (s *LesServer) ServiceInfo() (string, string) { - return "les", "Ethereum light client service" -} - -// Handle implements vfs.Service -func (s *LesServer) Handle(id enode.ID, address string, name string, data []byte) []byte { - switch name { - case vflux.CapacityQueryName: - return s.clientPool.serveCapQuery(id, address, data) - default: - return nil - } -} diff --git a/les/server_handler.go b/les/server_handler.go index 7651d03cab..5e12136d90 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -17,7 +17,6 @@ package les import ( - "crypto/ecdsa" "errors" "sync" "sync/atomic" @@ -31,13 +30,10 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" - vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) @@ -59,7 +55,6 @@ const ( var ( errTooManyInvalidRequest = errors.New("too many invalid requests made") - errFullClientPool = errors.New("client pool is full") ) // serverHandler is responsible for serving light client and process @@ -128,32 +123,18 @@ func (h *serverHandler) handle(p *clientPeer) error { p.Log().Debug("Light Ethereum handshake failed", "err", err) return err } - // Reject the duplicated peer, otherwise register it to peerset. - var registered bool - if err := h.server.ns.Operation(func() { - if h.server.ns.GetField(p.Node(), clientPeerField) != nil { - registered = true - } else { - h.server.ns.SetFieldSub(p.Node(), clientPeerField, p) - } - }); err != nil { - return err - } - if registered { - return errAlreadyRegistered - } - defer func() { - h.server.ns.SetField(p.Node(), clientPeerField, nil) - if p.fcClient != nil { // is nil when connecting another server - p.fcClient.Disconnect() - } - }() if p.server { + if err := h.server.serverset.register(p); err != nil { + return err + } // connected to another server, no messages expected, just wait for disconnection _, err := p.rw.ReadMsg() + h.server.serverset.unregister(p) return err } + defer p.fcClient.Disconnect() // set by handshake if it's not another server + // Reject light clients if server is not synced. 
// // Put this checking here, so that "non-synced" les-server peers are still allowed @@ -162,30 +143,31 @@ func (h *serverHandler) handle(p *clientPeer) error { p.Log().Debug("Light server not synced, rejecting peer") return p2p.DiscRequested } - // Disconnect the inbound peer if it's rejected by clientPool - if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil { - p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool) - return errFullClientPool + if err := h.server.peers.register(p); err != nil { + return err } - p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*vfs.NodeBalance) - if p.balance == nil { + if p.balance = h.server.clientPool.Register(p); p.balance == nil { + h.server.peers.unregister(p.ID()) + p.Log().Debug("Client pool already closed") return p2p.DiscRequested } - activeCount, _ := h.server.clientPool.pp.Active() + activeCount, _ := h.server.clientPool.Active() clientConnectionGauge.Update(int64(activeCount)) + p.connectedAt = mclock.Now() var wg sync.WaitGroup // Wait group used to track all in-flight task routines. - connectedAt := mclock.Now() defer func() { wg.Wait() // Ensure all background task routines have exited. - h.server.clientPool.disconnect(p) + h.server.clientPool.Unregister(p) + h.server.peers.unregister(p.ID()) p.balance = nil - activeCount, _ := h.server.clientPool.pp.Active() + activeCount, _ := h.server.clientPool.Active() clientConnectionGauge.Update(int64(activeCount)) - connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) + connectionTimer.Update(time.Duration(mclock.Now() - p.connectedAt)) }() - // Mark the peer starts to be served. + + // Mark the peer as being served. atomic.StoreUint32(&p.serving, 1) defer atomic.StoreUint32(&p.serving, 0) @@ -448,78 +430,9 @@ func (h *serverHandler) broadcastLoop() { } lastHead, lastTd = header, td log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg) - h.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}) + h.server.peers.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}) case <-h.closeCh: return } } } - -// broadcaster sends new header announcements to active client peers -type broadcaster struct { - ns *nodestate.NodeStateMachine - privateKey *ecdsa.PrivateKey - lastAnnounce, signedAnnounce announceData -} - -// newBroadcaster creates a new broadcaster -func newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster { - b := &broadcaster{ns: ns} - ns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(priorityPoolSetup.ActiveFlag) { - // send last announcement to activated peers - b.sendTo(node) - } - }) - return b -} - -// setSignerKey sets the signer key for signed announcements. Should be called before -// starting the protocol handler. 
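Both the old sendTo below and the new sendLastAnnounce earlier in this patch gate announcements on total difficulty: a head is only worth sending if its Td strictly exceeds the last head sent to that peer. The rule in isolation:

package main

import (
	"fmt"
	"math/big"
)

// shouldAnnounce reports whether the new head beats the last one sent.
func shouldAnnounce(lastSentTd, newTd *big.Int) bool {
	if newTd == nil {
		return false // nothing to announce yet
	}
	return lastSentTd == nil || newTd.Cmp(lastSentTd) > 0
}

func main() {
	fmt.Println(shouldAnnounce(nil, big.NewInt(100)))             // true: first head
	fmt.Println(shouldAnnounce(big.NewInt(100), big.NewInt(100))) // false: same weight
	fmt.Println(shouldAnnounce(big.NewInt(100), big.NewInt(101))) // true: heavier chain
}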
-func (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) { - b.privateKey = privateKey -} - -// broadcast sends the given announcements to all active peers -func (b *broadcaster) broadcast(announce announceData) { - b.ns.Operation(func() { - // iterate in an Operation to ensure that the active set does not change while iterating - b.lastAnnounce = announce - b.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - b.sendTo(node) - }) - }) -} - -// sendTo sends the most recent announcement to the given node unless the same or higher Td -// announcement has already been sent. -func (b *broadcaster) sendTo(node *enode.Node) { - if b.lastAnnounce.Td == nil { - return - } - if p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil { - if p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 { - announce := b.lastAnnounce - switch p.announceType { - case announceTypeSimple: - if !p.queueSend(func() { p.sendAnnounce(announce) }) { - log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash) - } else { - log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash) - } - case announceTypeSigned: - if b.signedAnnounce.Hash != b.lastAnnounce.Hash { - b.signedAnnounce = b.lastAnnounce - b.signedAnnounce.sign(b.privateKey) - } - announce := b.signedAnnounce - if !p.queueSend(func() { p.sendAnnounce(announce) }) { - log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash) - } else { - log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash) - } - } - p.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td} - } - } -} diff --git a/les/server_requests.go b/les/server_requests.go index 07f30b1b73..bab5f733d5 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -206,8 +206,8 @@ func handleGetBlockHeaders(msg Decoder) (serveRequestFn, uint64, uint64, error) next = current + r.Query.Skip + 1 ) if next <= current { - infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") - p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", r.Query.Skip, "next", next, "attacker", infos) + infos, _ := json.Marshal(p.Peer.Info()) + p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", r.Query.Skip, "next", next, "attacker", string(infos)) unknown = true } else { if header := bc.GetHeaderByNumber(next); header != nil { diff --git a/les/servingqueue.go b/les/servingqueue.go index 9db84e6159..16e064cb3f 100644 --- a/les/servingqueue.go +++ b/les/servingqueue.go @@ -123,7 +123,7 @@ func (t *servingTask) waitOrStop() bool { // newServingQueue returns a new servingQueue func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue { sq := &servingQueue{ - queue: prque.New(nil), + queue: prque.NewWrapAround(nil), suspendBias: suspendBias, queueAddCh: make(chan *servingTask, 100), queueBestCh: make(chan *servingTask), @@ -279,7 +279,7 @@ func (sq *servingQueue) updateRecentTime() { func (sq *servingQueue) addTask(task *servingTask) { if sq.best == nil { sq.best = task - } else if task.priority > sq.best.priority { + } else if task.priority-sq.best.priority > 0 { sq.queue.Push(sq.best, sq.best.priority) sq.best = task } else { diff --git a/les/state_accessor.go b/les/state_accessor.go index 013d883e4c..fcd838d4fa 100644 --- a/les/state_accessor.go +++ b/les/state_accessor.go @@ -30,41 +30,31 @@ import ( ) // 
stateAtBlock retrieves the state database associated with a certain block. -func (leth *LightEthereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, mps.PrivateStateRepository, func(), error) { - return light.NewState(ctx, block.Header(), leth.odr), nil, func() {}, nil -} - -// statesInRange retrieves a batch of state databases associated with the specific -// block ranges. -func (leth *LightEthereum) statesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, []mps.PrivateStateRepository, func(), error) { - var states []*state.StateDB - for number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number++ { - header, err := leth.blockchain.GetHeaderByNumberOdr(ctx, number) - if err != nil { - return nil, nil, nil, err - } - states = append(states, light.NewState(ctx, header, leth.odr)) - } - return states, nil, nil, nil +func (leth *LightEthereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, mps.PrivateStateRepository, error) { + return light.NewState(ctx, block.Header(), leth.odr), nil, nil } // stateAtTransaction returns the execution environment of a certain transaction. -func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, func(), error) { +func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, *state.StateDB, mps.PrivateStateRepository, error) { // Short circuit if it's genesis block. if block.NumberU64() == 0 { - return nil, vm.BlockContext{}, nil, nil, nil, nil, errors.New("no transaction in genesis") + return nil, vm.BlockContext{}, nil, nil, nil, errors.New("no transaction in genesis") } // Create the parent state database parent, err := leth.blockchain.GetBlock(ctx, block.ParentHash(), block.NumberU64()-1) if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, err + return nil, vm.BlockContext{}, nil, nil, nil, err + } + statedb, private, err := leth.stateAtBlock(ctx, parent, reexec) + if err != nil { + return nil, vm.BlockContext{}, nil, nil, nil, err } - statedb, _, _, err := leth.stateAtBlock(ctx, parent, reexec) + privateStateDb, err := private.DefaultState() if err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, err + return nil, vm.BlockContext{}, nil, nil, nil, err } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, nil, nil, func() {}, nil + return nil, vm.BlockContext{}, statedb, privateStateDb, private, nil } // Recompute transactions up to the target index. signer := types.MakeSigner(leth.blockchain.Config(), block.Number()) @@ -73,17 +63,19 @@ func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types. 
msg, _ := tx.AsMessage(signer) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil) + statedb.Prepare(tx.Hash(), block.Hash(), idx) + privateStateDb.Prepare(tx.Hash(), block.Hash(), idx) if idx == txIndex { - return msg, context, statedb, nil, nil, func() {}, nil + return msg, context, statedb, statedb, nil, nil } // Not yet the searched for transaction, execute on top of the current state - vmenv := vm.NewEVM(context, txContext, statedb, nil, leth.blockchain.Config(), vm.Config{}) + vmenv := vm.NewEVM(context, txContext, statedb, privateStateDb, leth.blockchain.Config(), vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, nil, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, nil, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } diff --git a/les/test_helper.go b/les/test_helper.go index bf22c99fac..87b53e6971 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -45,10 +45,10 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/les/checkpointoracle" "github.com/ethereum/go-ethereum/les/flowcontrol" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/params" ) @@ -284,7 +284,6 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da } oracle = checkpointoracle.New(checkpointConfig, getLocal) } - ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) server := &LesServer{ lesCommons: lesCommons{ genesis: genesis.Hash(), @@ -296,8 +295,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da oracle: oracle, closeCh: make(chan struct{}), }, - ns: ns, - broadcaster: newBroadcaster(ns), + peers: newClientPeerSet(), servingQueue: newServingQueue(int64(time.Millisecond*10), 1), defParams: flowcontrol.ServerParams{ BufLimit: testBufLimit, @@ -307,18 +305,22 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da } server.costTracker, server.minCapacity = newCostTracker(db, server.config) server.costTracker.testCostList = testCostList(0) // Disable flow control mechanism. 
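The recompute loop in stateAtTransaction above follows a common replay pattern: execute every transaction before the target index on top of a scratch state, then stop when the target is reached. Stripped of all EVM detail (the types here are simplified stand-ins), it is just:

package main

import "fmt"

type tx struct{ id int }

type state struct{ applied []int }

func (s *state) apply(t tx) { s.applied = append(s.applied, t.id) }

// stateAt returns the state as it was just before txs[txIndex] executed.
func stateAt(txs []tx, txIndex int) (*state, error) {
	st := &state{}
	for idx, t := range txs {
		if idx == txIndex {
			return st, nil // target reached: st reflects txs[0:txIndex]
		}
		st.apply(t) // not there yet: execute on top of the current state
	}
	return nil, fmt.Errorf("transaction index %d out of range", txIndex)
}

func main() {
	st, _ := stateAt([]tx{{1}, {2}, {3}}, 2)
	fmt.Println(st.applied) // [1 2]
}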
- server.clientPool = newClientPool(ns, db, testBufRecharge, defaultConnectedBias, clock, func(id enode.ID) {}) - server.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool + server.clientPool = vfs.NewClientPool(db, testBufRecharge, defaultConnectedBias, clock, alwaysTrueFn) + server.clientPool.Start() + server.clientPool.SetLimits(10000, 10000) // Assign enough capacity for clientpool server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true }) if server.oracle != nil { server.oracle.Start(simulation) } server.servingQueue.setThreads(4) - ns.Start() server.handler.start() return server.handler, simulation } +func alwaysTrueFn() bool { + return true +} + // testPeer is a simulated peer to allow testing direct network calls. type testPeer struct { cpeer *clientPeer @@ -661,6 +663,10 @@ func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testC return s, c, teardown } -func NewFuzzerPeer(version int) *clientPeer { - return newClientPeer(version, 0, p2p.NewPeer(enode.ID{}, "", nil), nil) +// NewFuzzerPeer creates a client peer for test purposes, and also returns +// a function to close the peer: this is needed to avoid goroutine leaks in the +// exec queue. +func NewFuzzerPeer(version int) (p *clientPeer, closer func()) { + p = newClientPeer(version, 0, p2p.NewPeer(enode.ID{}, "", nil), nil) + return p, func() { p.peerCommons.close() } } diff --git a/les/vflux/client/serverpool.go b/les/vflux/client/serverpool.go index e73b277ced..cc0254c127 100644 --- a/les/vflux/client/serverpool.go +++ b/les/vflux/client/serverpool.go @@ -47,7 +47,8 @@ const ( nodeWeightThreshold = 100 // minimum weight for keeping a node in the known (valuable) set minRedialWait = 10 // minimum redial wait time in seconds preNegLimit = 5 // maximum number of simultaneous pre-negotiation queries - maxQueryFails = 100 // number of consecutive UDP query failures before we print a warning + warnQueryFails = 20 // number of consecutive UDP query failures before we print a warning + maxQueryFails = 100 // number of consecutive UDP query failures at which the chance of skipping a query reaches 50% ) // ServerPool provides a node iterator for dial candidates.
The output is a mix of newly discovered @@ -94,16 +95,16 @@ type nodeHistoryEnc struct { type QueryFunc func(*enode.Node) int var ( - clientSetup = &nodestate.Setup{Version: 2} - sfHasValue = clientSetup.NewPersistentFlag("hasValue") - sfQueried = clientSetup.NewFlag("queried") - sfCanDial = clientSetup.NewFlag("canDial") - sfDialing = clientSetup.NewFlag("dialed") - sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout") - sfConnected = clientSetup.NewFlag("connected") - sfRedialWait = clientSetup.NewFlag("redialWait") - sfAlwaysConnect = clientSetup.NewFlag("alwaysConnect") - sfDisableSelection = nodestate.MergeFlags(sfQueried, sfCanDial, sfDialing, sfConnected, sfRedialWait) + clientSetup = &nodestate.Setup{Version: 2} + sfHasValue = clientSetup.NewPersistentFlag("hasValue") + sfQuery = clientSetup.NewFlag("query") + sfCanDial = clientSetup.NewFlag("canDial") + sfDialing = clientSetup.NewFlag("dialed") + sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout") + sfConnected = clientSetup.NewFlag("connected") + sfRedialWait = clientSetup.NewFlag("redialWait") + sfAlwaysConnect = clientSetup.NewFlag("alwaysConnect") + sfDialProcess = nodestate.MergeFlags(sfQuery, sfCanDial, sfDialing, sfConnected, sfRedialWait) sfiNodeHistory = clientSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}), func(field interface{}) ([]byte, error) { @@ -162,8 +163,8 @@ func NewServerPool(db ethdb.KeyValueStore, dbKey []byte, mixTimeout time.Duratio } s.recalTimeout() s.mixer = enode.NewFairMix(mixTimeout) - knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight) - alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil) + knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDialProcess, sfiNodeWeight) + alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDialProcess, true, nil) s.mixSources = append(s.mixSources, knownSelector) s.mixSources = append(s.mixSources, alwaysConnect) @@ -226,7 +227,7 @@ func (s *ServerPool) AddMetrics( s.totalValueGauge = totalValueGauge s.sessionValueMeter = sessionValueMeter if serverSelectableGauge != nil { - s.ns.AddLogMetrics(sfHasValue, sfDisableSelection, "selectable", nil, nil, serverSelectableGauge) + s.ns.AddLogMetrics(sfHasValue, sfDialProcess, "selectable", nil, nil, serverSelectableGauge) } if serverDialedMeter != nil { s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil) @@ -247,43 +248,51 @@ func (s *ServerPool) AddSource(source enode.Iterator) { // Nodes that are filtered out and does not appear on the output iterator are put back // into redialWait state. 
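addPreNegFilter below skips UDP pre-negotiation queries with a probability that grows linearly with the consecutive-failure count and is capped at 50% (the count is clamped to maxQueryFails and drawn against a range of maxQueryFails*2). A stand-alone version of just that dice roll:

package main

import (
	"fmt"
	"math/rand"
)

const maxQueryFails = 100

// skipQuery reports whether to skip a pre-negotiation query given the
// current consecutive-failure count; the skip chance tops out at 50%
// so the client keeps working even if UDP is entirely broken.
func skipQuery(fails uint32) bool {
	failMax := fails
	if failMax > maxQueryFails {
		failMax = maxQueryFails
	}
	return rand.Intn(maxQueryFails*2) < int(failMax)
}

func main() {
	// Estimate the skip rate at the cap: it should hover around 50%.
	skipped := 0
	for i := 0; i < 100000; i++ {
		if skipQuery(1000) {
			skipped++
		}
	}
	fmt.Printf("skip rate at cap: ~%.0f%%\n", float64(skipped)/1000)
}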
func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enode.Iterator { - s.fillSet = NewFillSet(s.ns, input, sfQueried) - s.ns.SubscribeState(sfQueried, func(n *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(sfQueried) { - fails := atomic.LoadUint32(&s.queryFails) - if fails == maxQueryFails { - log.Warn("UDP pre-negotiation query does not seem to work") + s.fillSet = NewFillSet(s.ns, input, sfQuery) + s.ns.SubscribeState(sfDialProcess, func(n *enode.Node, oldState, newState nodestate.Flags) { + if !newState.Equals(sfQuery) { + if newState.HasAll(sfQuery) { + // remove query flag if the node is already somewhere in the dial process + s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0) } - if fails > maxQueryFails { - fails = maxQueryFails - } - if rand.Intn(maxQueryFails*2) < int(fails) { - // skip pre-negotiation with increasing chance, max 50% - // this ensures that the client can operate even if UDP is not working at all - s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) - // set canDial before resetting queried so that FillSet will not read more - // candidates unnecessarily - s.ns.SetStateSub(n, nodestate.Flags{}, sfQueried, 0) - return + return + } + fails := atomic.LoadUint32(&s.queryFails) + failMax := fails + if failMax > maxQueryFails { + failMax = maxQueryFails + } + if rand.Intn(maxQueryFails*2) < int(failMax) { + // skip pre-negotiation with increasing chance, max 50% + // this ensures that the client can operate even if UDP is not working at all + s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) + // set canDial before resetting queried so that FillSet will not read more + // candidates unnecessarily + s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0) + return + } + go func() { + q := query(n) + if q == -1 { + atomic.AddUint32(&s.queryFails, 1) + fails++ + if fails%warnQueryFails == 0 { + // warn if a large number of consecutive queries have failed + log.Warn("UDP connection queries failed", "count", fails) + } + } else { + atomic.StoreUint32(&s.queryFails, 0) } - go func() { - q := query(n) - if q == -1 { - atomic.AddUint32(&s.queryFails, 1) + s.ns.Operation(func() { + // we are no longer running in the operation that the callback belongs to, start a new one because of setRedialWait + if q == 1 { + s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) } else { - atomic.StoreUint32(&s.queryFails, 0) + s.setRedialWait(n, queryCost, queryWaitStep) } - s.ns.Operation(func() { - // we are no longer running in the operation that the callback belongs to, start a new one because of setRedialWait - if q == 1 { - s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) - } else { - s.setRedialWait(n, queryCost, queryWaitStep) - } - s.ns.SetStateSub(n, nodestate.Flags{}, sfQueried, 0) - }) - }() - } + s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0) + }) + }() }) return NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) { if waiting { diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 5bb1ca99c7..01e645a16a 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -47,21 +47,57 @@ type PriceFactors struct { TimeFactor, CapacityFactor, RequestFactor float64 } -// timePrice returns the price of connection per nanosecond at the given capacity -func (p PriceFactors) timePrice(cap uint64) float64 { - return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 +// connectionPrice returns the price of connection per 
nanosecond at the given capacity +// and the estimated average request cost. +func (p PriceFactors) connectionPrice(cap uint64, avgReqCost float64) float64 { + return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 + p.RequestFactor*avgReqCost } -// NodeBalance keeps track of the positive and negative balances of a connected +type ( + // nodePriority interface provides current and estimated future priorities on demand + nodePriority interface { + // priority should return the current priority of the node (higher is better) + priority(cap uint64) int64 + // estimatePriority should return a lower estimate for the minimum of the node priority + // value starting from the current moment until the given time. If the priority goes + // under the returned estimate before the specified moment then it is the caller's + // responsibility to signal with updateFlag. + estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 + } + + // ReadOnlyBalance provides read-only operations on the node balance + ReadOnlyBalance interface { + nodePriority + GetBalance() (uint64, uint64) + GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) + GetPriceFactors() (posFactor, negFactor PriceFactors) + } + + // ConnectedBalance provides operations permitted on connected nodes (non-read-only + // operations are not permitted inside a BalanceOperation) + ConnectedBalance interface { + ReadOnlyBalance + SetPriceFactors(posFactor, negFactor PriceFactors) + RequestServed(cost uint64) uint64 + } + + // AtomicBalanceOperator provides operations permitted in an atomic BalanceOperation + AtomicBalanceOperator interface { + ReadOnlyBalance + AddBalance(amount int64) (uint64, uint64, error) + SetBalance(pos, neg uint64) error + } +) + +// nodeBalance keeps track of the positive and negative balances of a connected // client and calculates actual and projected future priority values. // Implements nodePriority interface. -type NodeBalance struct { - bt *BalanceTracker +type nodeBalance struct { + bt *balanceTracker lock sync.RWMutex node *enode.Node connAddress string - active bool - priority bool + active, hasPriority, setFlags bool capacity uint64 balance balance posFactor, negFactor PriceFactors @@ -78,7 +114,62 @@ type NodeBalance struct { // balance represents a pair of positive and negative balances type balance struct { - pos, neg utils.ExpiredValue + pos, neg utils.ExpiredValue + posExp, negExp utils.ValueExpirer +} + +// posValue returns the value of positive balance at a given timestamp. +func (b balance) posValue(now mclock.AbsTime) uint64 { + return b.pos.Value(b.posExp.LogOffset(now)) +} + +// negValue returns the value of negative balance at a given timestamp. +func (b balance) negValue(now mclock.AbsTime) uint64 { + return b.neg.Value(b.negExp.LogOffset(now)) +} + +// addValue adds the value of a given amount to the balance. The original value and +// updated value will also be returned if the addition is successful. +// Returns the error if the given value is too large and the value overflows. 
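To make connectionPrice above concrete, here it is evaluated stand-alone with the default server factors this patch declares (TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1); the type is a local mirror of vfs.PriceFactors for illustration only.

package main

import "fmt"

type priceFactors struct {
	TimeFactor, CapacityFactor, RequestFactor float64
}

// connectionPrice mirrors the formula above: per-nanosecond connection
// price plus the request component scaled by the average request cost.
func (p priceFactors) connectionPrice(cap uint64, avgReqCost float64) float64 {
	return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 + p.RequestFactor*avgReqCost
}

func main() {
	def := priceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}
	fmt.Println(def.connectionPrice(1000000, 0))   // 1.0: capacity term alone
	fmt.Println(def.connectionPrice(2000000, 0.5)) // 2.5: with request cost included
}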
+func (b *balance) addValue(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) { + var ( + val utils.ExpiredValue + offset utils.Fixed64 + ) + if pos { + offset, val = b.posExp.LogOffset(now), b.pos + } else { + offset, val = b.negExp.LogOffset(now), b.neg + } + old := val.Value(offset) + if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { + if !force { + return old, 0, 0, errBalanceOverflow + } + val = utils.ExpiredValue{} + amount = maxBalance + } + net := val.Add(amount, offset) + if pos { + b.pos = val + } else { + b.neg = val + } + return old, val.Value(offset), net, nil +} + +// setValue sets the internal balance amount to the given values. Returns the +// error if the given value is too large. +func (b *balance) setValue(now mclock.AbsTime, pos uint64, neg uint64) error { + if pos > maxBalance || neg > maxBalance { + return errBalanceOverflow + } + var pb, nb utils.ExpiredValue + pb.Add(int64(pos), b.posExp.LogOffset(now)) + nb.Add(int64(neg), b.negExp.LogOffset(now)) + b.pos = pb + b.neg = nb + return nil } // balanceCallback represents a single callback that is activated when client priority @@ -90,18 +181,18 @@ type balanceCallback struct { } // GetBalance returns the current positive and negative balance. -func (n *NodeBalance) GetBalance() (uint64, uint64) { +func (n *nodeBalance) GetBalance() (uint64, uint64) { n.lock.Lock() defer n.lock.Unlock() now := n.bt.clock.Now() n.updateBalance(now) - return n.balance.pos.Value(n.bt.posExp.LogOffset(now)), n.balance.neg.Value(n.bt.negExp.LogOffset(now)) + return n.balance.posValue(now), n.balance.negValue(now) } // GetRawBalance returns the current positive and negative balance // but in the raw(expired value) format. -func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { +func (n *nodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { n.lock.Lock() defer n.lock.Unlock() @@ -114,164 +205,147 @@ func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { // before and after the operation. Exceeding maxBalance results in an error (balance is // unchanged) while adding a negative amount higher than the current balance results in // zero balance. -func (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) { +// Note: this function should run inside a NodeStateMachine operation +func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { var ( - err error - old, new uint64 + err error + old, new uint64 + now = n.bt.clock.Now() + callbacks []func() + setPriority bool ) - n.bt.ns.Operation(func() { - var ( - callbacks []func() - setPriority bool - ) - n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() - n.updateBalance(now) - - // Ensure the given amount is valid to apply. - offset := n.bt.posExp.LogOffset(now) - old = n.balance.pos.Value(offset) - if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { - err = errBalanceOverflow - return false - } - - // Update the total positive balance counter. 
- n.balance.pos.Add(amount, offset) - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() - new = n.balance.pos.Value(offset) - n.storeBalance(true, false) - return true - }) - for _, cb := range callbacks { - cb() - } - if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + // Operation with holding the lock + n.bt.updateTotalBalance(n, func() bool { + n.updateBalance(now) + if old, new, _, err = n.balance.addValue(now, amount, true, false); err != nil { + return false } - n.signalPriorityUpdate() + callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() + n.storeBalance(true, false) + return true }) if err != nil { return old, old, err } - + // Operation without holding the lock + for _, cb := range callbacks { + cb() + } + if n.setFlags { + if setPriority { + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) + } + // Note: priority flag is automatically removed by the zero priority callback if necessary + n.signalPriorityUpdate() + } return old, new, nil } // SetBalance sets the positive and negative balance to the given values -func (n *NodeBalance) SetBalance(pos, neg uint64) error { - if pos > maxBalance || neg > maxBalance { - return errBalanceOverflow - } - n.bt.ns.Operation(func() { - var ( - callbacks []func() - setPriority bool - ) - n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() - n.updateBalance(now) - - var pb, nb utils.ExpiredValue - pb.Add(int64(pos), n.bt.posExp.LogOffset(now)) - nb.Add(int64(neg), n.bt.negExp.LogOffset(now)) - n.balance.pos = pb - n.balance.neg = nb - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() - n.storeBalance(true, true) - return true - }) - for _, cb := range callbacks { - cb() +// Note: this function should run inside a NodeStateMachine operation +func (n *nodeBalance) SetBalance(pos, neg uint64) error { + var ( + now = n.bt.clock.Now() + callbacks []func() + setPriority bool + ) + // Operation with holding the lock + n.bt.updateTotalBalance(n, func() bool { + n.updateBalance(now) + if err := n.balance.setValue(now, pos, neg); err != nil { + return false } + callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() + n.storeBalance(true, true) + return true + }) + // Operation without holding the lock + for _, cb := range callbacks { + cb() + } + if n.setFlags { if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) } + // Note: priority flag is automatically removed by the zero priority callback if necessary n.signalPriorityUpdate() - }) + } return nil } // RequestServed should be called after serving a request for the given peer -func (n *NodeBalance) RequestServed(cost uint64) uint64 { +func (n *nodeBalance) RequestServed(cost uint64) (newBalance uint64) { n.lock.Lock() - var callbacks []func() - defer func() { - n.lock.Unlock() - if callbacks != nil { - n.bt.ns.Operation(func() { - for _, cb := range callbacks { - cb() - } - }) - } - }() - now := n.bt.clock.Now() + var ( + check bool + fcost = float64(cost) + now = n.bt.clock.Now() + ) n.updateBalance(now) - fcost := float64(cost) - - posExp := n.bt.posExp.LogOffset(now) - var check bool if !n.balance.pos.IsZero() { - if n.posFactor.RequestFactor != 0 { - c := -int64(fcost * n.posFactor.RequestFactor) - cc := n.balance.pos.Add(c, posExp) - if c == cc { + posCost := -int64(fcost * n.posFactor.RequestFactor) + if posCost == 0 { + 
fcost = 0 + newBalance = n.balance.posValue(now) + } else { + var net int64 + _, newBalance, net, _ = n.balance.addValue(now, posCost, true, false) + if posCost == net { fcost = 0 } else { - fcost *= 1 - float64(cc)/float64(c) + fcost *= 1 - float64(net)/float64(posCost) } check = true - } else { - fcost = 0 } } - if fcost > 0 { - if n.negFactor.RequestFactor != 0 { - n.balance.neg.Add(int64(fcost*n.negFactor.RequestFactor), n.bt.negExp.LogOffset(now)) - check = true - } + if fcost > 0 && n.negFactor.RequestFactor != 0 { + n.balance.addValue(now, int64(fcost*n.negFactor.RequestFactor), false, false) + check = true } + n.sumReqCost += cost + + var callbacks []func() if check { callbacks = n.checkCallbacks(now) } - n.sumReqCost += cost - return n.balance.pos.Value(posExp) + n.lock.Unlock() + + if callbacks != nil { + n.bt.ns.Operation(func() { + for _, cb := range callbacks { + cb() + } + }) + } + return } -// Priority returns the actual priority based on the current balance -func (n *NodeBalance) Priority(capacity uint64) int64 { +// priority returns the actual priority based on the current balance +func (n *nodeBalance) priority(capacity uint64) int64 { n.lock.Lock() defer n.lock.Unlock() - n.updateBalance(n.bt.clock.Now()) - return n.balanceToPriority(n.balance, capacity) + now := n.bt.clock.Now() + n.updateBalance(now) + return n.balanceToPriority(now, n.balance, capacity) } // EstMinPriority gives a lower estimate for the priority at a given time in the future. // An average request cost per time is assumed that is twice the average cost per time // in the current session. -// If update is true then a priority callback is added that turns UpdateFlag on and off +// If update is true then a priority callback is added that turns updateFlag on and off // in case the priority goes below the estimated minimum. -func (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { +func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { n.lock.Lock() defer n.lock.Unlock() now := n.bt.clock.Now() n.updateBalance(now) - b := n.balance + + b := n.balance // copy the balance if addBalance != 0 { - offset := n.bt.posExp.LogOffset(now) - old := n.balance.pos.Value(offset) - if addBalance > 0 && (addBalance > maxBalance || old > maxBalance-uint64(addBalance)) { - b.pos = utils.ExpiredValue{} - b.pos.Add(maxBalance, offset) - } else { - b.pos.Add(addBalance, offset) - } + b.addValue(now, addBalance, true, true) } if future > 0 { var avgReqCost float64 @@ -284,52 +358,20 @@ func (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future if bias > 0 { b = n.reducedBalance(b, now+mclock.AbsTime(future), bias, capacity, 0) } - pri := n.balanceToPriority(b, capacity) + // Note: we subtract one from the estimated priority in order to ensure that biased + // estimates are always lower than actual priorities, even if the bias is very small. + // This ensures that two nodes will not ping-pong update signals forever if both of + // them have zero estimated priority drop in the projected future. 
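+	// Example with illustrative numbers: a projected positive balance of 10000000
+	// at capacity 1000 gives a raw priority of 10000; the value computed below is
+	// 9999, guaranteed to stay strictly below the corresponding actual priority.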
+ pri := n.balanceToPriority(now, b, capacity) - 1 if update { n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate) } return pri } -// PosBalanceMissing calculates the missing amount of positive balance in order to -// connect at targetCapacity, stay connected for the given amount of time and then -// still have a priority of targetPriority -func (n *NodeBalance) PosBalanceMissing(targetPriority int64, targetCapacity uint64, after time.Duration) uint64 { - n.lock.Lock() - defer n.lock.Unlock() - - now := n.bt.clock.Now() - if targetPriority < 0 { - timePrice := n.negFactor.timePrice(targetCapacity) - timeCost := uint64(float64(after) * timePrice) - negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now)) - if timeCost+negBalance < uint64(-targetPriority) { - return 0 - } - if uint64(-targetPriority) > negBalance && timePrice > 1e-100 { - if negTime := time.Duration(float64(uint64(-targetPriority)-negBalance) / timePrice); negTime < after { - after -= negTime - } else { - after = 0 - } - } - targetPriority = 0 - } - timePrice := n.posFactor.timePrice(targetCapacity) - posRequired := uint64(float64(targetPriority)*float64(targetCapacity)+float64(after)*timePrice) + 1 - if posRequired >= maxBalance { - return math.MaxUint64 // target not reachable - } - posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now)) - if posRequired > posBalance { - return posRequired - posBalance - } - return 0 -} - // SetPriceFactors sets the price factors. TimeFactor is the price of a nanosecond of // connection while RequestFactor is the price of a request cost unit. -func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { +func (n *nodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { n.lock.Lock() now := n.bt.clock.Now() n.updateBalance(now) @@ -346,7 +388,7 @@ func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { } // GetPriceFactors returns the price factors -func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { +func (n *nodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { n.lock.Lock() defer n.lock.Unlock() @@ -354,7 +396,7 @@ func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { } // activate starts time/capacity cost deduction. -func (n *NodeBalance) activate() { +func (n *nodeBalance) activate() { n.bt.updateTotalBalance(n, func() bool { if n.active { return false @@ -366,7 +408,7 @@ func (n *NodeBalance) activate() { } // deactivate stops time/capacity cost deduction and saves the balances in the database -func (n *NodeBalance) deactivate() { +func (n *nodeBalance) deactivate() { n.bt.updateTotalBalance(n, func() bool { if !n.active { return false @@ -383,7 +425,7 @@ func (n *NodeBalance) deactivate() { } // updateBalance updates balance based on the time factor -func (n *NodeBalance) updateBalance(now mclock.AbsTime) { +func (n *nodeBalance) updateBalance(now mclock.AbsTime) { if n.active && now > n.lastUpdate { n.balance = n.reducedBalance(n.balance, n.lastUpdate, time.Duration(now-n.lastUpdate), n.capacity, 0) n.lastUpdate = now @@ -391,7 +433,7 @@ func (n *NodeBalance) updateBalance(now mclock.AbsTime) { } // storeBalance stores the positive and/or negative balance of the node in the database -func (n *NodeBalance) storeBalance(pos, neg bool) { +func (n *nodeBalance) storeBalance(pos, neg bool) { if pos { n.bt.storeBalance(n.node.ID().Bytes(), false, n.balance.pos) } @@ -405,7 +447,7 @@ func (n *NodeBalance) storeBalance(pos, neg bool) { // immediately. 
// Note: should be called while n.lock is held // Note 2: the callback function runs inside a NodeStateMachine operation -func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) { +func (n *nodeBalance) addCallback(id int, threshold int64, callback func()) { n.removeCallback(id) idx := 0 for idx < n.callbackCount && threshold > n.callbacks[idx].threshold { @@ -425,7 +467,7 @@ func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) { // removeCallback removes the given callback and returns true if it was active // Note: should be called while n.lock is held -func (n *NodeBalance) removeCallback(id int) bool { +func (n *nodeBalance) removeCallback(id int) bool { idx := n.callbackIndex[id] if idx == -1 { return false @@ -442,11 +484,11 @@ func (n *NodeBalance) removeCallback(id int) bool { // checkCallbacks checks whether the threshold of any of the active callbacks // have been reached and returns triggered callbacks. // Note: checkCallbacks assumes that the balance has been recently updated. -func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { +func (n *nodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { if n.callbackCount == 0 || n.capacity == 0 { return } - pri := n.balanceToPriority(n.balance, n.capacity) + pri := n.balanceToPriority(now, n.balance, n.capacity) for n.callbackCount != 0 && n.callbacks[n.callbackCount-1].threshold >= pri { n.callbackCount-- n.callbackIndex[n.callbacks[n.callbackCount].id] = -1 @@ -458,7 +500,7 @@ func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { // scheduleCheck sets up or updates a scheduled event to ensure that it will be called // again just after the next threshold has been reached. -func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) { +func (n *nodeBalance) scheduleCheck(now mclock.AbsTime) { if n.callbackCount != 0 { d, ok := n.timeUntil(n.callbacks[n.callbackCount-1].threshold) if !ok { @@ -484,7 +526,7 @@ func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) { } // updateAfter schedules a balance update and callback check in the future -func (n *NodeBalance) updateAfter(dt time.Duration) { +func (n *nodeBalance) updateAfter(dt time.Duration) { if n.updateEvent == nil || n.updateEvent.Stop() { if dt == 0 { n.updateEvent = nil @@ -512,20 +554,22 @@ func (n *NodeBalance) updateAfter(dt time.Duration) { // balanceExhausted should be called when the positive balance is exhausted (priority goes to zero/negative) // Note: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) balanceExhausted() { +func (n *nodeBalance) balanceExhausted() { n.lock.Lock() n.storeBalance(true, false) - n.priority = false + n.hasPriority = false n.lock.Unlock() - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0) + if n.setFlags { + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.priorityFlag, 0) + } } // checkPriorityStatus checks whether the node has gained priority status and sets the priority // callback and flag if necessary. It assumes that the balance has been recently updated. // Note that the priority flag has to be set by the caller after the mutex has been released. 
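The callback bookkeeping described above can be illustrated in isolation: callbacks are kept sorted by ascending threshold, and triggering pops entries from the end of the list while the last threshold has been reached. A minimal sketch with simplified stand-in types (not the real nodeBalance machinery):

package main

import "fmt"

type callback struct {
	threshold int64
	fn        func()
}

// triggered pops every callback whose threshold is at or above the current
// priority, mirroring checkCallbacks: as priority sinks, the highest
// thresholds are reached first.
func triggered(cbs []callback, pri int64) (remaining, fire []callback) {
	n := len(cbs)
	for n > 0 && cbs[n-1].threshold >= pri {
		n--
		fire = append(fire, cbs[n])
	}
	return cbs[:n], fire
}

func main() {
	cbs := []callback{
		{threshold: -10, fn: func() { fmt.Println("deep negative balance") }},
		{threshold: 0, fn: func() { fmt.Println("positive balance exhausted") }},
	}
	cbs, fire := triggered(cbs, -1) // priority dropped to -1
	for _, cb := range fire {
		cb.fn() // only the threshold-0 callback fires
	}
	fmt.Println(len(cbs)) // 1: the threshold -10 callback stays armed
}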
-func (n *NodeBalance) checkPriorityStatus() bool { - if !n.priority && !n.balance.pos.IsZero() { - n.priority = true +func (n *nodeBalance) checkPriorityStatus() bool { + if !n.hasPriority && !n.balance.pos.IsZero() { + n.hasPriority = true n.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() }) return true } @@ -534,15 +578,15 @@ func (n *NodeBalance) checkPriorityStatus() bool { // signalPriorityUpdate signals that the priority fell below the previous minimum estimate // Note: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) signalPriorityUpdate() { - n.bt.ns.SetStateSub(n.node, n.bt.UpdateFlag, nodestate.Flags{}, 0) - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.UpdateFlag, 0) +func (n *nodeBalance) signalPriorityUpdate() { + n.bt.ns.SetStateSub(n.node, n.bt.setup.updateFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.updateFlag, 0) } // setCapacity updates the capacity value used for priority calculation // Note: capacity should never be zero // Note 2: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) setCapacity(capacity uint64) { +func (n *nodeBalance) setCapacity(capacity uint64) { n.lock.Lock() now := n.bt.clock.Now() n.updateBalance(now) @@ -557,73 +601,89 @@ func (n *NodeBalance) setCapacity(capacity uint64) { // balanceToPriority converts a balance to a priority value. Lower priority means // first to disconnect. Positive balance translates to positive priority. If positive // balance is zero then negative balance translates to a negative priority. -func (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 { - if !b.pos.IsZero() { - return int64(b.pos.Value(n.bt.posExp.LogOffset(n.bt.clock.Now())) / capacity) +func (n *nodeBalance) balanceToPriority(now mclock.AbsTime, b balance, capacity uint64) int64 { + pos := b.posValue(now) + if pos > 0 { + return int64(pos / capacity) } - return -int64(b.neg.Value(n.bt.negExp.LogOffset(n.bt.clock.Now()))) + return -int64(b.negValue(now)) +} + +// priorityToBalance converts a target priority to a requested balance value. +// If the priority is negative, then minimal negative balance is returned; +// otherwise the minimal positive balance is returned. 
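Before the implementation below, a rough standalone illustration of the two mappings, with made-up numbers and the expiration machinery ignored:

package main

import "fmt"

// balanceToPriority: positive balance is scaled by capacity, negative balance
// maps directly to a negative priority (lower means closer to disconnection).
func balanceToPriority(pos, neg, capacity uint64) int64 {
	if pos > 0 {
		return int64(pos / capacity)
	}
	return -int64(neg)
}

// priorityToBalance inverts the mapping: the minimal balance that yields the
// given priority at the given capacity.
func priorityToBalance(priority int64, capacity uint64) (pos, neg uint64) {
	if priority > 0 {
		return uint64(priority) * capacity, 0
	}
	return 0, uint64(-priority)
}

func main() {
	fmt.Println(balanceToPriority(16000, 0, 1000)) // 16
	fmt.Println(priorityToBalance(16, 1000))       // 16000 0
	fmt.Println(balanceToPriority(0, 500, 1000))   // -500
}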
+func (n *nodeBalance) priorityToBalance(priority int64, capacity uint64) (uint64, uint64) {
+	if priority > 0 {
+		return uint64(priority) * capacity, 0
+	}
+	return 0, uint64(-priority)
+}

 // reducedBalance estimates the reduced balance at a given time in the future based
 // on the given balance, the time factor and an estimated average request cost per time ratio
-func (n *NodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance {
+func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance {
 	// since the costs are applied continuously during the dt time period we calculate
 	// the expiration offset at the middle of the period
-	at := start + mclock.AbsTime(dt/2)
-	dtf := float64(dt)
+	var (
+		at  = start + mclock.AbsTime(dt/2)
+		dtf = float64(dt)
+	)
 	if !b.pos.IsZero() {
-		factor := n.posFactor.timePrice(capacity) + n.posFactor.RequestFactor*avgReqCost
+		factor := n.posFactor.connectionPrice(capacity, avgReqCost)
 		diff := -int64(dtf * factor)
-		dd := b.pos.Add(diff, n.bt.posExp.LogOffset(at))
-		if dd == diff {
+		_, _, net, _ := b.addValue(at, diff, true, false)
+		if net == diff {
 			dtf = 0
 		} else {
-			dtf += float64(dd) / factor
+			dtf += float64(net) / factor
 		}
 	}
-	if dt > 0 {
-		factor := n.negFactor.timePrice(capacity) + n.negFactor.RequestFactor*avgReqCost
-		b.neg.Add(int64(dtf*factor), n.bt.negExp.LogOffset(at))
+	if dtf > 0 {
+		factor := n.negFactor.connectionPrice(capacity, avgReqCost)
+		b.addValue(at, int64(dtf*factor), false, false)
 	}
 	return b
 }

 // timeUntil calculates the remaining time needed to reach a given priority level
 // assuming that no requests are processed until then. If the given level is never
-// reached then (0, false) is returned.
+// reached then (0, false) is returned. If it has already been reached then (0, true)
+// is returned.
 // Note: the function assumes that the balance has been recently updated and
 // calculates the time starting from the last update.
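Before the real implementation below, a simplified self-contained sketch of the same computation, assuming a single flat time price and ignoring expiration:

package main

import (
	"fmt"
	"time"
)

// timeUntil estimates how long it takes, with no requests served, for a node
// with the given balances to sink to the target priority. timePrice is the
// flat cost per nanosecond; the real code derives separate positive and
// negative prices from the price factors and applies value expiration.
func timeUntil(pos, neg, targetPos, targetNeg uint64, timePrice float64) (time.Duration, bool) {
	if timePrice < 1e-100 {
		return 0, false // balance never changes, the level is never reached
	}
	var dt float64
	if pos > 0 {
		if targetPos > 0 {
			if targetPos > pos {
				return 0, true // already below the target level
			}
			return time.Duration(float64(pos-targetPos) / timePrice), true
		}
		dt = float64(pos) / timePrice // time needed to drain the positive balance
	} else if targetPos > 0 {
		return 0, true
	}
	if targetNeg > neg {
		dt += float64(targetNeg-neg) / timePrice // then accumulate negative balance
	}
	return time.Duration(dt), true
}

func main() {
	// one minute of positive balance at timePrice 1 reaches priority 0 in one minute
	d, ok := timeUntil(uint64(time.Minute), 0, 0, 0, 1)
	fmt.Println(d, ok) // 1m0s true
}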
-func (n *NodeBalance) timeUntil(priority int64) (time.Duration, bool) { - now := n.bt.clock.Now() - var dt float64 - if !n.balance.pos.IsZero() { - posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now)) - timePrice := n.posFactor.timePrice(n.capacity) +func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { + var ( + now = n.bt.clock.Now() + pos = n.balance.posValue(now) + targetPos, targetNeg = n.priorityToBalance(priority, n.capacity) + diffTime float64 + ) + if pos > 0 { + timePrice := n.posFactor.connectionPrice(n.capacity, 0) if timePrice < 1e-100 { return 0, false } - if priority > 0 { - newBalance := uint64(priority) * n.capacity - if newBalance > posBalance { - return 0, false + if targetPos > 0 { + if targetPos > pos { + return 0, true } - dt = float64(posBalance-newBalance) / timePrice - return time.Duration(dt), true + diffTime = float64(pos-targetPos) / timePrice + return time.Duration(diffTime), true + } else { + diffTime = float64(pos) / timePrice } - dt = float64(posBalance) / timePrice } else { - if priority > 0 { - return 0, false + if targetPos > 0 { + return 0, true } } - // if we have a positive balance then dt equals the time needed to get it to zero - negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now)) - timePrice := n.negFactor.timePrice(n.capacity) - if uint64(-priority) > negBalance { + neg := n.balance.negValue(now) + if targetNeg > neg { + timePrice := n.negFactor.connectionPrice(n.capacity, 0) if timePrice < 1e-100 { return 0, false } - dt += float64(uint64(-priority)-negBalance) / timePrice + diffTime += float64(targetNeg-neg) / timePrice } - return time.Duration(dt), true + return time.Duration(diffTime), true } diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index e22074db2d..66f0d1f301 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/les/utils" "github.com/ethereum/go-ethereum/p2p/enode" @@ -31,59 +32,82 @@ import ( "github.com/ethereum/go-ethereum/p2p/nodestate" ) -var ( - testFlag = testSetup.NewFlag("testFlag") - connAddrFlag = testSetup.NewField("connAddr", reflect.TypeOf("")) - btTestSetup = NewBalanceTrackerSetup(testSetup) -) - -func init() { - btTestSetup.Connect(connAddrFlag, ppTestSetup.CapacityField) -} - type zeroExpirer struct{} func (z zeroExpirer) SetRate(now mclock.AbsTime, rate float64) {} func (z zeroExpirer) SetLogOffset(now mclock.AbsTime, logOffset utils.Fixed64) {} func (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64 { return 0 } +type balanceTestClient struct{} + +func (client balanceTestClient) FreeClientId() string { return "" } + type balanceTestSetup struct { clock *mclock.Simulated + db ethdb.KeyValueStore ns *nodestate.NodeStateMachine - bt *BalanceTracker + setup *serverSetup + bt *balanceTracker } -func newBalanceTestSetup() *balanceTestSetup { +func newBalanceTestSetup(db ethdb.KeyValueStore, posExp, negExp utils.ValueExpirer) *balanceTestSetup { + // Initialize and customize the setup for the balance testing clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - db := memorydb.New() - bt := NewBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{}) + setup := newServerSetup() + setup.clientField = setup.setup.NewField("balancTestClient", 
reflect.TypeOf(balanceTestClient{})) + + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) + if posExp == nil { + posExp = zeroExpirer{} + } + if negExp == nil { + negExp = zeroExpirer{} + } + if db == nil { + db = memorydb.New() + } + bt := newBalanceTracker(ns, setup, db, clock, posExp, negExp) ns.Start() return &balanceTestSetup{ clock: clock, + db: db, ns: ns, + setup: setup, bt: bt, } } -func (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance { +func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) - b.ns.SetState(node, testFlag, nodestate.Flags{}, 0) - b.ns.SetField(node, btTestSetup.connAddressField, "") + b.ns.SetField(node, b.setup.clientField, balanceTestClient{}) if capacity != 0 { - b.ns.SetField(node, ppTestSetup.CapacityField, capacity) + b.ns.SetField(node, b.setup.capacityField, capacity) } - n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*NodeBalance) + n, _ := b.ns.GetField(node, b.setup.balanceField).(*nodeBalance) return n } +func (b *balanceTestSetup) setBalance(node *nodeBalance, pos, neg uint64) (err error) { + b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) { + err = balance.SetBalance(pos, neg) + }) + return +} + +func (b *balanceTestSetup) addBalance(node *nodeBalance, add int64) (old, new uint64, err error) { + b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) { + old, new, err = balance.AddBalance(add) + }) + return +} + func (b *balanceTestSetup) stop() { - b.bt.Stop() + b.bt.stop() b.ns.Stop() } func TestAddBalance(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) @@ -100,7 +124,7 @@ func TestAddBalance(t *testing.T) { {maxBalance, [2]uint64{0, 0}, 0, true}, } for _, i := range inputs { - old, new, err := node.AddBalance(i.delta) + old, new, err := b.addBalance(node, i.delta) if i.expectErr { if err == nil { t.Fatalf("Expect get error but nil") @@ -119,7 +143,7 @@ func TestAddBalance(t *testing.T) { } func TestSetBalance(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) @@ -130,9 +154,8 @@ func TestSetBalance(t *testing.T) { {0, 1000}, {1000, 1000}, } - for _, i := range inputs { - node.SetBalance(i.pos, i.neg) + b.setBalance(node, i.pos, i.neg) pos, neg := node.GetBalance() if pos != i.pos { t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos) @@ -144,13 +167,12 @@ func TestSetBalance(t *testing.T) { } func TestBalanceTimeCost(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - node.SetBalance(uint64(time.Minute), 0) // 1 minute time allowance + b.setBalance(node, uint64(time.Minute), 0) // 1 minute time allowance var inputs = []struct { runTime time.Duration @@ -172,7 +194,7 @@ func TestBalanceTimeCost(t *testing.T) { } } - node.SetBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance + b.setBalance(node, uint64(time.Minute), 0) // Refill 1 minute time allowance for _, i := range inputs { b.clock.Run(i.runTime) if pos, _ := node.GetBalance(); pos != i.expPos { @@ -185,13 +207,12 @@ func TestBalanceTimeCost(t *testing.T) { } func TestBalanceReqCost(t *testing.T) { - b := newBalanceTestSetup() + b := 
newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - node.SetBalance(uint64(time.Minute), 0) // 1 minute time serving time allowance + b.setBalance(node, uint64(time.Minute), 0) // 1 minute time serving time allowance var inputs = []struct { reqCost uint64 expPos uint64 @@ -214,7 +235,7 @@ func TestBalanceReqCost(t *testing.T) { } func TestBalanceToPriority(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -230,22 +251,20 @@ func TestBalanceToPriority(t *testing.T) { {0, 1000, -1000}, } for _, i := range inputs { - node.SetBalance(i.pos, i.neg) - priority := node.Priority(1000) + b.setBalance(node, i.pos, i.neg) + priority := node.priority(1000) if priority != i.priority { - t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority) + t.Fatalf("priority mismatch, want %v, got %v", i.priority, priority) } } } func TestEstimatedPriority(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) var inputs = []struct { runTime time.Duration // time cost futureTime time.Duration // diff of future time @@ -272,47 +291,18 @@ func TestEstimatedPriority(t *testing.T) { for _, i := range inputs { b.clock.Run(i.runTime) node.RequestServed(i.reqCost) - priority := node.EstimatePriority(1000000000, 0, i.futureTime, 0, false) - if priority != i.priority { - t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority) - } - } -} - -func TestPosBalanceMissing(t *testing.T) { - b := newBalanceTestSetup() - defer b.stop() - node := b.newNode(1000) - node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - var inputs = []struct { - pos, neg uint64 - priority int64 - cap uint64 - after time.Duration - expect uint64 - }{ - {uint64(time.Second * 2), 0, 0, 1, time.Second, 0}, - {uint64(time.Second * 2), 0, 0, 1, 2 * time.Second, 1}, - {uint64(time.Second * 2), 0, int64(time.Second), 1, 2 * time.Second, uint64(time.Second) + 1}, - {0, 0, int64(time.Second), 1, time.Second, uint64(2*time.Second) + 1}, - {0, 0, -int64(time.Second), 1, time.Second, 1}, - } - for _, i := range inputs { - node.SetBalance(i.pos, i.neg) - got := node.PosBalanceMissing(i.priority, i.cap, i.after) - if got != i.expect { - t.Fatalf("Missing budget mismatch, want %v, got %v", i.expect, got) + priority := node.estimatePriority(1000000000, 0, i.futureTime, 0, false) + if priority != i.priority-1 { + t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority-1, priority) } } } func TestPostiveBalanceCounting(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() - var nodes []*NodeBalance + var nodes []*nodeBalance for i := 0; i < 100; i += 1 { node := b.newNode(1000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -323,7 +313,7 @@ func TestPostiveBalanceCounting(t *testing.T) { var sum uint64 for i := 0; i < 100; i += 1 { amount := int64(rand.Intn(100) + 100) - 
nodes[i].AddBalance(amount) + b.addBalance(nodes[i], amount) sum += uint64(amount) } if b.bt.TotalTokenAmount() != sum { @@ -333,7 +323,7 @@ func TestPostiveBalanceCounting(t *testing.T) { // Change client status for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -341,7 +331,7 @@ func TestPostiveBalanceCounting(t *testing.T) { } for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -350,7 +340,7 @@ func TestPostiveBalanceCounting(t *testing.T) { } func TestCallbackChecking(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -363,7 +353,7 @@ func TestCallbackChecking(t *testing.T) { {0, time.Second}, {-int64(time.Second), 2 * time.Second}, } - node.SetBalance(uint64(time.Second), 0) + b.setBalance(node, uint64(time.Second), 0) for _, i := range inputs { diff, _ := node.timeUntil(i.priority) if diff != i.expDiff { @@ -373,14 +363,13 @@ func TestCallbackChecking(t *testing.T) { } func TestCallback(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) callCh := make(chan struct{}, 1) - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) b.clock.Run(time.Minute) @@ -390,7 +379,7 @@ func TestCallback(t *testing.T) { t.Fatalf("Callback hasn't been called yet") } - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) node.removeCallback(balanceCallbackZero) @@ -403,23 +392,14 @@ func TestCallback(t *testing.T) { } func TestBalancePersistence(t *testing.T) { - clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - db := memorydb.New() posExp := &utils.Expirer{} negExp := &utils.Expirer{} - posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours - negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt := NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) - ns.Start() - bts := &balanceTestSetup{ - clock: clock, - ns: ns, - bt: bt, - } - var nb *NodeBalance - exp := func(expPos, expNeg uint64) { - pos, neg := nb.GetBalance() + posExp.SetRate(0, math.Log(2)/float64(time.Hour*2)) // halves every two hours + negExp.SetRate(0, math.Log(2)/float64(time.Hour)) // halves every hour + setup := newBalanceTestSetup(nil, posExp, negExp) + + exp := func(balance *nodeBalance, expPos, expNeg uint64) { + pos, neg := balance.GetBalance() if pos != expPos { t.Fatalf("Positive balance incorrect, want %v, got %v", expPos, pos) } @@ -428,44 +408,32 @@ func TestBalancePersistence(t *testing.T) { } } expTotal := func(expTotal uint64) { - total := bt.TotalTokenAmount() + total := setup.bt.TotalTokenAmount() if total != expTotal { t.Fatalf("Total token amount incorrect, want %v, got %v", expTotal, total) } } 
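The assertions in the remainder of this test can be checked by hand: SetRate takes math.Log(2)/halfLife, so balances decay as value(t) = value(0) * 2^(-t/halfLife). A tiny standalone check with the same numbers the test uses:

package main

import (
	"fmt"
	"math"
)

// decayed returns v0 after t hours of exponential decay with the given
// half-life in hours; SetRate(0, math.Log(2)/halfLife) configures exactly
// this curve, since 2^(-t/halfLife) = e^(-ln2*t/halfLife).
func decayed(v0, t, halfLife float64) float64 {
	return v0 * math.Pow(2, -t/halfLife)
}

func main() {
	// after two hours: the positive balance (half-life 2h) halves once,
	// the negative balance (half-life 1h) halves twice
	fmt.Println(decayed(16e9, 2, 2)) // 8e+09
	fmt.Println(decayed(16e9, 2, 1)) // 4e+09
}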
expTotal(0) - nb = bts.newNode(0) + balance := setup.newNode(0) expTotal(0) - nb.SetBalance(16000000000, 16000000000) - exp(16000000000, 16000000000) + setup.setBalance(balance, 16000000000, 16000000000) + exp(balance, 16000000000, 16000000000) expTotal(16000000000) - clock.Run(time.Hour * 2) - exp(8000000000, 4000000000) + + setup.clock.Run(time.Hour * 2) + exp(balance, 8000000000, 4000000000) expTotal(8000000000) - bt.Stop() - ns.Stop() - - clock = &mclock.Simulated{} - ns = nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - posExp = &utils.Expirer{} - negExp = &utils.Expirer{} - posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours - negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt = NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) - ns.Start() - bts = &balanceTestSetup{ - clock: clock, - ns: ns, - bt: bt, - } + setup.stop() + + // Test the functionalities after restart + setup = newBalanceTestSetup(setup.db, posExp, negExp) expTotal(8000000000) - nb = bts.newNode(0) - exp(8000000000, 4000000000) + balance = setup.newNode(0) + exp(balance, 8000000000, 4000000000) expTotal(8000000000) - clock.Run(time.Hour * 2) - exp(4000000000, 1000000000) + setup.clock.Run(time.Hour * 2) + exp(balance, 4000000000, 1000000000) expTotal(4000000000) - bt.Stop() - ns.Stop() + setup.stop() } diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 1708019de4..746697a8c7 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -17,7 +17,6 @@ package server import ( - "reflect" "sync" "time" @@ -25,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/utils" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" ) @@ -34,82 +34,56 @@ const ( persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence ) -// BalanceTrackerSetup contains node state flags and fields used by BalanceTracker -type BalanceTrackerSetup struct { - // controlled by PriorityPool - PriorityFlag, UpdateFlag nodestate.Flags - BalanceField nodestate.Field - // external connections - connAddressField, capacityField nodestate.Field -} - -// NewBalanceTrackerSetup creates a new BalanceTrackerSetup and initializes the fields -// and flags controlled by BalanceTracker -func NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup { - return BalanceTrackerSetup{ - // PriorityFlag is set if the node has a positive balance - PriorityFlag: setup.NewFlag("priorityNode"), - // UpdateFlag set and then immediately reset if the balance has been updated and - // therefore priority is suddenly changed - UpdateFlag: setup.NewFlag("balanceUpdate"), - // BalanceField contains the NodeBalance struct which implements nodePriority, - // allowing on-demand priority calculation and future priority estimation - BalanceField: setup.NewField("balance", reflect.TypeOf(&NodeBalance{})), - } -} - -// Connect sets the fields used by BalanceTracker as an input -func (bts *BalanceTrackerSetup) Connect(connAddressField, capacityField nodestate.Field) { - bts.connAddressField = connAddressField - bts.capacityField = capacityField -} - -// BalanceTracker tracks positive and negative balances for connected nodes. 
-// After connAddressField is set externally, a NodeBalance is created and previous +// balanceTracker tracks positive and negative balances for connected nodes. +// After clientField is set externally, a nodeBalance is created and previous // balance values are loaded from the database. Both balances are exponentially expired // values. Costs are deducted from the positive balance if present, otherwise added to // the negative balance. If the capacity is non-zero then a time cost is applied // continuously while individual request costs are applied immediately. // The two balances are translated into a single priority value that also depends // on the actual capacity. -type BalanceTracker struct { - BalanceTrackerSetup - clock mclock.Clock - lock sync.Mutex - ns *nodestate.NodeStateMachine - ndb *nodeDB - posExp, negExp utils.ValueExpirer - posExpTC, negExpTC uint64 +type balanceTracker struct { + setup *serverSetup + clock mclock.Clock + lock sync.Mutex + ns *nodestate.NodeStateMachine + ndb *nodeDB + posExp, negExp utils.ValueExpirer + + posExpTC, negExpTC uint64 + defaultPosFactors, defaultNegFactors PriceFactors active, inactive utils.ExpiredValue balanceTimer *utils.UpdateTimer quit chan struct{} } -// NewBalanceTracker creates a new BalanceTracker -func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker { +// newBalanceTracker creates a new balanceTracker +func newBalanceTracker(ns *nodestate.NodeStateMachine, setup *serverSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *balanceTracker { ndb := newNodeDB(db, clock) - bt := &BalanceTracker{ - ns: ns, - BalanceTrackerSetup: setup, - ndb: ndb, - clock: clock, - posExp: posExp, - negExp: negExp, - balanceTimer: utils.NewUpdateTimer(clock, time.Second*10), - quit: make(chan struct{}), + bt := &balanceTracker{ + ns: ns, + setup: setup, + ndb: ndb, + clock: clock, + posExp: posExp, + negExp: negExp, + balanceTimer: utils.NewUpdateTimer(clock, time.Second*10), + quit: make(chan struct{}), } posOffset, negOffset := bt.ndb.getExpiration() posExp.SetLogOffset(clock.Now(), posOffset) negExp.SetLogOffset(clock.Now(), negOffset) + // Load all persisted balance entries of priority nodes, + // calculate the total number of issued service tokens. 
bt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool { bt.inactive.AddExp(balance) return true }) - ns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - n, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance) + ns.SubscribeField(bt.setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + n, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance) if n == nil { return } @@ -126,15 +100,22 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup n.deactivate() } }) - ns.SubscribeField(bt.connAddressField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(bt.setup.clientField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + type peer interface { + FreeClientId() string + } if newValue != nil { - ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(string))) + n := bt.newNodeBalance(node, newValue.(peer).FreeClientId(), true) + bt.lock.Lock() + n.SetPriceFactors(bt.defaultPosFactors, bt.defaultNegFactors) + bt.lock.Unlock() + ns.SetFieldSub(node, bt.setup.balanceField, n) } else { - ns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0) - if b, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance); b != nil { + ns.SetStateSub(node, nodestate.Flags{}, bt.setup.priorityFlag, 0) + if b, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance); b != nil { b.deactivate() } - ns.SetFieldSub(node, bt.BalanceField, nil) + ns.SetFieldSub(node, bt.setup.balanceField, nil) } }) @@ -157,31 +138,31 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup return bt } -// Stop saves expiration offset and unsaved node balances and shuts BalanceTracker down -func (bt *BalanceTracker) Stop() { +// Stop saves expiration offset and unsaved node balances and shuts balanceTracker down +func (bt *balanceTracker) stop() { now := bt.clock.Now() bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now)) close(bt.quit) bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok { + if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok { n.lock.Lock() n.storeBalance(true, true) n.lock.Unlock() - bt.ns.SetField(node, bt.BalanceField, nil) + bt.ns.SetField(node, bt.setup.balanceField, nil) } }) bt.ndb.close() } // TotalTokenAmount returns the current total amount of service tokens in existence -func (bt *BalanceTracker) TotalTokenAmount() uint64 { +func (bt *balanceTracker) TotalTokenAmount() uint64 { bt.lock.Lock() defer bt.lock.Unlock() bt.balanceTimer.Update(func(_ time.Duration) bool { bt.active = utils.ExpiredValue{} bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok && n.active { + if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok && n.active { pos, _ := n.GetRawBalance() bt.active.AddExp(pos) } @@ -194,13 +175,21 @@ func (bt *BalanceTracker) TotalTokenAmount() uint64 { } // GetPosBalanceIDs lists node IDs with an associated positive balance -func (bt *BalanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) { +func (bt *balanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) 
(result []enode.ID) { return bt.ndb.getPosBalanceIDs(start, stop, maxCount) } +// SetDefaultFactors sets the default price factors applied to subsequently connected clients +func (bt *balanceTracker) SetDefaultFactors(posFactors, negFactors PriceFactors) { + bt.lock.Lock() + bt.defaultPosFactors = posFactors + bt.defaultNegFactors = negFactors + bt.lock.Unlock() +} + // SetExpirationTCs sets positive and negative token expiration time constants. // Specified in seconds, 0 means infinite (no expiration). -func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) { +func (bt *balanceTracker) SetExpirationTCs(pos, neg uint64) { bt.lock.Lock() defer bt.lock.Unlock() @@ -220,39 +209,55 @@ func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) { // GetExpirationTCs returns the current positive and negative token expiration // time constants -func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) { +func (bt *balanceTracker) GetExpirationTCs() (pos, neg uint64) { bt.lock.Lock() defer bt.lock.Unlock() return bt.posExpTC, bt.negExpTC } -// newNodeBalance loads balances from the database and creates a NodeBalance instance -// for the given node. It also sets the PriorityFlag and adds balanceCallbackZero if +// BalanceOperation allows atomic operations on the balance of a node regardless of whether +// it is currently connected or not +func (bt *balanceTracker) BalanceOperation(id enode.ID, connAddress string, cb func(AtomicBalanceOperator)) { + bt.ns.Operation(func() { + var nb *nodeBalance + if node := bt.ns.GetNode(id); node != nil { + nb, _ = bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance) + } else { + node = enode.SignNull(&enr.Record{}, id) + nb = bt.newNodeBalance(node, connAddress, false) + } + cb(nb) + }) +} + +// newNodeBalance loads balances from the database and creates a nodeBalance instance +// for the given node. It also sets the priorityFlag and adds balanceCallbackZero if // the node has a positive balance. // Note: this function should run inside a NodeStateMachine operation -func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string) *NodeBalance { +func (bt *balanceTracker) newNodeBalance(node *enode.Node, connAddress string, setFlags bool) *nodeBalance { pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false) - nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true) - n := &NodeBalance{ + nb := bt.ndb.getOrNewBalance([]byte(connAddress), true) + n := &nodeBalance{ bt: bt, node: node, - connAddress: negBalanceKey, - balance: balance{pos: pb, neg: nb}, + setFlags: setFlags, + connAddress: connAddress, + balance: balance{pos: pb, neg: nb, posExp: bt.posExp, negExp: bt.negExp}, initTime: bt.clock.Now(), lastUpdate: bt.clock.Now(), } for i := range n.callbackIndex { n.callbackIndex[i] = -1 } - if n.checkPriorityStatus() { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + if setFlags && n.checkPriorityStatus() { + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) } return n } // storeBalance stores either a positive or a negative balance in the database -func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) { +func (bt *balanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) { if bt.canDropBalance(bt.clock.Now(), neg, value) { bt.ndb.delBalance(id, neg) // balance is small enough, drop it directly. 
	} else {
@@ -262,7 +267,7 @@ func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredV

 // canDropBalance tells whether a positive or negative balance is below the threshold
 // and therefore can be dropped from the database
-func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
+func (bt *balanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
 	if neg {
 		return b.Value(bt.negExp.LogOffset(now)) <= negThreshold
 	}
@@ -270,7 +275,7 @@ func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.E
 }

 // updateTotalBalance adjusts the total balance after executing given callback.
-func (bt *BalanceTracker) updateTotalBalance(n *NodeBalance, callback func() bool) {
+func (bt *balanceTracker) updateTotalBalance(n *nodeBalance, callback func() bool) {
 	bt.lock.Lock()
 	defer bt.lock.Unlock()

diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go
new file mode 100644
index 0000000000..2e5fdd0ee7
--- /dev/null
+++ b/les/vflux/server/clientpool.go
@@ -0,0 +1,335 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/les/utils"
+	"github.com/ethereum/go-ethereum/les/vflux"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/nodestate"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+	ErrNotConnected    = errors.New("client not connected")
+	ErrNoPriority      = errors.New("priority too low to raise capacity")
+	ErrCantFindMaximum = errors.New("Unable to find maximum allowed capacity")
+)
+
+// ClientPool implements a client database that assigns a priority to each client
+// based on a positive and negative balance. Positive balance is externally assigned
+// to prioritized clients and is decreased with connection time and processed
+// requests (unless the price factors are zero). If the positive balance is zero
+// then negative balance is accumulated.
+//
+// Balance tracking and priority calculation for connected clients is done by
+// balanceTracker. PriorityQueue ensures that clients with the lowest positive or
+// highest negative balance get evicted when the total capacity allowance is full
+// and new clients with a better balance want to connect.
+//
+// Already connected nodes receive a small bias in their favor in order to avoid
+// accepting and instantly kicking out clients. In theory, we try to ensure that
+// each client can have several minutes of connection time.
+//
+// Balances of disconnected clients are stored in nodeDB including positive balance
+// and negative balance.
+// Both positive balance and negative balance will decrease exponentially.
+// If the balance is low enough, then the record will be dropped.
+type ClientPool struct {
+	*priorityPool
+	*balanceTracker
+
+	setup  *serverSetup
+	clock  mclock.Clock
+	closed bool
+	ns     *nodestate.NodeStateMachine
+	synced func() bool
+
+	lock          sync.RWMutex
+	connectedBias time.Duration
+
+	minCap     uint64      // the minimal capacity value allowed for any client
+	capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation
+}
+
+// clientPeer represents a peer in the client pool. None of the callbacks should block.
+type clientPeer interface {
+	Node() *enode.Node
+	FreeClientId() string                         // unique id for non-priority clients (typically a prefix of the network address)
+	InactiveAllowance() time.Duration             // disconnection timeout for inactive non-priority peers
+	UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is a result of a SetCapacity call on the given peer)
+	Disconnect()                                  // initiates disconnection (Unregister should always be called)
+}
+
+// NewClientPool creates a new client pool
+func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool {
+	setup := newServerSetup()
+	ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
+	cp := &ClientPool{
+		priorityPool:   newPriorityPool(ns, setup, clock, minCap, connectedBias, 4, 100),
+		balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}),
+		setup:          setup,
+		ns:             ns,
+		clock:          clock,
+		minCap:         minCap,
+		connectedBias:  connectedBias,
+		synced:         synced,
+	}
+
+	ns.SubscribeState(nodestate.MergeFlags(setup.activeFlag, setup.inactiveFlag, setup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+		if newState.Equals(setup.inactiveFlag) {
+			// set timeout for non-priority inactive client
+			var timeout time.Duration
+			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
+				timeout = c.InactiveAllowance()
+			}
+			if timeout > 0 {
+				ns.AddTimeout(node, setup.inactiveFlag, timeout)
+			} else {
+				// Note: if capacity is immediately available then priorityPool will set the active
+				// flag simultaneously with removing the inactive flag and therefore this will not
+				// initiate disconnection
+				ns.SetStateSub(node, nodestate.Flags{}, setup.inactiveFlag, 0)
+			}
+		}
+		if oldState.Equals(setup.inactiveFlag) && newState.Equals(setup.inactiveFlag.Or(setup.priorityFlag)) {
+			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout
+		}
+		if newState.Equals(setup.activeFlag) {
+			// active with no priority; limit capacity to minCap
+			cap, _ := ns.GetField(node, setup.capacityField).(uint64)
+			if cap > minCap {
+				cp.requestCapacity(node, minCap, minCap, 0)
+			}
+		}
+		if newState.Equals(nodestate.Flags{}) {
+			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
+				c.Disconnect()
+			}
+		}
+	})
+
+	ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+		if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
+			newCap, _ := newValue.(uint64)
+			c.UpdateCapacity(newCap, node == cp.capReqNode)
+		}
+	})
+
+	// add metrics
+	cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+		if oldState.IsEmpty() && !newState.IsEmpty() {
+			clientConnectedMeter.Mark(1)
+		}
+		if !oldState.IsEmpty() && newState.IsEmpty() {
+			clientDisconnectedMeter.Mark(1)
+		}
+		if oldState.HasNone(cp.setup.activeFlag) && newState.HasAll(cp.setup.activeFlag) {
+			clientActivatedMeter.Mark(1)
+		}
+		if oldState.HasAll(cp.setup.activeFlag) && newState.HasNone(cp.setup.activeFlag) {
+			clientDeactivatedMeter.Mark(1)
+		}
+		_, connected := cp.Active()
+		totalConnectedGauge.Update(int64(connected))
+	})
+	return cp
+}
+
+// Start starts the client pool. Should be called before Register/Unregister.
+func (cp *ClientPool) Start() {
+	cp.ns.Start()
+}
+
+// Stop shuts the client pool down. The clientPeer interface callbacks will not be called
+// after Stop. Register calls will return nil.
+func (cp *ClientPool) Stop() {
+	cp.balanceTracker.stop()
+	cp.ns.Stop()
+}
+
+// Register registers the peer into the client pool. If the peer has insufficient
+// priority and remains inactive for longer than the allowed timeout then it will be
+// disconnected by calling the Disconnect function of the clientPeer interface.
+func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance {
+	cp.ns.SetField(peer.Node(), cp.setup.clientField, peerWrapper{peer})
+	balance, _ := cp.ns.GetField(peer.Node(), cp.setup.balanceField).(*nodeBalance)
+	return balance
+}
+
+// Unregister removes the peer from the client pool
+func (cp *ClientPool) Unregister(peer clientPeer) {
+	cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
+}
+
+// SetConnectedBias sets the connection bias applied to already connected clients,
+// so that they are not kicked out too soon and every connected client has enough
+// time to request or sync some data.
+func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
+	cp.lock.Lock()
+	cp.connectedBias = bias
+	cp.setActiveBias(bias)
+	cp.lock.Unlock()
+}
+
+// SetCapacity sets the assigned capacity of a connected client
+func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Duration, requested bool) (capacity uint64, err error) {
+	cp.lock.RLock()
+	if cp.connectedBias > bias {
+		bias = cp.connectedBias
+	}
+	cp.lock.RUnlock()
+
+	cp.ns.Operation(func() {
+		balance, _ := cp.ns.GetField(node, cp.setup.balanceField).(*nodeBalance)
+		if balance == nil {
+			err = ErrNotConnected
+			return
+		}
+		capacity, _ = cp.ns.GetField(node, cp.setup.capacityField).(uint64)
+		if capacity == 0 {
+			// if the client is inactive then it has insufficient priority for the minimal capacity
+			// (will be activated automatically with minCap when possible)
+			return
+		}
+		if reqCap < cp.minCap {
+			// can't request less than minCap; switching between 0 (inactive state) and minCap is
+			// performed by the server automatically as soon as necessary/possible
+			reqCap = cp.minCap
+		}
+		if reqCap > cp.minCap && cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) {
+			err = ErrNoPriority
+			return
+		}
+		if reqCap == capacity {
+			return
+		}
+		if requested {
+			// mark the requested node so that the UpdateCapacity callback can signal
+			// whether the update is the direct result of a SetCapacity call on the given node
+			cp.capReqNode = node
+			defer func() {
+				cp.capReqNode = nil
+			}()
+		}
+
+		var minTarget, maxTarget uint64
+		if reqCap > capacity {
+			// Estimate maximum available capacity at the current priority level and request
+			// the estimated amount.
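+			// For intuition, with illustrative numbers: if the current capacity is
+			// 10000 and the curve allows at most 18000 at this priority, maxTarget
+			// becomes 18000 (capped by reqCap) and minTarget 18000-18000/20 = 17100,
+			// so only a few fine-step iterations remain.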
+ // Note: requestCapacity could find the highest available capacity between the + // current and the requested capacity but it could cost a lot of iterations with + // fine step adjustment if the requested capacity is very high. By doing a quick + // estimation of the maximum available capacity based on the capacity curve we + // can limit the number of required iterations. + curve := cp.getCapacityCurve().exclude(node.ID()) + maxTarget = curve.maxCapacity(func(capacity uint64) int64 { + return balance.estimatePriority(capacity, 0, 0, bias, false) + }) + if maxTarget <= capacity { + return + } + if maxTarget > reqCap { + maxTarget = reqCap + } + // Specify a narrow target range that allows a limited number of fine step + // iterations + minTarget = maxTarget - maxTarget/20 + if minTarget < capacity { + minTarget = capacity + } + } else { + minTarget, maxTarget = reqCap, reqCap + } + if newCap := cp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget { + capacity = newCap + return + } + // we should be able to find the maximum allowed capacity in a few iterations + log.Error("Unable to find maximum allowed capacity") + err = ErrCantFindMaximum + }) + return +} + +// serveCapQuery serves a vflux capacity query. It receives multiple token amount values +// and a bias time value. For each given token amount it calculates the maximum achievable +// capacity in case the amount is added to the balance. +func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte { + var req vflux.CapacityQueryReq + if rlp.DecodeBytes(data, &req) != nil { + return nil + } + if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { + return nil + } + result := make(vflux.CapacityQueryReply, len(req.AddTokens)) + if !cp.synced() { + capacityQueryZeroMeter.Mark(1) + reply, _ := rlp.EncodeToBytes(&result) + return reply + } + + bias := time.Second * time.Duration(req.Bias) + cp.lock.RLock() + if cp.connectedBias > bias { + bias = cp.connectedBias + } + cp.lock.RUnlock() + + // use capacityCurve to answer request for multiple newly bought token amounts + curve := cp.getCapacityCurve().exclude(id) + cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) { + pb, _ := balance.GetBalance() + for i, addTokens := range req.AddTokens { + add := addTokens.Int64() + result[i] = curve.maxCapacity(func(capacity uint64) int64 { + return balance.estimatePriority(capacity, add, 0, bias, false) / int64(capacity) + }) + if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap { + result[i] = cp.minCap + } + if result[i] < cp.minCap { + result[i] = 0 + } + } + }) + // add first result to metrics (don't care about priority client multi-queries yet) + if result[0] == 0 { + capacityQueryZeroMeter.Mark(1) + } else { + capacityQueryNonZeroMeter.Mark(1) + } + reply, _ := rlp.EncodeToBytes(&result) + return reply +} + +// Handle implements Service +func (cp *ClientPool) Handle(id enode.ID, address string, name string, data []byte) []byte { + switch name { + case vflux.CapacityQueryName: + return cp.serveCapQuery(id, address, data) + default: + return nil + } +} diff --git a/les/clientpool_test.go b/les/vflux/server/clientpool_test.go similarity index 62% rename from les/clientpool_test.go rename to les/vflux/server/clientpool_test.go index 345b373b0f..9503121697 100644 --- a/les/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the 
go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package les
+package server
import (
	"fmt"
@@ -24,12 +24,13 @@ import (
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/rawdb"
-	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
)
+const defaultConnectedBias = time.Minute * 3
+
func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}
@@ -64,11 +65,6 @@ type poolTestPeer struct {
	inactiveAllowed bool
}
-func testStateMachine() *nodestate.NodeStateMachine {
-	return nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
-
-}
-
func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
	return &poolTestPeer{
		index: i,
@@ -81,36 +77,39 @@ func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}
-func (i *poolTestPeer) freeClientId() string {
+func (i *poolTestPeer) FreeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}
-func (i *poolTestPeer) updateCapacity(cap uint64) {
-	i.cap = cap
+func (i *poolTestPeer) InactiveAllowance() time.Duration {
+	if i.inactiveAllowed {
+		return time.Second * 10
+	}
+	return 0
}
-func (i *poolTestPeer) freeze() {}
-
-func (i *poolTestPeer) allowInactive() bool {
-	return i.inactiveAllowed
+func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) {
+	i.cap = capacity
}
-func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
-	temp := pool.ns.GetField(p.node, clientInfoField) == nil
-	if temp {
-		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
-	}
-	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*vfs.NodeBalance)
-	pos, neg = n.GetBalance()
-	if temp {
-		pool.ns.SetField(p.node, connAddressField, nil)
+func (i *poolTestPeer) Disconnect() {
+	if i.disconnCh == nil {
+		return
	}
+	id := i.node.ID()
+	i.disconnCh <- int(id[0]) + int(id[1])<<8
+}
+
+func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) {
+	pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) {
+		pos, neg = nb.GetBalance()
+	})
	return
}
-func addBalance(pool *clientPool, id enode.ID, amount int64) {
-	pool.forClients([]enode.ID{id}, func(c *clientInfo) {
-		c.balance.AddBalance(amount)
+func addBalance(pool *ClientPool, id enode.ID, amount int64) {
+	pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) {
+		nb.AddBalance(amount)
	})
}
@@ -122,6 +121,19 @@ func checkDiff(a, b uint64) bool {
	return a > b+maxDiff || b > a+maxDiff
}
+func connect(pool *ClientPool, peer *poolTestPeer) uint64 {
+	pool.Register(peer)
+	return peer.cap
+}
+
+func disconnect(pool *ClientPool, peer *poolTestPeer) {
+	pool.Unregister(peer)
+}
+
+func alwaysTrueFn() bool {
+	return true
+}
+
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
@@ -130,19 +142,17 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount)
		disconnCh = make(chan int, clientCount)
-		disconnFn = func(id enode.ID) {
-			disconnCh <- int(id[0]) + int(id[1])<<8
-		}
-		pool = newClientPool(testStateMachine(), db, 1, 0, &clock, disconnFn)
+		pool = NewClientPool(db, 1, 0, &clock, alwaysTrueFn)
	)
-	pool.ns.Start()
+	pool.Start()
+	pool.SetExpirationTCs(0, 1000)
-	pool.setLimits(activeLimit, uint64(activeLimit))
-	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor:
1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.SetLimits(uint64(activeLimit), uint64(activeLimit)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // pool should accept new peers up to its connected limit for i := 0; i < activeLimit; i++ { - if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 { connected[i] = true } else { t.Fatalf("Test peer #%d rejected", i) @@ -163,23 +173,23 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando i := rand.Intn(clientCount) if connected[i] { if randomDisconnect { - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) connected[i] = false connTicks[i] += tickCounter } } else { - if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 { connected[i] = true connTicks[i] -= tickCounter } else { - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) } } pollDisconnects: for { select { case i := <-disconnCh: - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) if connected[i] { connTicks[i] += tickCounter connected[i] = false @@ -211,18 +221,18 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max) } } - pool.stop() + pool.Stop() } -func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) { - if cap, _ := pool.connect(p); cap == 0 { +func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) { + if cap := connect(pool, p); cap == 0 { if expSuccess { t.Fatalf("Failed to connect paid client") } else { return } } - if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil { + if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap { if expSuccess { t.Fatalf("Failed to raise capacity of paid client") } else { @@ -239,11 +249,11 @@ func TestConnectPaidClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) @@ -255,16 +265,16 @@ func TestConnectPaidClientToSmallPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - 
pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) - // Connect a fat paid client to pool, should reject it. + // connect a fat paid client to pool, should reject it. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false) } @@ -273,24 +283,23 @@ func TestConnectPaidClientToFullPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - removeFn := func(enode.ID) {} // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20)) - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client - if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 { t.Fatalf("Low balance paid client should be rejected") } clock.Run(time.Second) addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client - if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 { t.Fatalf("High balance paid client should be accepted") } } @@ -301,23 +310,20 @@ func TestPaidClientKickedOut(t *testing.T) { db = rawdb.NewMemoryDatabase() kickedCh = make(chan int, 100) ) - removeFn := func(id enode.ID) { - kickedCh <- int(id[0]) - } - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - pool.bt.SetExpirationTCs(0, 0) - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + pool.SetExpirationTCs(0, 0) + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, 
kickedCh).node.ID(), 10000000000) // 10 second allowance - pool.connect(newPoolTestPeer(i, kickedCh)) + connect(pool, newPoolTestPeer(i, kickedCh)) clock.Run(time.Millisecond) } clock.Run(defaultConnectedBias + time.Second*11) - if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 { t.Fatalf("Free client should be accepted") } select { @@ -335,12 +341,12 @@ func TestConnectFreeClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) - if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 { + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 { t.Fatalf("Failed to connect free client") } testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false) @@ -351,26 +357,25 @@ func TestConnectFreeClientToFullPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - removeFn := func(enode.ID) {} // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } - if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 { t.Fatalf("New free client should be rejected") } clock.Run(time.Minute) - if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 { t.Fatalf("New free client should be rejected") } clock.Run(time.Millisecond) clock.Run(4 * time.Minute) - if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 { t.Fatalf("Old client connects more than 5min should be kicked") } } @@ -381,18 +386,17 @@ func TestFreeClientKickedOut(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 100) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, 
RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, kicked)) + connect(pool, newPoolTestPeer(i, kicked)) clock.Run(time.Millisecond) } - if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 { t.Fatalf("New free client should be rejected") } select { @@ -400,10 +404,10 @@ func TestFreeClientKickedOut(t *testing.T) { case <-time.NewTimer(time.Second).C: t.Fatalf("timeout") } - pool.disconnect(newPoolTestPeer(10, kicked)) + disconnect(pool, newPoolTestPeer(10, kicked)) clock.Run(5 * time.Minute) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i+10, kicked)) + connect(pool, newPoolTestPeer(i+10, kicked)) } for i := 0; i < 10; i++ { select { @@ -423,18 +427,17 @@ func TestPositiveBalanceCalculation(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 10) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3)) testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true) clock.Run(time.Minute) - pool.disconnect(newPoolTestPeer(0, kicked)) + disconnect(pool, newPoolTestPeer(0, kicked)) pb, _ := getBalance(pool, newPoolTestPeer(0, kicked)) if checkDiff(pb, uint64(time.Minute*2)) { t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb) @@ -447,12 +450,11 @@ func TestDowngradePriorityClient(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 10) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) p := newPoolTestPeer(0, kicked) addBalance(pool, p.node.ID(), int64(time.Minute)) @@ -483,30 +485,31 @@ func TestNegativeBalanceCalculation(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), 
db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetExpirationTCs(0, 3600) + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } clock.Run(time.Second) for i := 0; i < 10; i++ { - pool.disconnect(newPoolTestPeer(i, nil)) + disconnect(pool, newPoolTestPeer(i, nil)) _, nb := getBalance(pool, newPoolTestPeer(i, nil)) if nb != 0 { t.Fatalf("Short connection shouldn't be recorded") } } for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } clock.Run(time.Minute) for i := 0; i < 10; i++ { - pool.disconnect(newPoolTestPeer(i, nil)) + disconnect(pool, newPoolTestPeer(i, nil)) _, nb := getBalance(pool, newPoolTestPeer(i, nil)) exp := uint64(time.Minute) / 1000 exp -= exp / 120 // correct for negative balance expiration @@ -521,10 +524,10 @@ func TestInactiveClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(2, uint64(2)) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(2, uint64(2)) p1 := newPoolTestPeer(1, nil) p1.inactiveAllowed = true @@ -535,15 +538,15 @@ func TestInactiveClient(t *testing.T) { addBalance(pool, p1.node.ID(), 1000*int64(time.Second)) addBalance(pool, p3.node.ID(), 2000*int64(time.Second)) // p1: 1000 p2: 0 p3: 2000 - p1.cap, _ = pool.connect(p1) + p1.cap = connect(pool, p1) if p1.cap != 1 { t.Fatalf("Failed to connect peer #1") } - p2.cap, _ = pool.connect(p2) + p2.cap = connect(pool, p2) if p2.cap != 1 { t.Fatalf("Failed to connect peer #2") } - p3.cap, _ = pool.connect(p3) + p3.cap = connect(pool, p3) if p3.cap != 1 { t.Fatalf("Failed to connect peer #3") } @@ -566,11 +569,11 @@ func TestInactiveClient(t *testing.T) { if p2.cap != 0 { t.Fatalf("Failed to deactivate peer #2") } - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) p4 := newPoolTestPeer(4, nil) addBalance(pool, p4.node.ID(), 1500*int64(time.Second)) // p1: 1000 p2: 500 p3: 2000 p4: 1500 - p4.cap, _ = pool.connect(p4) + p4.cap = connect(pool, p4) if p4.cap != 1 { t.Fatalf("Failed to activate peer #4") } @@ -579,8 +582,8 @@ func TestInactiveClient(t *testing.T) { } clock.Run(time.Second * 600) // manually trigger a check to avoid a long real-time wait - pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0) - pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0) + pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0) + 
pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0)
	// p1: 1000 p2: 500 p3: 2000 p4: 900
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
@@ -588,8 +591,8 @@ func TestInactiveClient(t *testing.T) {
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
-	pool.disconnect(p2)
-	pool.disconnect(p4)
+	disconnect(pool, p2)
+	disconnect(pool, p4)
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
diff --git a/les/vflux/server/metrics.go b/les/vflux/server/metrics.go
new file mode 100644
index 0000000000..307b8347af
--- /dev/null
+++ b/les/vflux/server/metrics.go
@@ -0,0 +1,33 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+	"github.com/ethereum/go-ethereum/metrics"
+)
+
+var (
+	totalConnectedGauge = metrics.NewRegisteredGauge("vflux/server/totalConnected", nil)
+
+	clientConnectedMeter    = metrics.NewRegisteredMeter("vflux/server/clientEvent/connected", nil)
+	clientActivatedMeter    = metrics.NewRegisteredMeter("vflux/server/clientEvent/activated", nil)
+	clientDeactivatedMeter  = metrics.NewRegisteredMeter("vflux/server/clientEvent/deactivated", nil)
+	clientDisconnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/disconnected", nil)
+
+	capacityQueryZeroMeter    = metrics.NewRegisteredMeter("vflux/server/capQueryZero", nil)
+	capacityQueryNonZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryNonZero", nil)
+)
diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go
index e940ac7c65..573a3570a4 100644
--- a/les/vflux/server/prioritypool.go
+++ b/les/vflux/server/prioritypool.go
@@ -18,7 +18,6 @@ package server
import (
	"math"
-	"reflect"
	"sync"
	"time"
@@ -33,36 +32,7 @@ const (
	lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
)
-// PriorityPoolSetup contains node state flags and fields used by PriorityPool
-// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the pool,
-// see PriorityPool description for details.
-type PriorityPoolSetup struct { - // controlled by PriorityPool - ActiveFlag, InactiveFlag nodestate.Flags - CapacityField, ppNodeInfoField nodestate.Field - // external connections - updateFlag nodestate.Flags - priorityField nodestate.Field -} - -// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields -// and flags controlled by PriorityPool -func NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup { - return PriorityPoolSetup{ - ActiveFlag: setup.NewFlag("active"), - InactiveFlag: setup.NewFlag("inactive"), - CapacityField: setup.NewField("capacity", reflect.TypeOf(uint64(0))), - ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})), - } -} - -// Connect sets the fields and flags used by PriorityPool as an input -func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) { - pps.priorityField = priorityField // should implement nodePriority - pps.updateFlag = updateFlag // triggers an immediate priority update -} - -// PriorityPool handles a set of nodes where each node has a capacity (a scalar value) +// priorityPool handles a set of nodes where each node has a capacity (a scalar value) // and a priority (which can change over time and can also depend on the capacity). // A node is active if it has at least the necessary minimal amount of capacity while // inactive nodes have 0 capacity (values between 0 and the minimum are not allowed). @@ -79,70 +49,70 @@ func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag // This time bias can be interpreted as minimum expected active time at the given // capacity (if the threshold priority stays the same). // -// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is -// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node -// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool -// by externally resetting both flags. ActiveFlag should not be set externally. +// Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is +// added to the pool by externally setting inactiveFlag. priorityPool can switch a node +// between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool +// by externally resetting both flags. activeFlag should not be set externally. // // The highest priority nodes in "inactive" state are moved to "active" state as soon as // the minimum capacity can be granted for them. The capacity of lower priority active // nodes is reduced or they are demoted to "inactive" state if their priority is // insufficient even at minimal capacity. 
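// An illustrative lifecycle sketch (not part of this patch) of the externally
// controlled flags described above, assuming a started NodeStateMachine ns and
// a setup created by newServerSetup (in this patch the balanceField subscription
// in newPriorityPool performs the first step automatically):
//
//	// add a node to the pool: externally set inactiveFlag only
//	ns.SetState(node, setup.inactiveFlag, nodestate.Flags{}, 0)
//	// the pool may later swap inactiveFlag for activeFlag on its own
//	// remove the node: reset both flags (activeFlag must never be set externally)
//	ns.SetState(node, nodestate.Flags{}, setup.activeFlag.Or(setup.inactiveFlag), 0)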
-type PriorityPool struct { - PriorityPoolSetup - ns *nodestate.NodeStateMachine - clock mclock.Clock - lock sync.Mutex - activeQueue *prque.LazyQueue - inactiveQueue *prque.Prque - changed []*ppNodeInfo - activeCount, activeCap uint64 - maxCount, maxCap uint64 - minCap uint64 - activeBias time.Duration - capacityStepDiv uint64 - - cachedCurve *CapacityCurve +type priorityPool struct { + setup *serverSetup + ns *nodestate.NodeStateMachine + clock mclock.Clock + lock sync.Mutex + inactiveQueue *prque.Prque + maxCount, maxCap uint64 + minCap uint64 + activeBias time.Duration + capacityStepDiv, fineStepDiv uint64 + + cachedCurve *capacityCurve ccUpdatedAt mclock.AbsTime ccUpdateForced bool -} -// nodePriority interface provides current and estimated future priorities on demand -type nodePriority interface { - // Priority should return the current priority of the node (higher is better) - Priority(cap uint64) int64 - // EstMinPriority should return a lower estimate for the minimum of the node priority - // value starting from the current moment until the given time. If the priority goes - // under the returned estimate before the specified moment then it is the caller's - // responsibility to signal with updateFlag. - EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 + tempState []*ppNodeInfo // nodes currently in temporary state + // the following fields represent the temporary state if tempState is not empty + activeCount, activeCap uint64 + activeQueue *prque.LazyQueue } -// ppNodeInfo is the internal node descriptor of PriorityPool +// ppNodeInfo is the internal node descriptor of priorityPool type ppNodeInfo struct { nodePriority nodePriority node *enode.Node connected bool - capacity, origCap uint64 - bias time.Duration - forced, changed bool + capacity uint64 // only changed when temporary state is committed activeIndex, inactiveIndex int -} -// NewPriorityPool creates a new PriorityPool -func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool { - pp := &PriorityPool{ - ns: ns, - PriorityPoolSetup: setup, - clock: clock, - inactiveQueue: prque.New(inactiveSetIndex), - minCap: minCap, - activeBias: activeBias, - capacityStepDiv: capacityStepDiv, + tempState bool // should only be true while the priorityPool lock is held + tempCapacity uint64 // equals capacity when tempState is false + // the following fields only affect the temporary state and they are set to their + // default value when entering the temp state + minTarget, stepDiv uint64 + bias time.Duration +} + +// newPriorityPool creates a new priorityPool +func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv, fineStepDiv uint64) *priorityPool { + pp := &priorityPool{ + setup: setup, + ns: ns, + clock: clock, + inactiveQueue: prque.New(inactiveSetIndex), + minCap: minCap, + activeBias: activeBias, + capacityStepDiv: capacityStepDiv, + fineStepDiv: fineStepDiv, + } + if pp.activeBias < time.Duration(1) { + pp.activeBias = time.Duration(1) } pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh) - ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(pp.setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue 
interface{}) { if newValue != nil { c := &ppNodeInfo{ node: node, @@ -150,18 +120,19 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl activeIndex: -1, inactiveIndex: -1, } - ns.SetFieldSub(node, pp.ppNodeInfoField, c) + ns.SetFieldSub(node, pp.setup.queueField, c) + ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) } else { - ns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0) - if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil { + ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0) + if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil { pp.disconnectedNode(n) } - ns.SetFieldSub(node, pp.CapacityField, nil) - ns.SetFieldSub(node, pp.ppNodeInfoField, nil) + ns.SetFieldSub(node, pp.setup.capacityField, nil) + ns.SetFieldSub(node, pp.setup.queueField, nil) } }) - ns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil { + ns.SubscribeState(pp.setup.activeFlag.Or(pp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + if c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); c != nil { if oldState.IsEmpty() { pp.connectedNode(c) } @@ -170,7 +141,7 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl } } }) - ns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { + ns.SubscribeState(pp.setup.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { if !newState.IsEmpty() { pp.updatePriority(node) } @@ -178,18 +149,12 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl return pp } -// RequestCapacity checks whether changing the capacity of a node to the given target -// is possible (bias is applied in favor of other active nodes if the target is higher -// than the current capacity). -// If setCap is true then it also performs the change if possible. The function returns -// the minimum priority needed to do the change and whether it is currently allowed. -// If setCap and allowed are both true then the caller can assume that the change was -// successful. -// Note: priorityField should always be set before calling RequestCapacity. If setCap -// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed -// by this function call either. -// Note 2: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) { +// requestCapacity tries to set the capacity of a connected node to the highest possible +// value inside the given target range. If maxTarget is not reachable then the capacity is +// iteratively reduced in fine steps based on the fineStepDiv parameter until minTarget is reached. +// The function returns the new capacity if successful and the original capacity otherwise. 
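// A call sketch (illustrative, not from the patch) mirroring how SetCapacity and
// the tests below invoke requestCapacity from inside an operation; minTarget,
// maxTarget and bias are assumed to be computed by the caller:
//
//	pp.ns.Operation(func() {
//		if newCap := pp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget {
//			// granted; newCap is the committed capacity
//		}
//	})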
+// Note: this function should run inside a NodeStateMachine operation +func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -198,39 +163,37 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias pp.updateFlags(updates) }() - if targetCap < pp.minCap { - targetCap = pp.minCap + if minTarget < pp.minCap { + minTarget = pp.minCap + } + if maxTarget < minTarget { + maxTarget = minTarget } if bias < pp.activeBias { bias = pp.activeBias } - c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) + c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil { - log.Error("RequestCapacity called for unknown node", "id", node.ID()) - return math.MaxInt64, false + log.Error("requestCapacity called for unknown node", "id", node.ID()) + return 0 } - var priority int64 - if targetCap > c.capacity { - priority = c.nodePriority.EstimatePriority(targetCap, 0, 0, bias, false) - } else { - priority = c.nodePriority.Priority(targetCap) + pp.setTempState(c) + if maxTarget > c.capacity { + c.bias = bias + c.stepDiv = pp.fineStepDiv } - pp.markForChange(c) - pp.setCapacity(c, targetCap) - c.forced = true + pp.setTempCapacity(c, maxTarget) + c.minTarget = minTarget pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) pp.activeQueue.Push(c) - _, minPriority = pp.enforceLimits() - // if capacity update is possible now then minPriority == math.MinInt64 - // if it is not possible at all then minPriority == math.MaxInt64 - allowed = priority > minPriority - updates = pp.finalizeChanges(setCap && allowed) - return + pp.enforceLimits() + updates = pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity) + return c.capacity } // SetLimits sets the maximum number and total capacity of simultaneously active nodes -func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { +func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -247,27 +210,38 @@ func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { updates = pp.finalizeChanges(true) } if inc { - updates = pp.tryActivate() + updates = append(updates, pp.tryActivate(false)...) 
	}
}
-// SetActiveBias sets the bias applied when trying to activate inactive nodes
-func (pp *PriorityPool) SetActiveBias(bias time.Duration) {
+// setActiveBias sets the bias applied when trying to activate inactive nodes
+func (pp *priorityPool) setActiveBias(bias time.Duration) {
	pp.lock.Lock()
-	defer pp.lock.Unlock()
-	pp.activeBias = bias
-	pp.tryActivate()
+	pp.activeBias = bias
+	if pp.activeBias < time.Duration(1) {
+		pp.activeBias = time.Duration(1)
+	}
+	updates := pp.tryActivate(false)
+	pp.lock.Unlock()
+	pp.ns.Operation(func() { pp.updateFlags(updates) })
}
// Active returns the number and total capacity of currently active nodes
-func (pp *PriorityPool) Active() (uint64, uint64) {
+func (pp *priorityPool) Active() (uint64, uint64) {
	pp.lock.Lock()
	defer pp.lock.Unlock()
	return pp.activeCount, pp.activeCap
}
+// Limits returns the maximum allowed number and total capacity of active nodes
+func (pp *priorityPool) Limits() (uint64, uint64) {
+	pp.lock.Lock()
+	defer pp.lock.Unlock()
+
+	return pp.maxCount, pp.maxCap
+}
+
// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
func inactiveSetIndex(a interface{}, index int) {
	a.(*ppNodeInfo).inactiveIndex = index
}
@@ -290,37 +264,31 @@ func invertPriority(p int64) int64 {
// activePriority callback returns actual priority of ppNodeInfo item in activeQueue
func activePriority(a interface{}) int64 {
	c := a.(*ppNodeInfo)
-	if c.forced {
-		return math.MinInt64
-	}
	if c.bias == 0 {
-		return invertPriority(c.nodePriority.Priority(c.capacity))
+		return invertPriority(c.nodePriority.priority(c.tempCapacity))
	} else {
-		return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, 0, c.bias, true))
+		return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, 0, c.bias, true))
	}
}
// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
-func (pp *PriorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
+func (pp *priorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
	c := a.(*ppNodeInfo)
-	if c.forced {
-		return math.MinInt64
-	}
	future := time.Duration(until - pp.clock.Now())
	if future < 0 {
		future = 0
	}
-	return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, future, c.bias, false))
+	return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, future, c.bias, false))
}
// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
-func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 {
-	return p.nodePriority.Priority(pp.minCap)
+func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 {
+	return p.nodePriority.priority(pp.minCap)
}
-// connectedNode is called when a new node has been added to the pool (InactiveFlag set)
+// connectedNode is called when a new node has been added to the pool (inactiveFlag set)
// Note: this function should run inside a NodeStateMachine operation
-func (pp *PriorityPool) connectedNode(c *ppNodeInfo) {
+func (pp *priorityPool) connectedNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
@@ -334,13 +302,13 @@ func (pp *PriorityPool) connectedNode(c *ppNodeInfo) {
	}
	c.connected = true
	pp.inactiveQueue.Push(c, pp.inactivePriority(c))
-	updates = pp.tryActivate()
+	updates = pp.tryActivate(false)
}
-// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag
-// and ActiveFlag reset)
+// disconnectedNode is called when a node has been removed from the pool (both inactiveFlag
+//
and activeFlag reset) // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { +func (pp *priorityPool) disconnectedNode(c *ppNodeInfo) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -356,42 +324,51 @@ func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) if c.capacity != 0 { - pp.setCapacity(c, 0) - updates = pp.tryActivate() + pp.setTempState(c) + pp.setTempCapacity(c, 0) + updates = pp.tryActivate(true) } } -// markForChange internally puts a node in a temporary state that can either be reverted +// setTempState internally puts a node in a temporary state that can either be reverted // or confirmed later. This temporary state allows changing the capacity of a node and -// moving it between the active and inactive queue. ActiveFlag/InactiveFlag and -// CapacityField are not changed while the changes are still temporary. -func (pp *PriorityPool) markForChange(c *ppNodeInfo) { - if c.changed { +// moving it between the active and inactive queue. activeFlag/inactiveFlag and +// capacityField are not changed while the changes are still temporary. +func (pp *priorityPool) setTempState(c *ppNodeInfo) { + if c.tempState { return } - c.changed = true - c.origCap = c.capacity - pp.changed = append(pp.changed, c) + c.tempState = true + if c.tempCapacity != c.capacity { // should never happen + log.Error("tempCapacity != capacity when entering tempState") + } + c.minTarget = pp.minCap + c.stepDiv = pp.capacityStepDiv + pp.tempState = append(pp.tempState, c) } -// setCapacity changes the capacity of a node and adjusts activeCap and activeCount -// accordingly. Note that this change is performed in the temporary state so it should -// be called after markForChange and before finalizeChanges. -func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) { - pp.activeCap += cap - n.capacity - if n.capacity == 0 { +// setTempCapacity changes the capacity of a node in the temporary state and adjusts +// activeCap and activeCount accordingly. Since this change is performed in the temporary +// state it should be called after setTempState and before finalizeChanges. +func (pp *priorityPool) setTempCapacity(n *ppNodeInfo, cap uint64) { + if !n.tempState { // should never happen + log.Error("Node is not in temporary state") + return + } + pp.activeCap += cap - n.tempCapacity + if n.tempCapacity == 0 { pp.activeCount++ } if cap == 0 { pp.activeCount-- } - n.capacity = cap + n.tempCapacity = cap } // enforceLimits enforces active node count and total capacity limits. It returns the // lowest active node priority. Note that this function is performed on the temporary // internal state. 
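// Worked example (illustrative numbers, not from the patch): with the regular
// capacityStepDiv of 4 restored by setTempState, a node at tempCapacity 1000 and
// minTarget 100 shrinks 1000 -> 750 -> 563 -> 423 -> ... on successive MultiPop
// passes until the limits hold; once tempCapacity reaches minTarget the next pass
// drops it to 0, i.e. deactivates the node. Inside requestCapacity the node's
// stepDiv is the much finer fineStepDiv (100 in NewClientPool above), shaving off
// roughly 1% per iteration, which is why SetCapacity first narrows the
// minTarget..maxTarget range with the capacity curve instead of walking down
// from an arbitrarily large request.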
-func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { +func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) { if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount { return nil, math.MinInt64 } @@ -401,16 +378,19 @@ func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { ) pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool { c = data.(*ppNodeInfo) - pp.markForChange(c) + pp.setTempState(c) maxActivePriority = priority - if c.capacity == pp.minCap || pp.activeCount > pp.maxCount { - pp.setCapacity(c, 0) + if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount { + pp.setTempCapacity(c, 0) } else { - sub := c.capacity / pp.capacityStepDiv - if c.capacity-sub < pp.minCap { - sub = c.capacity - pp.minCap + sub := c.tempCapacity / c.stepDiv + if sub == 0 { + sub = 1 } - pp.setCapacity(c, c.capacity-sub) + if c.tempCapacity-sub < c.minTarget { + sub = c.tempCapacity - c.minTarget + } + pp.setTempCapacity(c, c.tempCapacity-sub) pp.activeQueue.Push(c) } return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount @@ -421,71 +401,74 @@ func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { // finalizeChanges either commits or reverts temporary changes. The necessary capacity // field and according flag updates are not performed here but returned in a list because // they should be performed while the mutex is not held. -func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) { - for _, c := range pp.changed { - // always remove and push back in order to update biased/forced priority +func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) { + for _, c := range pp.tempState { + // always remove and push back in order to update biased priority pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) - c.bias = 0 - c.forced = false - c.changed = false - if !commit { - pp.setCapacity(c, c.origCap) + oldCapacity := c.capacity + if commit { + c.capacity = c.tempCapacity + } else { + pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap } + c.tempState = false + c.bias = 0 + c.stepDiv = pp.capacityStepDiv + c.minTarget = pp.minCap if c.connected { if c.capacity != 0 { pp.activeQueue.Push(c) } else { pp.inactiveQueue.Push(c, pp.inactivePriority(c)) } - if c.capacity != c.origCap && commit { - updates = append(updates, capUpdate{c.node, c.origCap, c.capacity}) + if c.capacity != oldCapacity { + updates = append(updates, capUpdate{c.node, oldCapacity, c.capacity}) } } - c.origCap = 0 } - pp.changed = nil + pp.tempState = nil if commit { pp.ccUpdateForced = true } return } -// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update +// capUpdate describes a capacityField and activeFlag/inactiveFlag update type capUpdate struct { node *enode.Node oldCap, newCap uint64 } -// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the +// updateFlags performs capacityField and activeFlag/inactiveFlag updates while the // pool mutex is not held // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) updateFlags(updates []capUpdate) { +func (pp *priorityPool) updateFlags(updates []capUpdate) { for _, f := range updates { if f.oldCap == 0 { - pp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0) + pp.ns.SetStateSub(f.node, pp.setup.activeFlag, pp.setup.inactiveFlag, 0) } if f.newCap == 0 { - pp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0) - pp.ns.SetFieldSub(f.node, pp.CapacityField, 
nil) + pp.ns.SetStateSub(f.node, pp.setup.inactiveFlag, pp.setup.activeFlag, 0) + pp.ns.SetFieldSub(f.node, pp.setup.capacityField, nil) } else { - pp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap) + pp.ns.SetFieldSub(f.node, pp.setup.capacityField, f.newCap) } } } // tryActivate tries to activate inactive nodes if possible -func (pp *PriorityPool) tryActivate() []capUpdate { - var commit bool +func (pp *priorityPool) tryActivate(commit bool) []capUpdate { for pp.inactiveQueue.Size() > 0 { c := pp.inactiveQueue.PopItem().(*ppNodeInfo) - pp.markForChange(c) - pp.setCapacity(c, pp.minCap) + pp.setTempState(c) + pp.setTempCapacity(c, pp.minCap) c.bias = pp.activeBias pp.activeQueue.Push(c) pp.enforceLimits() - if c.capacity > 0 { + if c.tempCapacity > 0 { commit = true + c.bias = 0 } else { break } @@ -497,7 +480,7 @@ func (pp *PriorityPool) tryActivate() []capUpdate { // updatePriority gets the current priority value of the given node from the nodePriority // interface and performs the necessary changes. It is triggered by updateFlag. // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) updatePriority(node *enode.Node) { +func (pp *priorityPool) updatePriority(node *enode.Node) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -506,7 +489,7 @@ func (pp *PriorityPool) updatePriority(node *enode.Node) { pp.updateFlags(updates) }() - c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) + c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil || !c.connected { return } @@ -517,15 +500,15 @@ func (pp *PriorityPool) updatePriority(node *enode.Node) { } else { pp.inactiveQueue.Push(c, pp.inactivePriority(c)) } - updates = pp.tryActivate() + updates = pp.tryActivate(false) } -// CapacityCurve is a snapshot of the priority pool contents in a format that can efficiently +// capacityCurve is a snapshot of the priority pool contents in a format that can efficiently // estimate how much capacity could be granted to a given node at a given priority level. 
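// A usage sketch (illustrative, not from the patch) mirroring the call sites in
// clientpool.go above; node, balance and bias are assumed to be in scope:
//
//	curve := pp.getCapacityCurve().exclude(node.ID())
//	maxCap := curve.maxCapacity(func(capacity uint64) int64 {
//		// the estimator passed in must be monotonically decreasing in capacity
//		return balance.estimatePriority(capacity, 0, 0, bias, false)
//	})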
-type CapacityCurve struct { +type capacityCurve struct { points []curvePoint // curve points sorted in descending order of priority index map[enode.ID][]int // curve point indexes belonging to each node - exclude []int // curve point indexes of excluded node + excludeList []int // curve point indexes of excluded node excludeFirst bool // true if activeCount == maxCount } @@ -534,8 +517,8 @@ type curvePoint struct { nextPri int64 // next priority level where more capacity will be available } -// GetCapacityCurve returns a new or recently cached CapacityCurve based on the contents of the pool -func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { +// getCapacityCurve returns a new or recently cached capacityCurve based on the contents of the pool +func (pp *priorityPool) getCapacityCurve() *capacityCurve { pp.lock.Lock() defer pp.lock.Unlock() @@ -547,7 +530,7 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { pp.ccUpdateForced = false pp.ccUpdatedAt = now - curve := &CapacityCurve{ + curve := &capacityCurve{ index: make(map[enode.ID][]int), } pp.cachedCurve = curve @@ -556,6 +539,7 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { excludeFirst := pp.maxCount == pp.activeCount // reduce node capacities or remove nodes until nothing is left in the queue; // record the available capacity and the necessary priority after each step + lastPri := int64(math.MinInt64) for pp.activeCap > 0 { cp := curvePoint{} if pp.activeCap > pp.maxCap { @@ -570,9 +554,15 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { // enforceLimits removes the lowest priority node if it has minimal capacity, // otherwise reduces its capacity next, cp.nextPri = pp.enforceLimits() + if cp.nextPri < lastPri { + // enforce monotonicity which may be broken by continuously changing priorities + cp.nextPri = lastPri + } else { + lastPri = cp.nextPri + } pp.activeCap -= tempCap if next == nil { - log.Error("GetCapacityCurve: cannot remove next element from the priority queue") + log.Error("getCapacityCurve: cannot remove next element from the priority queue") break } id := next.node.ID() @@ -595,34 +585,34 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { nextPri: math.MaxInt64, }) if curve.excludeFirst { - curve.exclude = curve.index[excludeID] + curve.excludeList = curve.index[excludeID] } return curve } -// Exclude returns a CapacityCurve with the given node excluded from the original curve -func (cc *CapacityCurve) Exclude(id enode.ID) *CapacityCurve { - if exclude, ok := cc.index[id]; ok { +// exclude returns a capacityCurve with the given node excluded from the original curve +func (cc *capacityCurve) exclude(id enode.ID) *capacityCurve { + if excludeList, ok := cc.index[id]; ok { // return a new version of the curve (only one excluded node can be selected) // Note: if the first node was excluded by default (excludeFirst == true) then // we can forget about that and exclude the node with the given id instead. 
- return &CapacityCurve{ - points: cc.points, - index: cc.index, - exclude: exclude, + return &capacityCurve{ + points: cc.points, + index: cc.index, + excludeList: excludeList, } } return cc } -func (cc *CapacityCurve) getPoint(i int) curvePoint { +func (cc *capacityCurve) getPoint(i int) curvePoint { cp := cc.points[i] if i == 0 && cc.excludeFirst { cp.freeCap = 0 return cp } - for ii := len(cc.exclude) - 1; ii >= 0; ii-- { - ei := cc.exclude[ii] + for ii := len(cc.excludeList) - 1; ii >= 0; ii-- { + ei := cc.excludeList[ii] if ei < i { break } @@ -632,11 +622,11 @@ func (cc *CapacityCurve) getPoint(i int) curvePoint { return cp } -// MaxCapacity calculates the maximum capacity available for a node with a given +// maxCapacity calculates the maximum capacity available for a node with a given // (monotonically decreasing) priority vs. capacity function. Note that if the requesting // node is already in the pool then it should be excluded from the curve in order to get // the correct result. -func (cc *CapacityCurve) MaxCapacity(priority func(cap uint64) int64) uint64 { +func (cc *capacityCurve) maxCapacity(priority func(cap uint64) int64) uint64 { min, max := 0, len(cc.points)-1 // the curve always has at least one point for min < max { mid := (min + max) / 2 diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index d83ddc1767..5152312116 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -28,18 +28,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nodestate" ) -var ( - testSetup = &nodestate.Setup{} - ppTestClientFlag = testSetup.NewFlag("ppTestClientFlag") - ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) - ppUpdateFlag = testSetup.NewFlag("ppUpdateFlag") - ppTestSetup = NewPriorityPoolSetup(testSetup) -) - -func init() { - ppTestSetup.Connect(ppTestClientField, ppUpdateFlag) -} - const ( testCapacityStepDiv = 100 testCapacityToleranceDiv = 10 @@ -51,25 +39,27 @@ type ppTestClient struct { balance, cap uint64 } -func (c *ppTestClient) Priority(cap uint64) int64 { +func (c *ppTestClient) priority(cap uint64) int64 { return int64(c.balance / cap) } -func (c *ppTestClient) EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 { +func (c *ppTestClient) estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 { return int64(c.balance / cap) } func TestPriorityPool(t *testing.T) { clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) + setup := newServerSetup() + setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) - ns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if n := ns.GetField(node, ppTestSetup.priorityField); n != nil { + ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + if n := ns.GetField(node, setup.balanceField); n != nil { c := n.(*ppTestClient) c.cap = newValue.(uint64) } }) - pp := NewPriorityPool(ns, ppTestSetup, clock, testMinCap, 0, testCapacityStepDiv) + pp := newPriorityPool(ns, setup, clock, testMinCap, 0, testCapacityStepDiv, testCapacityStepDiv) ns.Start() pp.SetLimits(100, 1000000) clients := make([]*ppTestClient, 100) @@ -77,7 +67,8 @@ func TestPriorityPool(t *testing.T) { for 
{ var ok bool ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true) + newCap := c.cap + c.cap/testCapacityStepDiv + ok = pp.requestCapacity(c.node, newCap, newCap, 0) == newCap }) if !ok { return @@ -101,9 +92,8 @@ func TestPriorityPool(t *testing.T) { } sumBalance += c.balance clients[i] = c - ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) - ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) + ns.SetField(c.node, setup.balanceField, c) + ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0) raise(c) check(c) } @@ -113,8 +103,8 @@ func TestPriorityPool(t *testing.T) { oldBalance := c.balance c.balance = uint64(rand.Int63n(100000000000) + 100000000000) sumBalance += c.balance - oldBalance - pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0) if c.balance > oldBalance { raise(c) } else { @@ -129,32 +119,28 @@ func TestPriorityPool(t *testing.T) { if count%10 == 0 { // test available capacity calculation with capacity curve c = clients[rand.Intn(len(clients))] - curve := pp.GetCapacityCurve().Exclude(c.node.ID()) + curve := pp.getCapacityCurve().exclude(c.node.ID()) add := uint64(rand.Int63n(10000000000000)) c.balance += add sumBalance += add - expCap := curve.MaxCapacity(func(cap uint64) int64 { + expCap := curve.maxCapacity(func(cap uint64) int64 { return int64(c.balance / cap) }) - //fmt.Println(expCap, c.balance, sumBalance) - /*for i, cp := range curve.points { - fmt.Println("cp", i, cp, "ex", curve.getPoint(i)) - }*/ var ok bool - expFail := expCap + 1 + expFail := expCap + 10 if expFail < testMinCap { expFail = testMinCap } ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, expFail, 0, true) + ok = pp.requestCapacity(c.node, expFail, expFail, 0) == expFail }) if ok { t.Errorf("Request for more than expected available capacity succeeded") } if expCap >= testMinCap { ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, expCap, 0, true) + ok = pp.requestCapacity(c.node, expCap, expCap, 0) == expCap }) if !ok { t.Errorf("Request for expected available capacity failed") @@ -162,8 +148,8 @@ func TestPriorityPool(t *testing.T) { } c.balance -= add sumBalance -= add - pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0) for _, c := range clients { raise(c) } @@ -175,8 +161,11 @@ func TestPriorityPool(t *testing.T) { func TestCapacityCurve(t *testing.T) { clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - pp := NewPriorityPool(ns, ppTestSetup, clock, 400000, 0, 2) + setup := newServerSetup() + setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) + + pp := newPriorityPool(ns, setup, clock, 400000, 0, 2, 2) ns.Start() pp.SetLimits(10, 10000000) clients := make([]*ppTestClient, 10) @@ -188,17 +177,16 @@ func TestCapacityCurve(t *testing.T) { cap: 1000000, } clients[i] = c - ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) - ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, 
ppTestSetup.InactiveFlag, nodestate.Flags{}, 0)
+		ns.SetField(c.node, setup.balanceField, c)
+		ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0)
 		ns.Operation(func() {
-			pp.RequestCapacity(c.node, c.cap, 0, true)
+			pp.requestCapacity(c.node, c.cap, c.cap, 0)
 		})
 	}
 
-	curve := pp.GetCapacityCurve()
+	curve := pp.getCapacityCurve()
 	check := func(balance, expCap uint64) {
-		cap := curve.MaxCapacity(func(cap uint64) int64 {
+		cap := curve.maxCapacity(func(cap uint64) int64 {
 			return int64(balance / cap)
 		})
 		var fail bool
@@ -226,7 +214,7 @@ func TestCapacityCurve(t *testing.T) {
 	check(1000000000000, 2500000)
 
 	pp.SetLimits(11, 10000000)
-	curve = pp.GetCapacityCurve()
+	curve = pp.getCapacityCurve()
 	check(0, 0)
 	check(10000000000, 100000)
diff --git a/les/vflux/server/service.go b/les/vflux/server/service.go
index ab759ae441..80a0f47543 100644
--- a/les/vflux/server/service.go
+++ b/les/vflux/server/service.go
@@ -40,7 +40,6 @@ type (
 
 	// Service is a service registered at the Server and identified by a string id
 	Service interface {
-		ServiceInfo() (id, desc string) // only called during registration
 		Handle(id enode.ID, address string, name string, data []byte) []byte // never called concurrently
 	}
 
@@ -60,9 +59,8 @@ func NewServer(delayPerRequest time.Duration) *Server {
 }
 
 // Register registers a Service
-func (s *Server) Register(b Service) {
-	srv := &serviceEntry{backend: b}
-	srv.id, srv.desc = b.ServiceInfo()
+func (s *Server) Register(b Service, id, desc string) {
+	srv := &serviceEntry{backend: b, id: id, desc: desc}
 	if strings.Contains(srv.id, ":") {
 		// srv.id + ":" will be used as a service database prefix
 		log.Error("Service ID contains ':'", "id", srv.id)
diff --git a/les/vflux/server/status.go b/les/vflux/server/status.go
new file mode 100644
index 0000000000..469190777b
--- /dev/null
+++ b/les/vflux/server/status.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+	"reflect"
+
+	"github.com/ethereum/go-ethereum/p2p/nodestate"
+)
+
+type peerWrapper struct{ clientPeer } // the NodeStateMachine type system needs this wrapper
+
+// serverSetup is a wrapper of the node state machine setup, which contains
+// all the created flags and fields used on the vflux server side.
+type serverSetup struct {
+	setup       *nodestate.Setup
+	clientField nodestate.Field // Field contains the client peer handler
+
+	// Flags and fields controlled by balance tracker. BalanceTracker
+	// is responsible for setting/deleting these flags or fields.
+	priorityFlag nodestate.Flags // Flag is set if the node has a positive balance
+	updateFlag   nodestate.Flags // Flag is set whenever the node balance is changed (priority changed)
+	balanceField nodestate.Field // Field contains the client balance for priority calculation
+
+	// Flags and fields controlled by priority queue. Priority queue
+	// is responsible for setting/deleting these flags or fields.
+	activeFlag    nodestate.Flags // Flag is set if the node is active
+	inactiveFlag  nodestate.Flags // Flag is set if the node is inactive
+	capacityField nodestate.Field // Field contains the capacity of the node
+	queueField    nodestate.Field // Field contains the information in the priority queue
+}
+
+// newServerSetup initializes the setup for the state machine and returns the flags/fields group.
+func newServerSetup() *serverSetup {
+	setup := &serverSetup{setup: &nodestate.Setup{}}
+	setup.clientField = setup.setup.NewField("client", reflect.TypeOf(peerWrapper{}))
+	setup.priorityFlag = setup.setup.NewFlag("priority")
+	setup.updateFlag = setup.setup.NewFlag("update")
+	setup.balanceField = setup.setup.NewField("balance", reflect.TypeOf(&nodeBalance{}))
+	setup.activeFlag = setup.setup.NewFlag("active")
+	setup.inactiveFlag = setup.setup.NewFlag("inactive")
+	setup.capacityField = setup.setup.NewField("capacity", reflect.TypeOf(uint64(0)))
+	setup.queueField = setup.setup.NewField("queue", reflect.TypeOf(&ppNodeInfo{}))
+	return setup
+}
diff --git a/metrics/histogram.go b/metrics/histogram.go
index 46f3bbd2f1..2c54ce8b40 100644
--- a/metrics/histogram.go
+++ b/metrics/histogram.go
@@ -26,6 +26,15 @@ func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
 	return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
 }
 
+// GetOrRegisterHistogramLazy returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogramLazy(name string, r Registry, s func() Sample) Histogram {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, func() Histogram { return NewHistogram(s()) }).(Histogram)
+}
+
 // NewHistogram constructs a new StandardHistogram from a Sample.
 func NewHistogram(s Sample) Histogram {
 	if !Enabled {
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
index d4fc478e7b..52d0091034 100644
--- a/metrics/influxdb/influxdb.go
+++ b/metrics/influxdb/influxdb.go
@@ -162,26 +162,29 @@ func (r *reporter) send() error {
 			})
 		case metrics.Histogram:
 			ms := metric.Snapshot()
-			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
-			pts = append(pts, client.Point{
-				Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
-				Tags:        r.tags,
-				Fields: map[string]interface{}{
-					"count":    ms.Count(),
-					"max":      ms.Max(),
-					"mean":     ms.Mean(),
-					"min":      ms.Min(),
-					"stddev":   ms.StdDev(),
-					"variance": ms.Variance(),
-					"p50":      ps[0],
-					"p75":      ps[1],
-					"p95":      ps[2],
-					"p99":      ps[3],
-					"p999":     ps[4],
-					"p9999":    ps[5],
-				},
-				Time: now,
-			})
+
+			if ms.Count() > 0 {
+				ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+				pts = append(pts, client.Point{
+					Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
+					Tags:        r.tags,
+					Fields: map[string]interface{}{
+						"count":    ms.Count(),
+						"max":      ms.Max(),
+						"mean":     ms.Mean(),
+						"min":      ms.Min(),
+						"stddev":   ms.StdDev(),
+						"variance": ms.Variance(),
+						"p50":      ps[0],
+						"p75":      ps[1],
+						"p95":      ps[2],
+						"p99":      ps[3],
+						"p999":     ps[4],
+						"p9999":    ps[5],
+					},
+					Time: now,
+				})
+			}
 		case metrics.Meter:
 			ms := metric.Snapshot()
 			pts = append(pts, client.Point{
diff --git a/metrics/resetting_sample.go b/metrics/resetting_sample.go
new file mode 100644
index 0000000000..43c1129cd0
--- /dev/null
+++ b/metrics/resetting_sample.go
@@ -0,0 +1,24 @@
+package metrics
+
+// ResettingSample converts an ordinary sample into one that resets whenever its
+// snapshot is retrieved. This will break for multi-monitor systems, but when only
+// a single metric is being pushed out, this ensures that low-frequency events don't
+// skew the charts indefinitely.
+func ResettingSample(sample Sample) Sample {
+	return &resettingSample{
+		Sample: sample,
+	}
+}
+
+// resettingSample is a simple wrapper around a sample that resets it upon the
+// snapshot retrieval.
+type resettingSample struct {
+	Sample
+}
+
+// Snapshot returns a read-only copy of the sample with the original reset.
+func (rs *resettingSample) Snapshot() Sample {
+	s := rs.Sample.Snapshot()
+	rs.Sample.Clear()
+	return s
+}
diff --git a/miner/miner.go b/miner/miner.go
index a4840e60e8..fca4b26ebd 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -44,15 +44,18 @@ type Backend interface {
 
 // Config is the configuration parameters of mining.
 type Config struct {
-	Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account)
-	Notify    []string       `toml:",omitempty"` // HTTP URL list to be notified of new work packages(only useful in ethash).
-	ExtraData hexutil.Bytes  `toml:",omitempty"` // Block extra data set by the miner
-	GasFloor  uint64         // Target gas floor for mined blocks.
-	GasCeil   uint64         // Target gas ceiling for mined blocks.
-	GasPrice  *big.Int       // Minimum gas price for mining a transaction
-	Recommit  time.Duration  // The time interval for miner to re-create mining work.
-	Noverify  bool           // Disable remote mining solution verification(only useful in ethash).
-	AllowedFutureBlockTime uint64 // Max time (in seconds) from current time allowed for blocks, before they're considered future blocks
+	Etherbase  common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account)
+	Notify     []string       `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash).
+	NotifyFull bool           `toml:",omitempty"` // Notify with pending block headers instead of work packages
+	ExtraData  hexutil.Bytes  `toml:",omitempty"` // Block extra data set by the miner
+	GasFloor   uint64         // Target gas floor for mined blocks.
+	GasCeil    uint64         // Target gas ceiling for mined blocks.
+	GasPrice   *big.Int       // Minimum gas price for mining a transaction
+	Recommit   time.Duration  // The time interval for miner to re-create mining work.
+	Noverify   bool           // Disable remote mining solution verification (only useful in ethash).
+
+	// Quorum
+	AllowedFutureBlockTime uint64 // Max time (in seconds) from current time allowed for blocks, before they're considered future blocks
 }
 
 // Miner creates blocks and searches for proof-of-work values.
@@ -162,7 +165,7 @@ func (miner *Miner) Mining() bool {
 	return miner.worker.isRunning()
 }
 
-func (miner *Miner) HashRate() uint64 {
+func (miner *Miner) Hashrate() uint64 {
 	if pow, ok := miner.engine.(consensus.PoW); ok {
 		return uint64(pow.Hashrate())
 	}
diff --git a/node/api.go b/node/api.go
index f3886f1edb..3cb1b790ce 100644
--- a/node/api.go
+++ b/node/api.go
@@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/internal/debug"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -162,8 +163,8 @@ func (api *privateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
 	return rpcSub, nil
 }
 
-// StartRPC starts the HTTP RPC API server.
-func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
+// StartHTTP starts the HTTP RPC API server.
+func (api *privateAdminAPI) StartHTTP(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
 	api.node.lock.Lock()
 	defer api.node.lock.Unlock()
 
@@ -221,12 +222,26 @@ func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
 	return true, nil
 }
 
-// StopRPC shuts down the HTTP server.
-func (api *privateAdminAPI) StopRPC() (bool, error) {
+// StartRPC starts the HTTP RPC API server.
+// This method is deprecated. Use StartHTTP instead.
+func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
+	log.Warn("Deprecation warning", "method", "admin.StartRPC", "use-instead", "admin.StartHTTP")
+	return api.StartHTTP(host, port, cors, apis, vhosts)
+}
+
+// StopHTTP shuts down the HTTP server.
+func (api *privateAdminAPI) StopHTTP() (bool, error) {
 	api.node.http.stop()
 	return true, nil
 }
 
+// StopRPC shuts down the HTTP server.
+// This method is deprecated. Use StopHTTP instead.
+func (api *privateAdminAPI) StopRPC() (bool, error) {
+	log.Warn("Deprecation warning", "method", "admin.StopRPC", "use-instead", "admin.StopHTTP")
+	return api.StopHTTP()
+}
+
 // StartWS starts the websocket RPC API server.
func (api *privateAdminAPI) StartWS(host *string, port *int, allowedOrigins *string, apis *string) (bool, error) { api.node.lock.Lock() diff --git a/node/api_test.go b/node/api_test.go index 9c3fa3a31d..9549adf9c2 100644 --- a/node/api_test.go +++ b/node/api_test.go @@ -69,7 +69,7 @@ func TestStartRPC(t *testing.T) { name: "rpc enabled through API", cfg: Config{}, fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil) + _, err := api.StartHTTP(sp("127.0.0.1"), ip(0), nil, nil, nil) assert.NoError(t, err) }, wantReachable: true, @@ -90,14 +90,14 @@ func TestStartRPC(t *testing.T) { port := listener.Addr().(*net.TCPAddr).Port // Now try to start RPC on that port. This should fail. - _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil) + _, err = api.StartHTTP(sp("127.0.0.1"), ip(port), nil, nil, nil) if err == nil { - t.Fatal("StartRPC should have failed on port", port) + t.Fatal("StartHTTP should have failed on port", port) } // Try again after unblocking the port. It should work this time. listener.Close() - _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil) + _, err = api.StartHTTP(sp("127.0.0.1"), ip(port), nil, nil, nil) assert.NoError(t, err) }, wantReachable: true, @@ -109,7 +109,7 @@ func TestStartRPC(t *testing.T) { name: "rpc stopped through API", cfg: Config{HTTPHost: "127.0.0.1"}, fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopRPC() + _, err := api.StopHTTP() assert.NoError(t, err) }, wantReachable: false, @@ -121,10 +121,10 @@ func TestStartRPC(t *testing.T) { name: "rpc stopped twice", cfg: Config{HTTPHost: "127.0.0.1"}, fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopRPC() + _, err := api.StopHTTP() assert.NoError(t, err) - _, err = api.StopRPC() + _, err = api.StopHTTP() assert.NoError(t, err) }, wantReachable: false, @@ -211,14 +211,14 @@ func TestStartRPC(t *testing.T) { { name: "rpc stopped with ws enabled", fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil) + _, err := api.StartHTTP(sp("127.0.0.1"), ip(0), nil, nil, nil) assert.NoError(t, err) wsport := n.http.port _, err = api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil) assert.NoError(t, err) - _, err = api.StopRPC() + _, err = api.StopHTTP() assert.NoError(t, err) }, wantReachable: false, @@ -233,7 +233,7 @@ func TestStartRPC(t *testing.T) { assert.NoError(t, err) wsport := n.http.port - _, err = api.StartRPC(sp("127.0.0.1"), ip(wsport), nil, nil, nil) + _, err = api.StartHTTP(sp("127.0.0.1"), ip(wsport), nil, nil, nil) assert.NoError(t, err) }, wantReachable: true, diff --git a/node/node.go b/node/node.go index 6994072994..04626f71de 100644 --- a/node/node.go +++ b/node/node.go @@ -297,11 +297,13 @@ func (n *Node) openEndpoints() error { if err := n.server.Start(); err != nil { return convertFileLockError(err) } + // Quorum if n.qserver != nil { if err := n.qserver.Start(); err != nil { return convertFileLockError(err) } } + // End Quorum // start RPC endpoints err := n.startRPC() if err != nil { @@ -630,7 +632,7 @@ func (n *Node) EventMux() *event.TypeMux { // OpenDatabase opens an existing database with the given name (or creates one if no // previous can be found) from within the node's instance directory. If the node is // ephemeral, a memory database is returned. 
-func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) (ethdb.Database, error) { +func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) { n.lock.Lock() defer n.lock.Unlock() if n.state == closedState { @@ -642,7 +644,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) ( if n.config.DataDir == "" { db = rawdb.NewMemoryDatabase() } else { - db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace) + db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace, readonly) } if err == nil { @@ -656,7 +658,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) ( // also attaching a chain freezer to it that moves ancient chain data from the // database to immutable append-only files. If the node is an ephemeral one, a // memory database is returned. -func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string) (ethdb.Database, error) { +func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string, readonly bool) (ethdb.Database, error) { n.lock.Lock() defer n.lock.Unlock() if n.state == closedState { @@ -675,7 +677,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, case !filepath.IsAbs(freezer): freezer = n.ResolvePath(freezer) } - db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace) + db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace, readonly) } if err == nil { diff --git a/node/node_test.go b/node/node_test.go index c7336e2d8f..2aa58bc2c7 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -158,7 +158,7 @@ func TestNodeCloseClosesDB(t *testing.T) { stack, _ := New(testNodeConfig()) defer stack.Close() - db, err := stack.OpenDatabase("mydb", 0, 0, "") + db, err := stack.OpenDatabase("mydb", 0, 0, "", false) if err != nil { t.Fatal("can't open DB:", err) } @@ -181,7 +181,7 @@ func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) { var err error stack.RegisterLifecycle(&InstrumentedService{ startHook: func() { - db, err = stack.OpenDatabase("mydb", 0, 0, "") + db, err = stack.OpenDatabase("mydb", 0, 0, "", false) if err != nil { t.Fatal("can't open DB:", err) } @@ -202,7 +202,7 @@ func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) { stack.RegisterLifecycle(&InstrumentedService{ stopHook: func() { - db, err := stack.OpenDatabase("mydb", 0, 0, "") + db, err := stack.OpenDatabase("mydb", 0, 0, "", false) if err != nil { t.Fatal("can't open DB:", err) } diff --git a/oss-fuzz.sh b/oss-fuzz.sh index ac93a5a467..f8152f0fad 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -102,6 +102,7 @@ compile_fuzzer tests/fuzzers/stacktrie Fuzz fuzzStackTrie compile_fuzzer tests/fuzzers/difficulty Fuzz fuzzDifficulty compile_fuzzer tests/fuzzers/abi Fuzz fuzzAbi compile_fuzzer tests/fuzzers/les Fuzz fuzzLes +compile_fuzzer tests/fuzzers/vflux FuzzClientPool fuzzClientPool compile_fuzzer tests/fuzzers/bls12381 FuzzG1Add fuzz_g1_add compile_fuzzer tests/fuzzers/bls12381 FuzzG1Mul fuzz_g1_mul diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index d3e8111ab5..3e4b50aadd 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -37,9 +37,10 @@ import ( // Client discovers nodes by querying DNS servers. 
 type Client struct {
-	cfg     Config
-	clock   mclock.Clock
-	entries *lru.Cache
+	cfg       Config
+	clock     mclock.Clock
+	entries   *lru.Cache
+	ratelimit *rate.Limiter
 }
 
 // Config holds configuration options for the client.
@@ -97,8 +98,12 @@ func NewClient(cfg Config) *Client {
 		panic(err)
 	}
 	rlimit := rate.NewLimiter(rate.Limit(cfg.RateLimit), 10)
-	cfg.Resolver = &rateLimitResolver{cfg.Resolver, rlimit}
-	return &Client{cfg: cfg, entries: cache, clock: mclock.System{}}
+	return &Client{
+		cfg:       cfg,
+		entries:   cache,
+		clock:     mclock.System{},
+		ratelimit: rlimit,
+	}
 }
 
 // SyncTree downloads the entire node tree at the given URL.
@@ -157,6 +162,13 @@ func parseAndVerifyRoot(txt string, loc *linkEntry) (rootEntry, error) {
 // resolveEntry retrieves an entry from the cache or fetches it from the network
 // if it isn't cached.
 func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry, error) {
+	// The rate limit always applies, even when the result might be cached. This is
+	// important because it avoids hot-spinning in consumers of node iterators created on
+	// this client.
+	if err := c.ratelimit.Wait(ctx); err != nil {
+		return nil, err
+	}
+
 	cacheKey := truncateHash(hash)
 	if e, ok := c.entries.Get(cacheKey); ok {
 		return e.(entry), nil
@@ -196,19 +208,6 @@ func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry
 	return nil, nameError{name, errNoEntry}
 }
 
-// rateLimitResolver applies a rate limit to a Resolver.
-type rateLimitResolver struct {
-	r       Resolver
-	limiter *rate.Limiter
-}
-
-func (r *rateLimitResolver) LookupTXT(ctx context.Context, domain string) ([]string, error) {
-	if err := r.limiter.Wait(ctx); err != nil {
-		return nil, err
-	}
-	return r.r.LookupTXT(ctx, domain)
-}
-
 // randomIterator traverses a set of trees and returns nodes found in them.
 type randomIterator struct {
 	cur *enode.Node
diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go
index 82a935ca41..410ec3b854 100644
--- a/p2p/dnsdisc/tree.go
+++ b/p2p/dnsdisc/tree.go
@@ -113,10 +113,41 @@ func (t *Tree) Nodes() []*enode.Node {
 	return nodes
 }
 
+/*
+We want to keep the UDP size below 512 bytes. The UDP size is roughly:
+UDP length = 8 + UDP payload length ( 229 )
+UDP payload length:
+ - dns.id 2
+ - dns.flags 2
+ - dns.count.queries 2
+ - dns.count.answers 2
+ - dns.count.auth_rr 2
+ - dns.count.add_rr 2
+ - queries (query-size + 6)
+ - answers :
+	- dns.resp.name 2
+	- dns.resp.type 2
+	- dns.resp.class 2
+	- dns.resp.ttl 4
+	- dns.resp.len 2
+	- dns.txt.length 1
+	- dns.txt resp_data_size
+
+So the total size is roughly a fixed overhead of `39`, plus the size of the
+query (domain name) and the response.
+The query size is, for example, FVY6INQ6LZ33WLCHO3BPR3FH6Y.snap.mainnet.ethdisco.net (52 bytes).
+
+We also have some static data in the response, such as `enrtree-branch:`, and potentially
+splitting the response up with `" "`, leaving us with roughly `400` bytes that we need
+to stay below.
+
+The number `370` is used to have some margin for extra overhead (for example, the DNS query
+may be larger due to more subdomains).
+*/
 const (
-	hashAbbrev    = 16
-	maxChildren   = 300 / hashAbbrev * (13 / 8)
-	minHashLength = 12
+	hashAbbrevSize = 1 + 16*13/8          // Size of an encoded hash (plus comma)
+	maxChildren    = 370 / hashAbbrevSize // 13 children
+	minHashLength  = 12
 )
 
 // MakeTree creates a tree containing the given nodes and links.
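Editor's note on the arithmetic in the comment above: `hashAbbrevSize` works out to 1 + 16*13/8 = 27 bytes (a 16-byte abbreviated hash grows by the 13/8 base32 expansion factor, plus one comma separator), so maxChildren = 370/27 = 13. A standalone sketch (illustration only, not part of the patch) that reproduces the constants:

	package main

	import "fmt"

	func main() {
		// A 16-byte abbreviated hash is base32-encoded (8 bytes -> 13 chars),
		// plus one comma separator per child entry.
		hashAbbrevSize := 1 + 16*13/8 // 27
		// Keeping the enrtree-branch response around 370 bytes leaves the whole
		// UDP packet below 512 bytes, with margin for the DNS overhead.
		maxChildren := 370 / hashAbbrevSize
		fmt.Println(hashAbbrevSize, maxChildren) // prints: 27 13
	}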
diff --git a/p2p/enr/enr.go b/p2p/enr/enr.go index c36ae9e3ed..05e43fd805 100644 --- a/p2p/enr/enr.go +++ b/p2p/enr/enr.go @@ -50,6 +50,7 @@ var ( errNotSorted = errors.New("record key/value pairs are not sorted by key") errDuplicateKey = errors.New("record contains duplicate key") errIncompletePair = errors.New("record contains incomplete k/v pair") + errIncompleteList = errors.New("record contains less than two list elements") errTooBig = fmt.Errorf("record bigger than %d bytes", SizeLimit) errEncodeUnsigned = errors.New("can't encode unsigned record") errNotFound = errors.New("no such key in record") @@ -209,9 +210,15 @@ func decodeRecord(s *rlp.Stream) (dec Record, raw []byte, err error) { return dec, raw, err } if err = s.Decode(&dec.signature); err != nil { + if err == rlp.EOL { + err = errIncompleteList + } return dec, raw, err } if err = s.Decode(&dec.seq); err != nil { + if err == rlp.EOL { + err = errIncompleteList + } return dec, raw, err } // The rest of the record contains sorted k/v pairs. diff --git a/p2p/enr/enr_test.go b/p2p/enr/enr_test.go index 96a9ced5cf..bf3f104744 100644 --- a/p2p/enr/enr_test.go +++ b/p2p/enr/enr_test.go @@ -231,6 +231,29 @@ func TestRecordTooBig(t *testing.T) { require.NoError(t, signTest([]byte{5}, &r)) } +// This checks that incomplete RLP inputs are handled correctly. +func TestDecodeIncomplete(t *testing.T) { + type decTest struct { + input []byte + err error + } + tests := []decTest{ + {[]byte{0xC0}, errIncompleteList}, + {[]byte{0xC1, 0x1}, errIncompleteList}, + {[]byte{0xC2, 0x1, 0x2}, nil}, + {[]byte{0xC3, 0x1, 0x2, 0x3}, errIncompletePair}, + {[]byte{0xC4, 0x1, 0x2, 0x3, 0x4}, nil}, + {[]byte{0xC5, 0x1, 0x2, 0x3, 0x4, 0x5}, errIncompletePair}, + } + for _, test := range tests { + var r Record + err := rlp.DecodeBytes(test.input, &r) + if err != test.err { + t.Errorf("wrong error for %X: %v", test.input, err) + } + } +} + // TestSignEncodeAndDecodeRandom tests encoding/decoding of records containing random key/value pairs. func TestSignEncodeAndDecodeRandom(t *testing.T) { var r Record diff --git a/p2p/metrics.go b/p2p/metrics.go index 44946473fa..be0d2f495e 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -25,8 +25,17 @@ import ( ) const ( + // ingressMeterName is the prefix of the per-packet inbound metrics. ingressMeterName = "p2p/ingress" - egressMeterName = "p2p/egress" + + // egressMeterName is the prefix of the per-packet outbound metrics. + egressMeterName = "p2p/egress" + + // HandleHistName is the prefix of the per-packet serving time histograms. + HandleHistName = "p2p/handle" + + // WaitHistName is the prefix of the per-packet (req only) waiting time histograms. + WaitHistName = "p2p/wait" ) var ( diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go index d3166f1d87..9323d53cbd 100644 --- a/p2p/nodestate/nodestate.go +++ b/p2p/nodestate/nodestate.go @@ -858,6 +858,23 @@ func (ns *NodeStateMachine) GetField(n *enode.Node, field Field) interface{} { return nil } +// GetState retrieves the current state of the given node. Note that when used in a +// subscription callback the result can be out of sync with the state change represented +// by the callback parameters so extra safety checks might be necessary. 
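+//
+// As a hedged illustration (editor's sketch, not part of the original patch),
+// a subscription callback can re-check the live state before acting on the
+// notified transition:
+//
+//	ns.SubscribeState(flag, func(n *enode.Node, oldState, newState nodestate.Flags) {
+//		if !ns.GetState(n).HasAll(newState) {
+//			return // state changed again since the callback was queued
+//		}
+//		// safe to act on newState here
+//	})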
+func (ns *NodeStateMachine) GetState(n *enode.Node) Flags { + ns.lock.Lock() + defer ns.lock.Unlock() + + ns.checkStarted() + if ns.closed { + return Flags{} + } + if _, node := ns.updateEnode(n); node != nil { + return Flags{mask: node.state, setup: ns.setup} + } + return Flags{} +} + // SetField sets the given field of the given node and blocks until the operation is finished func (ns *NodeStateMachine) SetField(n *enode.Node, field Field, value interface{}) error { ns.lock.Lock() diff --git a/p2p/server.go b/p2p/server.go index 6fb7e72856..ea9128a293 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -886,8 +886,8 @@ func (srv *Server) listenLoop() { } remoteIP := netutil.AddrIP(fd.RemoteAddr()) - if err := srv.checkInboundConn(fd, remoteIP); err != nil { - srv.log.Debug("Rejected inbound connnection", "addr", fd.RemoteAddr(), "err", err) + if err := srv.checkInboundConn(remoteIP); err != nil { + srv.log.Debug("Rejected inbound connection", "addr", fd.RemoteAddr(), "err", err) fd.Close() slots <- struct{}{} continue @@ -907,7 +907,7 @@ func (srv *Server) listenLoop() { } } -func (srv *Server) checkInboundConn(fd net.Conn, remoteIP net.IP) error { +func (srv *Server) checkInboundConn(remoteIP net.IP) error { if remoteIP == nil { return nil } diff --git a/params/config.go b/params/config.go index b7bac0fdb8..2d6ec6f50d 100644 --- a/params/config.go +++ b/params/config.go @@ -76,10 +76,10 @@ var ( // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 364, - SectionHead: common.HexToHash("0x3fd20ff221f5e962bb66f57a61973bfc2ba959879a6509384a80a45d208b5afc"), - CHTRoot: common.HexToHash("0xe35b3b807f4e9427fb4e2929961c78a9dc10f503a538319031cc7d00946a0591"), - BloomRoot: common.HexToHash("0x340553b378b2db214b898be15c80ac5be7caffc2e6448fd6f7aff23290d89296"), + SectionIndex: 371, + SectionHead: common.HexToHash("0x50fd3cec5376ede90ef9129772022690cd1467f22c18abb7faa11e793c51e9c9"), + CHTRoot: common.HexToHash("0xb57b4b22a77b5930847b1ca9f62daa11eae6578948cb7b18997f2c0fe5757025"), + BloomRoot: common.HexToHash("0xa338f8a868a194fa90327d0f5877f656a9f3640c618d2a01a01f2e76ef9ef954"), } // MainnetCheckpointOracle contains a set of configs for the main network oracle. @@ -159,10 +159,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 248, - SectionHead: common.HexToHash("0x26874cf023695778cc3175d1bec19894204d8d0b756b587e81e35f300dc5b33c"), - CHTRoot: common.HexToHash("0xc129d1ed6673c5d3e1068e9d97244e72952b7ca08acbd7b3bfa58bc3085c442c"), - BloomRoot: common.HexToHash("0x1dafe79dcd7d348782aa834a4a4397890d9ad90643736791132ed5c16879a037"), + SectionIndex: 254, + SectionHead: common.HexToHash("0x0cba01dd71baa22ac8fa0b105bc908e94f9ecfbc79b4eb97427fe07b5851dd10"), + CHTRoot: common.HexToHash("0x5673d8fc49c9c7d8729068640e4b392d46952a5a38798973bac1cf1d0d27ad7d"), + BloomRoot: common.HexToHash("0x70e01232b66df9a7778ae3291c9217afb9a2d9f799f32d7b912bd37e7bce83a8"), } // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. @@ -200,10 +200,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. 
GoerliTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 132, - SectionHead: common.HexToHash("0x29fa240c97b47ecbfef3fea8b3cff035d93154d1d48b25e3333cf2f7067c5324"), - CHTRoot: common.HexToHash("0x85e5c59e5b202284291405dadc40dc36ab6417bd189fb18be24f6dcab6b80511"), - BloomRoot: common.HexToHash("0x0b7afdd200477f46e982e2cabc822ac454424986fa50d899685dfaeede1f882d"), + SectionIndex: 138, + SectionHead: common.HexToHash("0xb7ea0566abd7d0def5b3c9afa3431debb7bb30b65af35f106ca93a59e6c859a7"), + CHTRoot: common.HexToHash("0x378c7ea9081242beb982e2e39567ba12f2ed3e59e5aba3f9db1d595646d7c9f4"), + BloomRoot: common.HexToHash("0x523c169286cfca52e8a6579d8c35dc8bf093412d8a7478163bfa81ae91c2492d"), } // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. diff --git a/params/version.go b/params/version.go index ade931e5f1..602b817658 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 10 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string QuorumVersionMajor = 22 diff --git a/raft/minter.go b/raft/minter.go index c4dfcc5f76..7c86b34388 100644 --- a/raft/minter.go +++ b/raft/minter.go @@ -381,6 +381,7 @@ func (env *work) commitTransactions(txes *types.TransactionsByPriceAndNonce, bc } env.publicState.Prepare(tx.Hash(), common.Hash{}, txCount) + env.privateState.Prepare(tx.Hash(), common.Hash{}, txCount) publicReceipt, privateReceipt, err := env.commitTransaction(tx, bc, gp) switch { diff --git a/rlp/iterator.go b/rlp/iterator.go index c28866dbc1..559e03a868 100644 --- a/rlp/iterator.go +++ b/rlp/iterator.go @@ -23,6 +23,7 @@ type listIterator struct { } // NewListIterator creates an iterator for the (list) represented by data +// TODO: Consider removing this implementation, as it is no longer used. func NewListIterator(data RawValue) (*listIterator, error) { k, t, c, err := readKind(data) if err != nil { diff --git a/rpc/client_test.go b/rpc/client_test.go index 89c46c7868..ddaa38cf31 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -18,6 +18,7 @@ package rpc import ( "context" + "encoding/json" "fmt" "math/rand" "net" @@ -378,6 +379,93 @@ func TestClientCloseUnsubscribeRace(t *testing.T) { } } +// unsubscribeRecorder collects the subscription IDs of *_unsubscribe calls. +type unsubscribeRecorder struct { + ServerCodec + unsubscribes map[string]bool +} + +func (r *unsubscribeRecorder) readBatch() ([]*jsonrpcMessage, bool, error) { + if r.unsubscribes == nil { + r.unsubscribes = make(map[string]bool) + } + + msgs, batch, err := r.ServerCodec.readBatch() + for _, msg := range msgs { + if msg.isUnsubscribe() { + var params []string + if err := json.Unmarshal(msg.Params, ¶ms); err != nil { + panic("unsubscribe decode error: " + err.Error()) + } + r.unsubscribes[params[0]] = true + } + } + return msgs, batch, err +} + +// This checks that Client calls the _unsubscribe method on the server when Unsubscribe is +// called on a subscription. +func TestClientSubscriptionUnsubscribeServer(t *testing.T) { + t.Parallel() + + // Create the server. 
+ srv := NewServer() + srv.RegisterName("nftest", new(notificationTestService)) + p1, p2 := net.Pipe() + recorder := &unsubscribeRecorder{ServerCodec: NewCodec(p1)} + go srv.ServeCodec(recorder, OptionMethodInvocation|OptionSubscriptions) + defer srv.Stop() + + // Create the client on the other end of the pipe. + client, _ := newClient(context.Background(), func(context.Context) (ServerCodec, error) { + return NewCodec(p2), nil + }) + defer client.Close() + + // Create the subscription. + ch := make(chan int) + sub, err := client.Subscribe(context.Background(), "nftest", ch, "someSubscription", 1, 1) + if err != nil { + t.Fatal(err) + } + + // Unsubscribe and check that unsubscribe was called. + sub.Unsubscribe() + if !recorder.unsubscribes[sub.subid] { + t.Fatal("client did not call unsubscribe method") + } + if _, open := <-sub.Err(); open { + t.Fatal("subscription error channel not closed after unsubscribe") + } +} + +// This checks that the subscribed channel can be closed after Unsubscribe. +// It is the reproducer for https://github.com/ethereum/go-ethereum/issues/22322 +func TestClientSubscriptionChannelClose(t *testing.T) { + t.Parallel() + + var ( + srv = NewServer() + httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) + wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + ) + defer srv.Stop() + defer httpsrv.Close() + + srv.RegisterName("nftest", new(notificationTestService)) + client, _ := Dial(wsURL) + + for i := 0; i < 100; i++ { + ch := make(chan int, 100) + sub, err := client.Subscribe(context.Background(), "nftest", ch, "someSubscription", maxClientSubscriptionBuffer-1, 1) + if err != nil { + t.Fatal(err) + } + sub.Unsubscribe() + close(ch) + } +} + // This test checks that Client doesn't lock up when a single subscriber // doesn't read subscription events. func TestClientNotificationStorm(t *testing.T) { diff --git a/rpc/handler.go b/rpc/handler.go index 2cb6facecc..a79df3f00b 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -189,7 +189,7 @@ func (h *handler) cancelAllRequests(err error, inflightReq *requestOp) { } for id, sub := range h.clientSubs { delete(h.clientSubs, id) - sub.quitWithError(false, err) + sub.close(err) } } @@ -281,7 +281,7 @@ func (h *handler) handleResponse(msg *jsonrpcMessage) { return } if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil { - go op.sub.start() + go op.sub.run() h.clientSubs[op.sub.subid] = op.sub } } diff --git a/rpc/subscription.go b/rpc/subscription.go index 233215d792..942e764e5d 100644 --- a/rpc/subscription.go +++ b/rpc/subscription.go @@ -208,23 +208,37 @@ type ClientSubscription struct { channel reflect.Value namespace string subid string - in chan json.RawMessage - quitOnce sync.Once // ensures quit is closed once - quit chan struct{} // quit is closed when the subscription exits - errOnce sync.Once // ensures err is closed once - err chan error + // The in channel receives notification values from client dispatcher. + in chan json.RawMessage + + // The error channel receives the error from the forwarding loop. + // It is closed by Unsubscribe. + err chan error + errOnce sync.Once + + // Closing of the subscription is requested by sending on 'quit'. This is handled by + // the forwarding loop, which closes 'forwardDone' when it has stopped sending to + // sub.channel. Finally, 'unsubDone' is closed after unsubscribing on the server side. + quit chan error + forwardDone chan struct{} + unsubDone chan struct{} } +// This is the sentinel value sent on sub.quit when Unsubscribe is called. 
+var errUnsubscribed = errors.New("unsubscribed") + func newClientSubscription(c *Client, namespace string, channel reflect.Value) *ClientSubscription { sub := &ClientSubscription{ - client: c, - namespace: namespace, - etype: channel.Type().Elem(), - channel: channel, - quit: make(chan struct{}), - err: make(chan error, 1), - in: make(chan json.RawMessage), + client: c, + namespace: namespace, + etype: channel.Type().Elem(), + channel: channel, + in: make(chan json.RawMessage), + quit: make(chan error), + forwardDone: make(chan struct{}), + unsubDone: make(chan struct{}), + err: make(chan error, 1), } return sub } @@ -232,9 +246,9 @@ func newClientSubscription(c *Client, namespace string, channel reflect.Value) * // Err returns the subscription error channel. The intended use of Err is to schedule // resubscription when the client connection is closed unexpectedly. // -// The error channel receives a value when the subscription has ended due -// to an error. The received error is nil if Close has been called -// on the underlying client and no other error has occurred. +// The error channel receives a value when the subscription has ended due to an error. The +// received error is nil if Close has been called on the underlying client and no other +// error has occurred. // // The error channel is closed when Unsubscribe is called on the subscription. func (sub *ClientSubscription) Err() <-chan error { @@ -244,41 +258,63 @@ func (sub *ClientSubscription) Err() <-chan error { // Unsubscribe unsubscribes the notification and closes the error channel. // It can safely be called more than once. func (sub *ClientSubscription) Unsubscribe() { - sub.quitWithError(true, nil) - sub.errOnce.Do(func() { close(sub.err) }) -} - -func (sub *ClientSubscription) quitWithError(unsubscribeServer bool, err error) { - sub.quitOnce.Do(func() { - // The dispatch loop won't be able to execute the unsubscribe call - // if it is blocked on deliver. Close sub.quit first because it - // unblocks deliver. - close(sub.quit) - if unsubscribeServer { - sub.requestUnsubscribe() - } - if err != nil { - if err == ErrClientQuit { - err = nil // Adhere to subscription semantics. - } - sub.err <- err + sub.errOnce.Do(func() { + select { + case sub.quit <- errUnsubscribed: + <-sub.unsubDone + case <-sub.unsubDone: } + close(sub.err) }) } +// deliver is called by the client's message dispatcher to send a notification value. func (sub *ClientSubscription) deliver(result json.RawMessage) (ok bool) { select { case sub.in <- result: return true - case <-sub.quit: + case <-sub.forwardDone: return false } } -func (sub *ClientSubscription) start() { - sub.quitWithError(sub.forward()) +// close is called by the client's message dispatcher when the connection is closed. +func (sub *ClientSubscription) close(err error) { + select { + case sub.quit <- err: + case <-sub.forwardDone: + } +} + +// run is the forwarding loop of the subscription. It runs in its own goroutine and +// is launched by the client's handler after the subscription has been created. +func (sub *ClientSubscription) run() { + defer close(sub.unsubDone) + + unsubscribe, err := sub.forward() + + // The client's dispatch loop won't be able to execute the unsubscribe call if it is + // blocked in sub.deliver() or sub.close(). Closing forwardDone unblocks them. + close(sub.forwardDone) + + // Call the unsubscribe method on the server. + if unsubscribe { + sub.requestUnsubscribe() + } + + // Send the error. 
+ if err != nil { + if err == ErrClientQuit { + // ErrClientQuit gets here when Client.Close is called. This is reported as a + // nil error because it's not an error, but we can't close sub.err here. + err = nil + } + sub.err <- err + } } +// forward is the forwarding loop. It takes in RPC notifications and sends them +// on the subscription channel. func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { cases := []reflect.SelectCase{ {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(sub.quit)}, @@ -286,7 +322,7 @@ func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { {Dir: reflect.SelectSend, Chan: sub.channel}, } buffer := list.New() - defer buffer.Init() + for { var chosen int var recv reflect.Value @@ -301,7 +337,15 @@ func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { switch chosen { case 0: // <-sub.quit - return false, nil + if !recv.IsNil() { + err = recv.Interface().(error) + } + if err == errUnsubscribed { + // Exiting because Unsubscribe was called, unsubscribe on server. + return true, nil + } + return false, err + case 1: // <-sub.in val, err := sub.unmarshal(recv.Interface().(json.RawMessage)) if err != nil { @@ -311,6 +355,7 @@ func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { return true, ErrSubscriptionQueueOverflow } buffer.PushBack(val) + case 2: // sub.channel<- cases[2].Send = reflect.Value{} // Don't hold onto the value. buffer.Remove(buffer.Front()) diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index c2705a3550..6076ae10ee 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -129,11 +129,15 @@ func TestClientWebsocketPing(t *testing.T) { if err != nil { t.Fatalf("client dial error: %v", err) } + defer client.Close() + resultChan := make(chan int) sub, err := client.EthSubscribe(ctx, resultChan, "foo") if err != nil { t.Fatalf("client subscribe error: %v", err) } + // Note: Unsubscribe is not called on this subscription because the mockup + // server can't handle the request. // Wait for the context's deadline to be reached before proceeding. // This is important for reproducing https://github.com/ethereum/go-ethereum/issues/19798 diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go index 57d07e03de..c379d1f08d 100644 --- a/tests/fuzzers/les/les-fuzzer.go +++ b/tests/fuzzers/les/les-fuzzer.go @@ -261,18 +261,18 @@ func (d dummyMsg) Decode(val interface{}) error { } func (f *fuzzer) doFuzz(msgCode uint64, packet interface{}) { - version := f.randomInt(3) + 2 // [LES2, LES3, LES4] - peer := l.NewFuzzerPeer(version) enc, err := rlp.EncodeToBytes(packet) if err != nil { panic(err) } + version := f.randomInt(3) + 2 // [LES2, LES3, LES4] + peer, closeFn := l.NewFuzzerPeer(version) + defer closeFn() fn, _, _, err := l.Les3[msgCode].Handle(dummyMsg{enc}) if err != nil { panic(err) } fn(f, peer, func() bool { return true }) - } func Fuzz(input []byte) int { diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go new file mode 100644 index 0000000000..41b8627348 --- /dev/null +++ b/tests/fuzzers/vflux/clientpool-fuzzer.go @@ -0,0 +1,289 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package vflux
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"math"
+	"math/big"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/les/vflux"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/enr"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+type fuzzer struct {
+	peers                  [256]*clientPeer
+	disconnectList         []*clientPeer
+	input                  io.Reader
+	exhausted              bool
+	activeCount, activeCap uint64
+	maxCount, maxCap       uint64
+}
+
+type clientPeer struct {
+	fuzzer  *fuzzer
+	node    *enode.Node
+	freeID  string
+	timeout time.Duration
+
+	balance  vfs.ConnectedBalance
+	capacity uint64
+}
+
+func (p *clientPeer) Node() *enode.Node {
+	return p.node
+}
+
+func (p *clientPeer) FreeClientId() string {
+	return p.freeID
+}
+
+func (p *clientPeer) InactiveAllowance() time.Duration {
+	return p.timeout
+}
+
+func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) {
+	p.fuzzer.activeCap -= p.capacity
+	if p.capacity != 0 {
+		p.fuzzer.activeCount--
+	}
+	p.capacity = newCap
+	p.fuzzer.activeCap += p.capacity
+	if p.capacity != 0 {
+		p.fuzzer.activeCount++
+	}
+}
+
+func (p *clientPeer) Disconnect() {
+	p.fuzzer.disconnectList = append(p.fuzzer.disconnectList, p)
+	p.fuzzer.activeCap -= p.capacity
+	if p.capacity != 0 {
+		p.fuzzer.activeCount--
+	}
+	p.capacity = 0
+	p.balance = nil
+}
+
+func newFuzzer(input []byte) *fuzzer {
+	f := &fuzzer{
+		input: bytes.NewReader(input),
+	}
+	for i := range f.peers {
+		f.peers[i] = &clientPeer{
+			fuzzer:  f,
+			node:    enode.SignNull(new(enr.Record), enode.ID{byte(i)}),
+			freeID:  string([]byte{byte(i)}),
+			timeout: f.randomDelay(),
+		}
+	}
+	return f
+}
+
+func (f *fuzzer) read(size int) []byte {
+	out := make([]byte, size)
+	if _, err := f.input.Read(out); err != nil {
+		f.exhausted = true
+	}
+	return out
+}
+
+func (f *fuzzer) randomByte() byte {
+	d := f.read(1)
+	return d[0]
+}
+
+func (f *fuzzer) randomBool() bool {
+	d := f.read(1)
+	return d[0]&1 == 1
+}
+
+func (f *fuzzer) randomInt(max int) int {
+	if max == 0 {
+		return 0
+	}
+	if max <= 256 {
+		return int(f.randomByte()) % max
+	}
+	var a uint16
+	if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil {
+		f.exhausted = true
+	}
+	return int(a % uint16(max))
+}
+
+func (f *fuzzer) randomTokenAmount(signed bool) int64 {
+	x := uint64(f.randomInt(65000))
+	x = x * x * x * x
+
+	if signed && (x&1) == 1 {
+		if x <= math.MaxInt64 {
+			return -int64(x)
+		}
+		return math.MinInt64
+	}
+	if x <= math.MaxInt64 {
+		return int64(x)
+	}
+	return math.MaxInt64
+}
+
+func (f *fuzzer) randomDelay() time.Duration {
+	delay := f.randomByte()
+	if delay < 128 {
+		return time.Duration(delay) * time.Second
+	}
+	return 0
+}
+
+func (f *fuzzer) randomFactors()
vfs.PriceFactors { + return vfs.PriceFactors{ + TimeFactor: float64(f.randomByte()) / 25500, + CapacityFactor: float64(f.randomByte()) / 255, + RequestFactor: float64(f.randomByte()) / 255, + } +} + +func (f *fuzzer) connectedBalanceOp(balance vfs.ConnectedBalance) { + switch f.randomInt(3) { + case 0: + balance.RequestServed(uint64(f.randomTokenAmount(false))) + case 1: + balance.SetPriceFactors(f.randomFactors(), f.randomFactors()) + case 2: + balance.GetBalance() + balance.GetRawBalance() + balance.GetPriceFactors() + } +} + +func (f *fuzzer) atomicBalanceOp(balance vfs.AtomicBalanceOperator) { + switch f.randomInt(3) { + case 0: + balance.AddBalance(f.randomTokenAmount(true)) + case 1: + balance.SetBalance(uint64(f.randomTokenAmount(false)), uint64(f.randomTokenAmount(false))) + case 2: + balance.GetBalance() + balance.GetRawBalance() + balance.GetPriceFactors() + } +} + +func FuzzClientPool(input []byte) int { + if len(input) > 10000 { + return -1 + } + f := newFuzzer(input) + if f.exhausted { + return 0 + } + clock := &mclock.Simulated{} + db := memorydb.New() + pool := vfs.NewClientPool(db, 10, f.randomDelay(), clock, func() bool { return true }) + pool.Start() + defer pool.Stop() + + count := 0 + for !f.exhausted && count < 1000 { + count++ + switch f.randomInt(11) { + case 0: + i := int(f.randomByte()) + f.peers[i].balance = pool.Register(f.peers[i]) + case 1: + i := int(f.randomByte()) + f.peers[i].Disconnect() + case 2: + f.maxCount = uint64(f.randomByte()) + f.maxCap = uint64(f.randomByte()) + f.maxCap *= f.maxCap + pool.SetLimits(f.maxCount, f.maxCap) + case 3: + pool.SetConnectedBias(f.randomDelay()) + case 4: + pool.SetDefaultFactors(f.randomFactors(), f.randomFactors()) + case 5: + pool.SetExpirationTCs(uint64(f.randomInt(50000)), uint64(f.randomInt(50000))) + case 6: + if _, err := pool.SetCapacity(f.peers[f.randomByte()].node, uint64(f.randomByte()), f.randomDelay(), f.randomBool()); err == vfs.ErrCantFindMaximum { + panic(nil) + } + case 7: + if balance := f.peers[f.randomByte()].balance; balance != nil { + f.connectedBalanceOp(balance) + } + case 8: + pool.BalanceOperation(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, func(balance vfs.AtomicBalanceOperator) { + count := f.randomInt(4) + for i := 0; i < count; i++ { + f.atomicBalanceOp(balance) + } + }) + case 9: + pool.TotalTokenAmount() + pool.GetExpirationTCs() + pool.Active() + pool.Limits() + pool.GetPosBalanceIDs(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].node.ID(), f.randomInt(100)) + case 10: + req := vflux.CapacityQueryReq{ + Bias: uint64(f.randomByte()), + AddTokens: make([]vflux.IntOrInf, f.randomInt(vflux.CapacityQueryMaxLen+1)), + } + for i := range req.AddTokens { + v := vflux.IntOrInf{Type: uint8(f.randomInt(4))} + if v.Type < 2 { + v.Value = *big.NewInt(f.randomTokenAmount(false)) + } + req.AddTokens[i] = v + } + reqEnc, err := rlp.EncodeToBytes(&req) + if err != nil { + panic(err) + } + p := int(f.randomByte()) + if p < len(reqEnc) { + reqEnc[p] = f.randomByte() + } + pool.Handle(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, vflux.CapacityQueryName, reqEnc) + } + + for _, peer := range f.disconnectList { + pool.Unregister(peer) + } + f.disconnectList = nil + if d := f.randomDelay(); d > 0 { + clock.Run(d) + } + //fmt.Println(f.activeCount, f.maxCount, f.activeCap, f.maxCap) + if activeCount, activeCap := pool.Active(); activeCount != f.activeCount || activeCap != f.activeCap { + panic(nil) + } + if f.activeCount > f.maxCount || f.activeCap > 
f.maxCap {
+			panic(nil)
+		}
+	}
+	return 0
+}
diff --git a/tests/fuzzers/vflux/debug/main.go b/tests/fuzzers/vflux/debug/main.go
new file mode 100644
index 0000000000..de0b5d4124
--- /dev/null
+++ b/tests/fuzzers/vflux/debug/main.go
@@ -0,0 +1,41 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/ethereum/go-ethereum/tests/fuzzers/vflux"
+)
+
+func main() {
+	if len(os.Args) != 2 {
+		fmt.Fprintf(os.Stderr, "Usage: debug <file>\n")
+		fmt.Fprintf(os.Stderr, "Example\n")
+		fmt.Fprintf(os.Stderr, "	$ debug ../crashers/4bbef6857c733a87ecf6fd8b9e7238f65eb9862a\n")
+		os.Exit(1)
+	}
+	crasher := os.Args[1]
+	data, err := ioutil.ReadFile(crasher)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err)
+		os.Exit(1)
+	}
+	vflux.FuzzClientPool(data)
+}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 87bce9abca..3aa4098d14 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -1060,7 +1060,7 @@ func tempDB() (string, *Database) {
 	if err != nil {
 		panic(fmt.Sprintf("can't create temporary directory: %v", err))
 	}
-	diskdb, err := leveldb.New(dir, 256, 0, "")
+	diskdb, err := leveldb.New(dir, 256, 0, "", false)
 	if err != nil {
 		panic(fmt.Sprintf("can't create temporary database: %v", err))
 	}
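Editor's note: the vflux debug helper above reads a go-fuzz crasher file and feeds it straight into FuzzClientPool. The same replay can also be driven from an ordinary Go test; this is a hedged sketch (the test name and testdata path are hypothetical, not part of the patch):

	package vflux

	import (
		"io/ioutil"
		"testing"
	)

	// TestReplayCrasher replays a saved fuzzer crasher input, if one is present.
	func TestReplayCrasher(t *testing.T) {
		data, err := ioutil.ReadFile("testdata/crasher") // hypothetical path
		if err != nil {
			t.Skip("no crasher input to replay:", err)
		}
		FuzzClientPool(data)
	}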